From 8ff9207a95da232fdd74aefee60020877762e1eb Mon Sep 17 00:00:00 2001 From: Joshua Karp Date: Fri, 18 Feb 2022 11:26:44 +1100 Subject: [PATCH 01/39] feat: added `nodesGetAll` bin command and service handler Retrieves all buckets from the NodeGraph --- src/bin/nodes/CommandGetAll.ts | 77 +++++++++ src/bin/nodes/CommandNodes.ts | 2 + src/client/GRPCClientClient.ts | 8 + src/client/service/index.ts | 2 + src/client/service/nodesGetAll.ts | 68 ++++++++ .../js/polykey/v1/client_service_grpc_pb.d.ts | 17 ++ .../js/polykey/v1/client_service_grpc_pb.js | 22 +++ src/proto/js/polykey/v1/nodes/nodes_pb.d.ts | 22 +++ src/proto/js/polykey/v1/nodes/nodes_pb.js | 155 ++++++++++++++++++ .../schemas/polykey/v1/client_service.proto | 1 + .../schemas/polykey/v1/nodes/nodes.proto | 5 + 11 files changed, 379 insertions(+) create mode 100644 src/bin/nodes/CommandGetAll.ts create mode 100644 src/client/service/nodesGetAll.ts diff --git a/src/bin/nodes/CommandGetAll.ts b/src/bin/nodes/CommandGetAll.ts new file mode 100644 index 000000000..91f69f681 --- /dev/null +++ b/src/bin/nodes/CommandGetAll.ts @@ -0,0 +1,77 @@ +import type PolykeyClient from '../../PolykeyClient'; +import type nodesPB from '../../proto/js/polykey/v1/nodes/nodes_pb'; +import CommandPolykey from '../CommandPolykey'; +import * as binUtils from '../utils'; +import * as binOptions from '../utils/options'; +import * as binProcessors from '../utils/processors'; + +class CommandGetAll extends CommandPolykey { + constructor(...args: ConstructorParameters) { + super(...args); + this.name('getall'); + this.description('Get all Nodes from Node Graph'); + this.addOption(binOptions.nodeId); + this.addOption(binOptions.clientHost); + this.addOption(binOptions.clientPort); + this.action(async (options) => { + const { default: PolykeyClient } = await import('../../PolykeyClient'); + const utilsPB = await import('../../proto/js/polykey/v1/utils/utils_pb'); + + const clientOptions = await binProcessors.processClientOptions( + 
options.nodePath, + options.nodeId, + options.clientHost, + options.clientPort, + this.fs, + this.logger.getChild(binProcessors.processClientOptions.name), + ); + const meta = await binProcessors.processAuthentication( + options.passwordFile, + this.fs, + ); + let pkClient: PolykeyClient; + this.exitHandlers.handlers.push(async () => { + if (pkClient != null) await pkClient.stop(); + }); + let result: nodesPB.NodeBuckets; + try { + pkClient = await PolykeyClient.createPolykeyClient({ + nodePath: options.nodePath, + nodeId: clientOptions.nodeId, + host: clientOptions.clientHost, + port: clientOptions.clientPort, + logger: this.logger.getChild(PolykeyClient.name), + }); + const emptyMessage = new utilsPB.EmptyMessage(); + try { + result = await binUtils.retryAuthentication( + (auth) => pkClient.grpcClient.nodesGetAll(emptyMessage, auth), + meta, + ); + } catch (err) { + throw err; + } + let output: any = {}; + for (const [bucketIndex, bucket] of result.getBucketsMap().entries()) { + output[bucketIndex] = {}; + for (const [encodedId, address] of bucket.getNodeTableMap().entries()) { + output[bucketIndex][encodedId] = {}; + output[bucketIndex][encodedId].host = address.getHost(); + output[bucketIndex][encodedId].port = address.getPort(); + } + } + if (options.format === 'human') output = [result.getBucketsMap().getEntryList()]; + process.stdout.write( + binUtils.outputFormatter({ + type: options.format === 'json' ? 'json' : 'list', + data: output, + }), + ); + } finally { + if (pkClient! 
!= null) await pkClient.stop(); + } + }); + } +} + +export default CommandGetAll; diff --git a/src/bin/nodes/CommandNodes.ts b/src/bin/nodes/CommandNodes.ts index 6827d01f3..0866a088f 100644 --- a/src/bin/nodes/CommandNodes.ts +++ b/src/bin/nodes/CommandNodes.ts @@ -2,6 +2,7 @@ import CommandAdd from './CommandAdd'; import CommandClaim from './CommandClaim'; import CommandFind from './CommandFind'; import CommandPing from './CommandPing'; +import CommandGetAll from './CommandGetAll'; import CommandPolykey from '../CommandPolykey'; class CommandNodes extends CommandPolykey { @@ -13,6 +14,7 @@ class CommandNodes extends CommandPolykey { this.addCommand(new CommandClaim(...args)); this.addCommand(new CommandFind(...args)); this.addCommand(new CommandPing(...args)); + this.addCommand(new CommandGetAll(...args)); } } diff --git a/src/client/GRPCClientClient.ts b/src/client/GRPCClientClient.ts index c69d58d89..78b13ec9d 100644 --- a/src/client/GRPCClientClient.ts +++ b/src/client/GRPCClientClient.ts @@ -901,6 +901,14 @@ class GRPCClientClient extends GRPCClient { )(...args); } + @ready(new clientErrors.ErrorClientClientDestroyed()) + public nodesGetAll(...args) { + return grpcUtils.promisifyUnaryCall( + this.client, + this.client.nodesGetAll, + )(...args); + } + @ready(new clientErrors.ErrorClientClientDestroyed()) public identitiesAuthenticate(...args) { return grpcUtils.promisifyReadableStreamCall( diff --git a/src/client/service/index.ts b/src/client/service/index.ts index 1e74eb9d8..d6b1dff6f 100644 --- a/src/client/service/index.ts +++ b/src/client/service/index.ts @@ -59,6 +59,7 @@ import nodesAdd from './nodesAdd'; import nodesClaim from './nodesClaim'; import nodesFind from './nodesFind'; import nodesPing from './nodesPing'; +import nodesGetAll from './nodesGetAll'; import notificationsClear from './notificationsClear'; import notificationsRead from './notificationsRead'; import notificationsSend from './notificationsSend'; @@ -165,6 +166,7 @@ function 
createService({ nodesClaim: nodesClaim(container), nodesFind: nodesFind(container), nodesPing: nodesPing(container), + nodesGetAll: nodesGetAll(container), notificationsClear: notificationsClear(container), notificationsRead: notificationsRead(container), notificationsSend: notificationsSend(container), diff --git a/src/client/service/nodesGetAll.ts b/src/client/service/nodesGetAll.ts new file mode 100644 index 000000000..09c354ff2 --- /dev/null +++ b/src/client/service/nodesGetAll.ts @@ -0,0 +1,68 @@ +import type * as grpc from '@grpc/grpc-js'; +import type { Authenticate } from '../types'; +import type { NodeGraph } from '../../nodes'; +import type { KeyManager } from '../../keys'; +import type { NodeId } from '../../nodes/types'; +import { IdInternal } from '@matrixai/id'; +import { utils as nodesUtils } from '../../nodes'; +import { utils as grpcUtils } from '../../grpc'; +import * as nodesPB from '../../proto/js/polykey/v1/nodes/nodes_pb'; +import * as utilsPB from '../../proto/js/polykey/v1/utils/utils_pb'; + +/** + * Retrieves all nodes from all buckets in the NodeGraph. 
+ */ +function nodesGetAll({ + nodeGraph, + keyManager, + authenticate, +}: { + nodeGraph: NodeGraph; + keyManager: KeyManager; + authenticate: Authenticate; +}) { + return async ( + call: grpc.ServerUnaryCall, + callback: grpc.sendUnaryData, + ): Promise => { + try { + const response = new nodesPB.NodeBuckets(); + const metadata = await authenticate(call.metadata); + call.sendMetadata(metadata); + const buckets = await nodeGraph.getAllBuckets(); + for (const b of buckets) { + let index; + for (const id of Object.keys(b)) { + const encodedId = nodesUtils.encodeNodeId(IdInternal.fromString(id)); + const address = new nodesPB.Address() + .setHost(b[id].address.host) + .setPort(b[id].address.port); + // For every node in every bucket, add it to our message + if (!index) { + index = nodesUtils.calculateBucketIndex( + keyManager.getNodeId(), + IdInternal.fromString(id) + ); + } + // Need to either add node to an existing bucket, or create a new + // bucket (if doesn't exist) + let bucket = response.getBucketsMap().get(index); + if (bucket) { + bucket.getNodeTableMap().set(encodedId, address); + } else { + const newBucket = new nodesPB.NodeTable(); + newBucket.getNodeTableMap().set(encodedId, address); + response.getBucketsMap().set(index, newBucket); + } + } + } + callback(null, response); + return; + } catch (e) { + callback(grpcUtils.fromError(e)); + return; + } + }; +} + +export default nodesGetAll; diff --git a/src/proto/js/polykey/v1/client_service_grpc_pb.d.ts b/src/proto/js/polykey/v1/client_service_grpc_pb.d.ts index 023631a45..067688187 100644 --- a/src/proto/js/polykey/v1/client_service_grpc_pb.d.ts +++ b/src/proto/js/polykey/v1/client_service_grpc_pb.d.ts @@ -27,6 +27,7 @@ interface IClientServiceService extends grpc.ServiceDefinition; responseDeserialize: grpc.deserialize; } +interface IClientServiceService_INodesGetAll extends grpc.MethodDefinition { + path: "/polykey.v1.ClientService/NodesGetAll"; + requestStream: false; + responseStream: false; + 
requestSerialize: grpc.serialize; + requestDeserialize: grpc.deserialize; + responseSerialize: grpc.serialize; + responseDeserialize: grpc.deserialize; +} interface IClientServiceService_IKeysKeyPairRoot extends grpc.MethodDefinition { path: "/polykey.v1.ClientService/KeysKeyPairRoot"; requestStream: false; @@ -673,6 +683,7 @@ export interface IClientServiceServer extends grpc.UntypedServiceImplementation nodesPing: grpc.handleUnaryCall; nodesClaim: grpc.handleUnaryCall; nodesFind: grpc.handleUnaryCall; + nodesGetAll: grpc.handleUnaryCall; keysKeyPairRoot: grpc.handleUnaryCall; keysKeyPairReset: grpc.handleUnaryCall; keysKeyPairRenew: grpc.handleUnaryCall; @@ -756,6 +767,9 @@ export interface IClientServiceClient { nodesFind(request: polykey_v1_nodes_nodes_pb.Node, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeAddress) => void): grpc.ClientUnaryCall; nodesFind(request: polykey_v1_nodes_nodes_pb.Node, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeAddress) => void): grpc.ClientUnaryCall; nodesFind(request: polykey_v1_nodes_nodes_pb.Node, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeAddress) => void): grpc.ClientUnaryCall; + nodesGetAll(request: polykey_v1_utils_utils_pb.EmptyMessage, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeBuckets) => void): grpc.ClientUnaryCall; + nodesGetAll(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeBuckets) => void): grpc.ClientUnaryCall; + nodesGetAll(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeBuckets) => void): grpc.ClientUnaryCall; keysKeyPairRoot(request: 
polykey_v1_utils_utils_pb.EmptyMessage, callback: (error: grpc.ServiceError | null, response: polykey_v1_keys_keys_pb.KeyPair) => void): grpc.ClientUnaryCall; keysKeyPairRoot(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_keys_keys_pb.KeyPair) => void): grpc.ClientUnaryCall; keysKeyPairRoot(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_keys_keys_pb.KeyPair) => void): grpc.ClientUnaryCall; @@ -941,6 +955,9 @@ export class ClientServiceClient extends grpc.Client implements IClientServiceCl public nodesFind(request: polykey_v1_nodes_nodes_pb.Node, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeAddress) => void): grpc.ClientUnaryCall; public nodesFind(request: polykey_v1_nodes_nodes_pb.Node, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeAddress) => void): grpc.ClientUnaryCall; public nodesFind(request: polykey_v1_nodes_nodes_pb.Node, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeAddress) => void): grpc.ClientUnaryCall; + public nodesGetAll(request: polykey_v1_utils_utils_pb.EmptyMessage, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeBuckets) => void): grpc.ClientUnaryCall; + public nodesGetAll(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeBuckets) => void): grpc.ClientUnaryCall; + public nodesGetAll(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeBuckets) => void): grpc.ClientUnaryCall; public keysKeyPairRoot(request: 
polykey_v1_utils_utils_pb.EmptyMessage, callback: (error: grpc.ServiceError | null, response: polykey_v1_keys_keys_pb.KeyPair) => void): grpc.ClientUnaryCall; public keysKeyPairRoot(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_keys_keys_pb.KeyPair) => void): grpc.ClientUnaryCall; public keysKeyPairRoot(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_keys_keys_pb.KeyPair) => void): grpc.ClientUnaryCall; diff --git a/src/proto/js/polykey/v1/client_service_grpc_pb.js b/src/proto/js/polykey/v1/client_service_grpc_pb.js index ede2e9470..642127423 100644 --- a/src/proto/js/polykey/v1/client_service_grpc_pb.js +++ b/src/proto/js/polykey/v1/client_service_grpc_pb.js @@ -212,6 +212,17 @@ function deserialize_polykey_v1_nodes_NodeAddress(buffer_arg) { return polykey_v1_nodes_nodes_pb.NodeAddress.deserializeBinary(new Uint8Array(buffer_arg)); } +function serialize_polykey_v1_nodes_NodeBuckets(arg) { + if (!(arg instanceof polykey_v1_nodes_nodes_pb.NodeBuckets)) { + throw new Error('Expected argument of type polykey.v1.nodes.NodeBuckets'); + } + return Buffer.from(arg.serializeBinary()); +} + +function deserialize_polykey_v1_nodes_NodeBuckets(buffer_arg) { + return polykey_v1_nodes_nodes_pb.NodeBuckets.deserializeBinary(new Uint8Array(buffer_arg)); +} + function serialize_polykey_v1_notifications_List(arg) { if (!(arg instanceof polykey_v1_notifications_notifications_pb.List)) { throw new Error('Expected argument of type polykey.v1.notifications.List'); @@ -557,6 +568,17 @@ nodesAdd: { responseSerialize: serialize_polykey_v1_nodes_NodeAddress, responseDeserialize: deserialize_polykey_v1_nodes_NodeAddress, }, + nodesGetAll: { + path: '/polykey.v1.ClientService/NodesGetAll', + requestStream: false, + responseStream: false, + requestType: polykey_v1_utils_utils_pb.EmptyMessage, + 
responseType: polykey_v1_nodes_nodes_pb.NodeBuckets, + requestSerialize: serialize_polykey_v1_utils_EmptyMessage, + requestDeserialize: deserialize_polykey_v1_utils_EmptyMessage, + responseSerialize: serialize_polykey_v1_nodes_NodeBuckets, + responseDeserialize: deserialize_polykey_v1_nodes_NodeBuckets, + }, // Keys keysKeyPairRoot: { path: '/polykey.v1.ClientService/KeysKeyPairRoot', diff --git a/src/proto/js/polykey/v1/nodes/nodes_pb.d.ts b/src/proto/js/polykey/v1/nodes/nodes_pb.d.ts index 0da62ce43..79d0fbd58 100644 --- a/src/proto/js/polykey/v1/nodes/nodes_pb.d.ts +++ b/src/proto/js/polykey/v1/nodes/nodes_pb.d.ts @@ -98,6 +98,28 @@ export namespace Claim { } } +export class NodeBuckets extends jspb.Message { + + getBucketsMap(): jspb.Map; + clearBucketsMap(): void; + + serializeBinary(): Uint8Array; + toObject(includeInstance?: boolean): NodeBuckets.AsObject; + static toObject(includeInstance: boolean, msg: NodeBuckets): NodeBuckets.AsObject; + static extensions: {[key: number]: jspb.ExtensionFieldInfo}; + static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; + static serializeBinaryToWriter(message: NodeBuckets, writer: jspb.BinaryWriter): void; + static deserializeBinary(bytes: Uint8Array): NodeBuckets; + static deserializeBinaryFromReader(message: NodeBuckets, reader: jspb.BinaryReader): NodeBuckets; +} + +export namespace NodeBuckets { + export type AsObject = { + + bucketsMap: Array<[number, NodeTable.AsObject]>, + } +} + export class Connection extends jspb.Message { getAId(): string; setAId(value: string): Connection; diff --git a/src/proto/js/polykey/v1/nodes/nodes_pb.js b/src/proto/js/polykey/v1/nodes/nodes_pb.js index 01d29ce4f..8fe0c189f 100644 --- a/src/proto/js/polykey/v1/nodes/nodes_pb.js +++ b/src/proto/js/polykey/v1/nodes/nodes_pb.js @@ -25,6 +25,7 @@ goog.exportSymbol('proto.polykey.v1.nodes.Connection', null, global); goog.exportSymbol('proto.polykey.v1.nodes.CrossSign', null, global); 
goog.exportSymbol('proto.polykey.v1.nodes.Node', null, global); goog.exportSymbol('proto.polykey.v1.nodes.NodeAddress', null, global); +goog.exportSymbol('proto.polykey.v1.nodes.NodeBuckets', null, global); goog.exportSymbol('proto.polykey.v1.nodes.NodeTable', null, global); goog.exportSymbol('proto.polykey.v1.nodes.Relay', null, global); goog.exportSymbol('proto.polykey.v1.nodes.Signature', null, global); @@ -112,6 +113,27 @@ if (goog.DEBUG && !COMPILED) { */ proto.polykey.v1.nodes.Claim.displayName = 'proto.polykey.v1.nodes.Claim'; } +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.polykey.v1.nodes.NodeBuckets = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, null, null); +}; +goog.inherits(proto.polykey.v1.nodes.NodeBuckets, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.polykey.v1.nodes.NodeBuckets.displayName = 'proto.polykey.v1.nodes.NodeBuckets'; +} /** * Generated by JsPbCodeGenerator. * @param {Array=} opt_data Optional initial data array, typically from a @@ -956,6 +978,139 @@ proto.polykey.v1.nodes.Claim.prototype.setForceInvite = function(value) { +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. + * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. 
whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.polykey.v1.nodes.NodeBuckets.prototype.toObject = function(opt_includeInstance) { + return proto.polykey.v1.nodes.NodeBuckets.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.polykey.v1.nodes.NodeBuckets} msg The msg instance to transform. + * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.polykey.v1.nodes.NodeBuckets.toObject = function(includeInstance, msg) { + var f, obj = { + bucketsMap: (f = msg.getBucketsMap()) ? f.toObject(includeInstance, proto.polykey.v1.nodes.NodeTable.toObject) : [] + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. + * @return {!proto.polykey.v1.nodes.NodeBuckets} + */ +proto.polykey.v1.nodes.NodeBuckets.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.polykey.v1.nodes.NodeBuckets; + return proto.polykey.v1.nodes.NodeBuckets.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.polykey.v1.nodes.NodeBuckets} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. 
+ * @return {!proto.polykey.v1.nodes.NodeBuckets} + */ +proto.polykey.v1.nodes.NodeBuckets.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = msg.getBucketsMap(); + reader.readMessage(value, function(message, reader) { + jspb.Map.deserializeBinary(message, reader, jspb.BinaryReader.prototype.readInt32, jspb.BinaryReader.prototype.readMessage, proto.polykey.v1.nodes.NodeTable.deserializeBinaryFromReader, 0, new proto.polykey.v1.nodes.NodeTable()); + }); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.polykey.v1.nodes.NodeBuckets.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.polykey.v1.nodes.NodeBuckets.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. 
+ * @param {!proto.polykey.v1.nodes.NodeBuckets} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.polykey.v1.nodes.NodeBuckets.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getBucketsMap(true); + if (f && f.getLength() > 0) { + f.serializeBinary(1, writer, jspb.BinaryWriter.prototype.writeInt32, jspb.BinaryWriter.prototype.writeMessage, proto.polykey.v1.nodes.NodeTable.serializeBinaryToWriter); + } +}; + + +/** + * map buckets = 1; + * @param {boolean=} opt_noLazyCreate Do not create the map if + * empty, instead returning `undefined` + * @return {!jspb.Map} + */ +proto.polykey.v1.nodes.NodeBuckets.prototype.getBucketsMap = function(opt_noLazyCreate) { + return /** @type {!jspb.Map} */ ( + jspb.Message.getMapField(this, 1, opt_noLazyCreate, + proto.polykey.v1.nodes.NodeTable)); +}; + + +/** + * Clears values from the map. The map will be non-null. + * @return {!proto.polykey.v1.nodes.NodeBuckets} returns this + */ +proto.polykey.v1.nodes.NodeBuckets.prototype.clearBucketsMap = function() { + this.getBucketsMap().clear(); + return this;}; + + + + + if (jspb.Message.GENERATE_TO_OBJECT) { /** * Creates an object representation of this proto. 
diff --git a/src/proto/schemas/polykey/v1/client_service.proto b/src/proto/schemas/polykey/v1/client_service.proto index 57788c678..81782f13b 100644 --- a/src/proto/schemas/polykey/v1/client_service.proto +++ b/src/proto/schemas/polykey/v1/client_service.proto @@ -26,6 +26,7 @@ service ClientService { rpc NodesPing(polykey.v1.nodes.Node) returns (polykey.v1.utils.StatusMessage); rpc NodesClaim(polykey.v1.nodes.Claim) returns (polykey.v1.utils.StatusMessage); rpc NodesFind(polykey.v1.nodes.Node) returns (polykey.v1.nodes.NodeAddress); + rpc NodesGetAll(polykey.v1.utils.EmptyMessage) returns (polykey.v1.nodes.NodeBuckets); // Keys rpc KeysKeyPairRoot (polykey.v1.utils.EmptyMessage) returns (polykey.v1.keys.KeyPair); diff --git a/src/proto/schemas/polykey/v1/nodes/nodes.proto b/src/proto/schemas/polykey/v1/nodes/nodes.proto index 4c5d64a51..bd2b54f85 100644 --- a/src/proto/schemas/polykey/v1/nodes/nodes.proto +++ b/src/proto/schemas/polykey/v1/nodes/nodes.proto @@ -25,6 +25,11 @@ message Claim { bool force_invite = 2; } +// Bucket index -> a node bucket (from NodeGraph) +message NodeBuckets { + map buckets = 1; +} + // Agent specific. 
message Connection { From 3c7842766d6fc669b761978daf2e0a7120cc8d30 Mon Sep 17 00:00:00 2001 From: Roger Qiu Date: Mon, 21 Feb 2022 17:07:49 +1100 Subject: [PATCH 02/39] fix: general fixes - added getBuckets test for distance and lastUpdated order - resetting buckets work - changing utility names --- src/bin/nodes/CommandGetAll.ts | 8 +- src/client/service/nodesGetAll.ts | 18 +- src/network/utils.ts | 4 +- src/nodes/NodeConnectionManager.ts | 50 +- src/nodes/NodeGraph.ts | 739 +++++++---- src/nodes/NodeManager.ts | 40 +- src/nodes/errors.ts | 6 + src/nodes/types.ts | 59 +- src/nodes/utils.ts | 293 +++- src/types.ts | 20 + src/utils/index.ts | 1 + src/utils/random.ts | 11 + src/utils/utils.ts | 63 + src/validation/utils.ts | 16 +- test-iterator.ts | 31 + test-lexi.ts | 4 + test-nodegraph.ts | 107 ++ test-nodeidgen.ts | 44 + test-order.ts | 98 ++ test-sorting.ts | 28 + test-split.ts | 37 + test-trie.ts | 29 + tests/acl/ACL.test.ts | 29 +- tests/agent/utils.ts | 5 +- tests/bin/nodes/add.test.ts | 3 +- tests/bin/vaults/vaults.test.ts | 8 +- tests/claims/utils.test.ts | 5 +- .../service/gestaltsDiscoveryByNode.test.ts | 3 +- .../client/service/notificationsRead.test.ts | 3 +- tests/discovery/Discovery.test.ts | 2 +- tests/gestalts/GestaltGraph.test.ts | 10 +- tests/grpc/GRPCClient.test.ts | 4 +- tests/identities/IdentitiesManager.test.ts | 4 +- tests/network/Proxy.test.ts | 7 +- tests/nodes/NodeConnection.test.ts | 11 +- .../NodeConnectionManager.general.test.ts | 15 +- tests/nodes/NodeGraph.test.ts | 1175 +++++++++-------- tests/nodes/NodeGraph.test.ts.old | 624 +++++++++ tests/nodes/utils.test.ts | 195 ++- tests/nodes/utils.ts | 22 +- tests/notifications/utils.test.ts | 7 +- tests/sigchain/Sigchain.test.ts | 23 +- tests/status/Status.test.ts | 8 +- tests/utils.ts | 208 +-- tests/vaults/VaultOps.test.ts | 3 +- 45 files changed, 2977 insertions(+), 1103 deletions(-) create mode 100644 src/utils/random.ts create mode 100644 test-iterator.ts create mode 100644 test-lexi.ts 
create mode 100644 test-nodegraph.ts create mode 100644 test-nodeidgen.ts create mode 100644 test-order.ts create mode 100644 test-sorting.ts create mode 100644 test-split.ts create mode 100644 test-trie.ts create mode 100644 tests/nodes/NodeGraph.test.ts.old diff --git a/src/bin/nodes/CommandGetAll.ts b/src/bin/nodes/CommandGetAll.ts index 91f69f681..5d1b5a8fc 100644 --- a/src/bin/nodes/CommandGetAll.ts +++ b/src/bin/nodes/CommandGetAll.ts @@ -54,13 +54,17 @@ class CommandGetAll extends CommandPolykey { let output: any = {}; for (const [bucketIndex, bucket] of result.getBucketsMap().entries()) { output[bucketIndex] = {}; - for (const [encodedId, address] of bucket.getNodeTableMap().entries()) { + for (const [encodedId, address] of bucket + .getNodeTableMap() + .entries()) { output[bucketIndex][encodedId] = {}; output[bucketIndex][encodedId].host = address.getHost(); output[bucketIndex][encodedId].port = address.getPort(); } } - if (options.format === 'human') output = [result.getBucketsMap().getEntryList()]; + if (options.format === 'human') { + output = [result.getBucketsMap().getEntryList()]; + } process.stdout.write( binUtils.outputFormatter({ type: options.format === 'json' ? 
'json' : 'list', diff --git a/src/client/service/nodesGetAll.ts b/src/client/service/nodesGetAll.ts index 09c354ff2..6a658fedd 100644 --- a/src/client/service/nodesGetAll.ts +++ b/src/client/service/nodesGetAll.ts @@ -3,11 +3,11 @@ import type { Authenticate } from '../types'; import type { NodeGraph } from '../../nodes'; import type { KeyManager } from '../../keys'; import type { NodeId } from '../../nodes/types'; +import type * as utilsPB from '../../proto/js/polykey/v1/utils/utils_pb'; import { IdInternal } from '@matrixai/id'; import { utils as nodesUtils } from '../../nodes'; import { utils as grpcUtils } from '../../grpc'; import * as nodesPB from '../../proto/js/polykey/v1/nodes/nodes_pb'; -import * as utilsPB from '../../proto/js/polykey/v1/utils/utils_pb'; /** * Retrieves all nodes from all buckets in the NodeGraph. @@ -29,24 +29,28 @@ function nodesGetAll({ const response = new nodesPB.NodeBuckets(); const metadata = await authenticate(call.metadata); call.sendMetadata(metadata); - const buckets = await nodeGraph.getAllBuckets(); + // FIXME: + // const buckets = await nodeGraph.getAllBuckets(); + const buckets: any = []; for (const b of buckets) { let index; for (const id of Object.keys(b)) { - const encodedId = nodesUtils.encodeNodeId(IdInternal.fromString(id)); + const encodedId = nodesUtils.encodeNodeId( + IdInternal.fromString(id), + ); const address = new nodesPB.Address() .setHost(b[id].address.host) .setPort(b[id].address.port); // For every node in every bucket, add it to our message if (!index) { - index = nodesUtils.calculateBucketIndex( + index = nodesUtils.bucketIndex( keyManager.getNodeId(), - IdInternal.fromString(id) + IdInternal.fromString(id), ); } - // Need to either add node to an existing bucket, or create a new + // Need to either add node to an existing bucket, or create a new // bucket (if doesn't exist) - let bucket = response.getBucketsMap().get(index); + const bucket = response.getBucketsMap().get(index); if (bucket) { 
bucket.getNodeTableMap().set(encodedId, address); } else { diff --git a/src/network/utils.ts b/src/network/utils.ts index 1df7faa7f..c5786a754 100644 --- a/src/network/utils.ts +++ b/src/network/utils.ts @@ -45,10 +45,12 @@ function isHostname(hostname: any): hostname is Hostname { /** * Ports must be numbers between 0 and 65535 inclusive + * If connect is true, then port must be a number between 1 and 65535 inclusive */ -function isPort(port: any): port is Port { +function isPort(port: any, connect: boolean = false): port is Port { if (typeof port !== 'number') return false; if (port < 0 || port > 65535) return false; + if (connect && port === 0) return false; return true; } diff --git a/src/nodes/NodeConnectionManager.ts b/src/nodes/NodeConnectionManager.ts index e51ccc803..5c1b34cb7 100644 --- a/src/nodes/NodeConnectionManager.ts +++ b/src/nodes/NodeConnectionManager.ts @@ -10,6 +10,7 @@ import type { NodeId, NodeIdString, SeedNodes, + NodeEntry, } from './types'; import type { DBTransaction } from '@matrixai/db'; import { withF } from '@matrixai/resources'; @@ -367,7 +368,7 @@ class NodeConnectionManager { public async findNode(targetNodeId: NodeId): Promise { // First check if we already have an existing ID -> address record - let address = await this.nodeGraph.getNode(targetNodeId); + let address = (await this.nodeGraph.getNode(targetNodeId))?.address; // Otherwise, attempt to locate it by contacting network if (address == null) { address = await this.getClosestGlobalNodes(targetNodeId); @@ -461,7 +462,7 @@ class NodeConnectionManager { // getClosestGlobalNodes()? 
const contacted: { [nodeId: string]: boolean } = {}; // Iterate until we've found and contacted k nodes - while (Object.keys(contacted).length <= this.nodeGraph.maxNodesPerBucket) { + while (Object.keys(contacted).length <= this.nodeGraph.nodeBucketLimit) { // While (!foundTarget) { // Remove the node from the front of the array const nextNode = shortlist.shift(); @@ -492,27 +493,31 @@ class NodeConnectionManager { ); // Check to see if any of these are the target node. At the same time, add // them to the shortlist - for (const nodeData of foundClosest) { + for (const [nodeId, nodeData] of foundClosest) { // Ignore any nodes that have been contacted - if (contacted[nodeData.id]) { + if (contacted[nodeId]) { continue; } - if (nodeData.id.equals(targetNodeId)) { - await this.nodeGraph.setNode(nodeData.id, nodeData.address); + if (nodeId.equals(targetNodeId)) { + await this.nodeGraph.setNode(nodeId, nodeData.address); foundAddress = nodeData.address; // We have found the target node, so we can stop trying to look for it // in the shortlist break; } - shortlist.push(nodeData); + shortlist.push([nodeId, nodeData]); } // To make the number of jumps relatively short, should connect to the nodes // closest to the target first, and ask if they know of any closer nodes // than we can simply unshift the first (closest) element from the shortlist - shortlist.sort(function (a: NodeData, b: NodeData) { - if (a.distance > b.distance) { + const distance = (nodeId: NodeId) => + nodesUtils.nodeDistance(targetNodeId, nodeId); + shortlist.sort(function ([nodeIdA], [nodeIdB]) { + const distanceA = distance(nodeIdA); + const distanceB = distance(nodeIdB); + if (distanceA > distanceB) { return 1; - } else if (a.distance < b.distance) { + } else if (distanceA < distanceB) { return -1; } else { return 0; @@ -533,7 +538,7 @@ class NodeConnectionManager { public async getRemoteNodeClosestNodes( nodeId: NodeId, targetNodeId: NodeId, - ): Promise> { + ): Promise> { // Construct the message 
const nodeIdMessage = new nodesPB.Node(); nodeIdMessage.setNodeId(nodesUtils.encodeNodeId(targetNodeId)); @@ -541,20 +546,22 @@ class NodeConnectionManager { return this.withConnF(nodeId, async (connection) => { const client = await connection.getClient(); const response = await client.nodesClosestLocalNodesGet(nodeIdMessage); - const nodes: Array = []; + const nodes: Array<[NodeId, NodeData]> = []; // Loop over each map element (from the returned response) and populate nodes response.getNodeTableMap().forEach((address, nodeIdString: string) => { const nodeId = nodesUtils.decodeNodeId(nodeIdString); // If the nodeId is not valid we don't add it to the list of nodes if (nodeId != null) { - nodes.push({ - id: nodeId, - address: { - host: address.getHost() as Host | Hostname, - port: address.getPort() as Port, + nodes.push([ + nodeId, + { + address: { + host: address.getHost() as Host | Hostname, + port: address.getPort() as Port, + }, + lastUpdated: 0, // FIXME? }, - distance: nodesUtils.calculateDistance(targetNodeId, nodeId), - }); + ]); } }); return nodes; @@ -588,8 +595,9 @@ class NodeConnectionManager { seedNodeId, this.keyManager.getNodeId(), ); - for (const n of nodes) { - await this.nodeGraph.setNode(n.id, n.address); + for (const [nodeId, nodeData] of nodes) { + // FIXME: this should be the `nodeManager.setNode` + await this.nodeGraph.setNode(nodeId, nodeData.address); } } } diff --git a/src/nodes/NodeGraph.ts b/src/nodes/NodeGraph.ts index 4d623dbce..9fa404896 100644 --- a/src/nodes/NodeGraph.ts +++ b/src/nodes/NodeGraph.ts @@ -1,21 +1,27 @@ -import type { DB, DBTransaction, KeyPath, LevelPath } from '@matrixai/db'; -import type { NodeAddress, NodeBucket, NodeId } from './types'; +import type { DB, DBTransaction, LevelPath } from '@matrixai/db'; +import type { + NodeId, + NodeAddress, + NodeBucket, + NodeData, + NodeBucketMeta, + NodeBucketIndex, + NodeGraphSpace, +} from './types'; import type KeyManager from '../keys/KeyManager'; -import type { Host, 
Hostname, Port } from '../network/types'; -import lexi from 'lexicographic-integer'; import Logger from '@matrixai/logger'; import { CreateDestroyStartStop, ready, } from '@matrixai/async-init/dist/CreateDestroyStartStop'; import { IdInternal } from '@matrixai/id'; -import { withF } from '@matrixai/resources'; import * as nodesUtils from './utils'; import * as nodesErrors from './errors'; +import { getUnixtime, never } from '../utils'; /** * NodeGraph is an implementation of Kademlia for maintaining peer to peer information - * We maintain a map of buckets. Where each bucket has k number of node infos + * It is a database of fixed-size buckets, where each bucket contains NodeId -> NodeData */ interface NodeGraph extends CreateDestroyStartStop {} @CreateDestroyStartStop( @@ -26,11 +32,13 @@ class NodeGraph { public static async createNodeGraph({ db, keyManager, + nodeIdBits = 256, logger = new Logger(this.name), fresh = false, }: { db: DB; keyManager: KeyManager; + nodeIdBits?: number; logger?: Logger; fresh?: boolean; }): Promise { @@ -38,6 +46,7 @@ class NodeGraph { const nodeGraph = new NodeGraph({ db, keyManager, + nodeIdBits, logger, }); await nodeGraph.start({ fresh }); @@ -46,339 +55,565 @@ class NodeGraph { } /** - * Max number of nodes in each k-bucket (a.k.a. 
k) + * Bit size of the NodeIds + * This equals the number of buckets */ - public readonly maxNodesPerBucket: number = 20; + public readonly nodeIdBits: number; + /** + * Max number of nodes in each k-bucket + */ + public readonly nodeBucketLimit: number = 20; protected logger: Logger; protected db: DB; protected keyManager: KeyManager; + protected space: NodeGraphSpace; protected nodeGraphDbPath: LevelPath = [this.constructor.name]; - /** - * Buckets stores NodeBucketIndex -> NodeBucket - */ - protected nodeGraphBucketsDbPath: LevelPath = [ - this.constructor.name, - 'buckets', - ]; + protected nodeGraphMetaDbPath: LevelPath; + protected nodeGraphBucketsDbPath: LevelPath; + protected nodeGraphLastUpdatedDbPath: LevelPath; constructor({ db, keyManager, + nodeIdBits, logger, }: { db: DB; keyManager: KeyManager; + nodeIdBits: number; logger: Logger; }) { this.logger = logger; this.db = db; this.keyManager = keyManager; + this.nodeIdBits = nodeIdBits; } public async start({ fresh = false, - }: { - fresh?: boolean; - } = {}) { + }: { fresh?: boolean } = {}): Promise { this.logger.info(`Starting ${this.constructor.name}`); - if (fresh) { - await this.db.clear(this.nodeGraphDbPath); - } + const space = await this.db.withTransactionF(async (tran) => { + if (fresh) { + await tran.clear(this.nodeGraphDbPath); + } + // Space key is used to create a swappable sublevel + // when remapping the buckets during `this.refreshBuckets` + return await this.setupSpace(tran); + }); + // Bucket metadata sublevel: `!meta!! -> value` + this.nodeGraphMetaDbPath = [...this.nodeGraphDbPath, 'meta' + space]; + // Bucket sublevel: `!buckets!! 
-> NodeData` + // The BucketIndex can range from 0 to NodeId bitsize minus 1 + // So 256 bits means 256 buckets of 0 to 255 + this.nodeGraphBucketsDbPath = [...this.nodeGraphDbPath, 'buckets' + space]; + // Last updated sublevel: `!lastUpdated!!- -> NodeId` + // This is used as a sorted index of the NodeId by `lastUpdated` timestamp + // The `NodeId` must be appended in the key in order to disambiguate `NodeId` with same `lastUpdated` timestamp + this.nodeGraphLastUpdatedDbPath = [ + ...this.nodeGraphDbPath, + 'lastUpdated' + space, + ]; + this.space = space; this.logger.info(`Started ${this.constructor.name}`); } - public async stop() { + public async stop(): Promise { this.logger.info(`Stopping ${this.constructor.name}`); this.logger.info(`Stopped ${this.constructor.name}`); } - public async destroy() { + public async destroy(): Promise { this.logger.info(`Destroying ${this.constructor.name}`); + // If the DB was stopped, the existing sublevel `this.nodeGraphDb` will not be valid + // Therefore we recreate the sublevel here await this.db.clear(this.nodeGraphDbPath); this.logger.info(`Destroyed ${this.constructor.name}`); } - @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async withTransactionF( - f: (tran: DBTransaction) => Promise, - ): Promise { - return withF([this.db.transaction()], ([tran]) => f(tran)); - } - /** - * Retrieves the node Address - * @param nodeId node ID of the target node - * @param tran - * @returns Node Address of the target node + * Sets up the space key + * The space string is suffixed to the `buckets` and `meta` sublevels + * This is used to allow swapping of sublevels when remapping buckets + * during `this.refreshBuckets` */ - @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async getNode( - nodeId: NodeId, - tran?: DBTransaction, - ): Promise { - if (tran == null) { - return this.withTransactionF(async (tran) => this.getNode(nodeId, tran)); - } - const bucketIndex = this.getBucketIndex(nodeId); - const 
bucketPath = [ - ...this.nodeGraphBucketsDbPath, - bucketIndex, - ] as unknown as KeyPath; - const bucket = await tran.get(bucketPath); - if (bucket != null && nodeId in bucket) { - return bucket[nodeId].address; + protected async setupSpace(tran: DBTransaction): Promise { + let space = await tran.get([ + ...this.nodeGraphDbPath, + 'space', + ]); + if (space != null) { + return space; } - return; + space = '0'; + await tran.put([...this.nodeGraphDbPath, 'space'], space); + return space; } - /** - * Determines whether a node ID -> node address mapping exists in this node's - * node table. - * @param targetNodeId the node ID of the node to find - * @param tran - * @returns true if the node exists in the table, false otherwise - */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async knowsNode( - targetNodeId: NodeId, - tran?: DBTransaction, - ): Promise { - return !!(await this.getNode(targetNodeId, tran)); + public async getNode( + nodeId: NodeId, + tran: DBTransaction, + ): Promise { + const [bucketIndex] = this.bucketIndex(nodeId); + const bucketDomain = [ + ...this.nodeGraphBucketsDbPath, + nodesUtils.bucketKey(bucketIndex), + nodesUtils.bucketDbKey(nodeId), + ]; + return await tran.get(bucketDomain); } /** - * Returns the specified bucket if it exists - * @param bucketIndex - * @param tran + * Get all nodes + * Nodes are always sorted by `NodeBucketIndex` first + * Then secondly by the node IDs + * The `order` parameter applies to both, for example possible sorts: + * NodeBucketIndex asc, NodeID asc + * NodeBucketIndex desc, NodeId desc */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async getBucket( - bucketIndex: number, - tran?: DBTransaction, - ): Promise { - if (tran == null) { - return this.withTransactionF(async (tran) => - this.getBucket(bucketIndex, tran), - ); - } - const bucketPath = [ - ...this.nodeGraphBucketsDbPath, - lexi.pack(bucketIndex, 'hex'), - ] as unknown as KeyPath; - const bucket = await tran.get(bucketPath); - 
// Cast the non-primitive types correctly (ensures type safety when using them) - for (const nodeId in bucket) { - bucket[nodeId].address.host = bucket[nodeId].address.host as - | Host - | Hostname; - bucket[nodeId].address.port = bucket[nodeId].address.port as Port; - bucket[nodeId].lastUpdated = new Date(bucket[nodeId].lastUpdated); + public async *getNodes( + order: 'asc' | 'desc' = 'asc', + tran: DBTransaction, + ): AsyncGenerator<[NodeId, NodeData]> { + for await (const [key, nodeData] of tran.iterator( + { + reverse: order !== 'asc', + valueAsBuffer: false, + }, + this.nodeGraphBucketsDbPath, + )) { + const { nodeId } = nodesUtils.parseBucketsDbKey(key as unknown as Buffer); + yield [nodeId, nodeData]; } - return bucket; } - /** - * Sets a node to the bucket database - * This may delete an existing node if the bucket is filled up - */ + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) public async setNode( nodeId: NodeId, nodeAddress: NodeAddress, - tran?: DBTransaction, + tran: DBTransaction, ): Promise { - if (tran == null) { - return this.withTransactionF(async (tran) => - this.setNode(nodeId, nodeAddress, tran), + const [bucketIndex, bucketKey] = this.bucketIndex(nodeId); + const lastUpdatedPath = [...this.nodeGraphLastUpdatedDbPath, bucketKey]; + const bucketPath = [...this.nodeGraphBucketsDbPath, bucketKey]; + const nodeData = await tran.get([ + ...bucketPath, + nodesUtils.bucketDbKey(nodeId), + ]); + // If this is a new entry, check the bucket limit + if (nodeData == null) { + const count = await this.getBucketMetaProp(bucketIndex, 'count', tran); + if (count < this.nodeBucketLimit) { + // Increment the bucket count + await this.setBucketMetaProp(bucketIndex, 'count', count + 1, tran); + } else { + // Remove the oldest entry in the bucket + let oldestLastUpdatedKey: Buffer; + let oldestNodeId: NodeId; + for await (const [key] of tran.iterator( + { + limit: 1, + values: false, + }, + this.nodeGraphLastUpdatedDbPath, + )) { + oldestLastUpdatedKey = key 
as unknown as Buffer; + ({ nodeId: oldestNodeId } = nodesUtils.parseLastUpdatedBucketDbKey( + key as unknown as Buffer, + )); + } + await tran.del([...bucketPath, oldestNodeId!.toBuffer()]); + await tran.del([...lastUpdatedPath, oldestLastUpdatedKey!]); + } + } else { + // This is an existing entry, so the index entry must be reset + const lastUpdatedKey = nodesUtils.lastUpdatedBucketDbKey( + nodeData.lastUpdated, + nodeId, ); + await tran.del([...lastUpdatedPath, lastUpdatedKey]); } - const bucketIndex = this.getBucketIndex(nodeId); - const bucketPath = [ - ...this.nodeGraphBucketsDbPath, - bucketIndex, - ] as unknown as KeyPath; - let bucket = await tran.get(bucketPath); - if (bucket == null) { - bucket = {}; - } - bucket[nodeId] = { + const lastUpdated = getUnixtime(); + await tran.put([...bucketPath, nodesUtils.bucketDbKey(nodeId)], { address: nodeAddress, - lastUpdated: new Date(), - }; - // Perform the check on size after we add/update the node. If it's an update, - // then we don't need to perform the deletion - let bucketEntries = Object.entries(bucket); - if (bucketEntries.length > this.maxNodesPerBucket) { - const leastActive = bucketEntries.reduce((prev, curr) => { - return new Date(prev[1].lastUpdated) < new Date(curr[1].lastUpdated) - ? 
prev - : curr; - }); - delete bucket[leastActive[0]]; - bucketEntries = Object.entries(bucket); - // For safety, make sure that the bucket is actually at maxNodesPerBucket - if (bucketEntries.length !== this.maxNodesPerBucket) { - throw new nodesErrors.ErrorNodeGraphOversizedBucket(); - } + lastUpdated, + }); + const lastUpdatedKey = nodesUtils.lastUpdatedBucketDbKey( + lastUpdated, + nodeId, + ); + await tran.put( + [...lastUpdatedPath, lastUpdatedKey], + nodesUtils.bucketDbKey(nodeId), + true, + ); + } + + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) + public async unsetNode(nodeId: NodeId, tran: DBTransaction): Promise { + const [bucketIndex, bucketKey] = this.bucketIndex(nodeId); + const bucketPath = [...this.nodeGraphBucketsDbPath, bucketKey]; + const lastUpdatedPath = [...this.nodeGraphLastUpdatedDbPath, bucketKey]; + const nodeData = await tran.get([ + ...bucketPath, + nodesUtils.bucketDbKey(nodeId), + ]); + if (nodeData != null) { + const count = await this.getBucketMetaProp(bucketIndex, 'count', tran); + await this.setBucketMetaProp(bucketIndex, 'count', count - 1, tran); + await tran.del([...bucketPath, nodesUtils.bucketDbKey(nodeId)]); + const lastUpdatedKey = nodesUtils.lastUpdatedBucketDbKey( + nodeData.lastUpdated, + nodeId, + ); + await tran.del([...lastUpdatedPath, lastUpdatedKey]); } - await tran.put(bucketPath, bucket); } /** - * Updates an existing node - * It will update the lastUpdated time - * Optionally it can replace the NodeAddress + * Gets a bucket + * The bucket's node IDs is sorted lexicographically by default + * Alternatively you can acquire them sorted by lastUpdated timestamp + * or by distance to the own NodeId */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async updateNode( - nodeId: NodeId, - nodeAddress?: NodeAddress, - tran?: DBTransaction, - ): Promise { - if (tran == null) { - return this.withTransactionF(async (tran) => - this.updateNode(nodeId, nodeAddress, tran), + public async getBucket( + 
bucketIndex: NodeBucketIndex, + sort: 'nodeId' | 'distance' | 'lastUpdated' = 'nodeId', + order: 'asc' | 'desc' = 'asc', + tran: DBTransaction, + ): Promise { + if (bucketIndex < 0 || bucketIndex >= this.nodeIdBits) { + throw new nodesErrors.ErrorNodeGraphBucketIndex( + `bucketIndex must be between 0 and ${this.nodeIdBits - 1} inclusive`, ); } - const bucketIndex = this.getBucketIndex(nodeId); - const bucketPath = [ - ...this.nodeGraphBucketsDbPath, - bucketIndex, - ] as unknown as KeyPath; - const bucket = await tran.get(bucketPath); - if (bucket != null && nodeId in bucket) { - bucket[nodeId].lastUpdated = new Date(); - if (nodeAddress != null) { - bucket[nodeId].address = nodeAddress; + const bucketKey = nodesUtils.bucketKey(bucketIndex); + const bucket: NodeBucket = []; + if (sort === 'nodeId' || sort === 'distance') { + for await (const [key, nodeData] of tran.iterator( + { + reverse: order !== 'asc', + valueAsBuffer: false, + }, + [...this.nodeGraphBucketsDbPath, bucketKey], + )) { + const nodeId = nodesUtils.parseBucketDbKey(key as unknown as Buffer); + bucket.push([nodeId, nodeData]); + } + if (sort === 'distance') { + nodesUtils.bucketSortByDistance( + bucket, + this.keyManager.getNodeId(), + order, + ); + } + } else if (sort === 'lastUpdated') { + const bucketDbIterator = tran.iterator( + { valueAsBuffer: false }, + [...this.nodeGraphBucketsDbPath, bucketKey], + ); + try { + for await (const [, nodeIdBuffer] of tran.iterator( + { + reverse: order !== 'asc', + }, + [...this.nodeGraphLastUpdatedDbPath, bucketKey], + )) { + const nodeId = IdInternal.fromBuffer(nodeIdBuffer); + bucketDbIterator.seek(nodeIdBuffer); + // @ts-ignore + const iteratorResult = await bucketDbIterator.next(); + if (iteratorResult == null) never(); + const [, nodeData] = iteratorResult; + bucket.push([nodeId, nodeData]); + } + } finally { + // @ts-ignore + await bucketDbIterator.end(); } - await tran.put(bucketPath, bucket); - } else { - throw new 
nodesErrors.ErrorNodeGraphNodeIdNotFound(); } + return bucket; } /** - * Removes a node from the bucket database - * @param nodeId - * @param tran + * Gets all buckets + * Buckets are always sorted by `NodeBucketIndex` first + * Then secondly by the `sort` parameter + * The `order` parameter applies to both, for example possible sorts: + * NodeBucketIndex asc, NodeID asc + * NodeBucketIndex desc, NodeId desc + * NodeBucketIndex asc, distance asc + * NodeBucketIndex desc, distance desc + * NodeBucketIndex asc, lastUpdated asc + * NodeBucketIndex desc, lastUpdated desc */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async unsetNode(nodeId: NodeId, tran?: DBTransaction): Promise { - if (tran == null) { - return this.withTransactionF(async (tran) => - this.unsetNode(nodeId, tran), + public async *getBuckets( + sort: 'nodeId' | 'distance' | 'lastUpdated' = 'nodeId', + order: 'asc' | 'desc' = 'asc', + tran: DBTransaction, + ): AsyncGenerator<[NodeBucketIndex, NodeBucket]> { + let bucketIndex: NodeBucketIndex | undefined = undefined; + let bucket: NodeBucket = []; + if (sort === 'nodeId' || sort === 'distance') { + for await (const [key, nodeData] of tran.iterator( + { + reverse: order !== 'asc', + valueAsBuffer: false, + }, + this.nodeGraphBucketsDbPath, + )) { + const { bucketIndex: bucketIndex_, nodeId } = + nodesUtils.parseBucketsDbKey(key as unknown as Buffer); + if (bucketIndex == null) { + // First entry of the first bucket + bucketIndex = bucketIndex_; + bucket.push([nodeId, nodeData]); + } else if (bucketIndex === bucketIndex_) { + // Subsequent entries of the same bucket + bucket.push([nodeId, nodeData]); + } else if (bucketIndex !== bucketIndex_) { + // New bucket + if (sort === 'distance') { + nodesUtils.bucketSortByDistance( + bucket, + this.keyManager.getNodeId(), + order, + ); + } + yield [bucketIndex, bucket]; + bucketIndex = bucketIndex_; + bucket = [[nodeId, nodeData]]; + } + } + // Yield the last bucket if it exists + if (bucketIndex != 
null) { + if (sort === 'distance') { + nodesUtils.bucketSortByDistance( + bucket, + this.keyManager.getNodeId(), + order, + ); + } + yield [bucketIndex, bucket]; + } + } else if (sort === 'lastUpdated') { + const bucketsDbIterator = tran.iterator( + { valueAsBuffer: false }, + this.nodeGraphBucketsDbPath, ); + try { + for await (const [key] of tran.iterator( + { + reverse: order !== 'asc', + }, + this.nodeGraphLastUpdatedDbPath, + )) { + const { bucketIndex: bucketIndex_, nodeId } = + nodesUtils.parseLastUpdatedBucketsDbKey(key as unknown as Buffer); + bucketsDbIterator.seek(nodesUtils.bucketsDbKey(bucketIndex_, nodeId)); + // @ts-ignore + const iteratorResult = await bucketsDbIterator.next(); + if (iteratorResult == null) never(); + const [, nodeData] = iteratorResult; + if (bucketIndex == null) { + // First entry of the first bucket + bucketIndex = bucketIndex_; + bucket.push([nodeId, nodeData]); + } else if (bucketIndex === bucketIndex_) { + // Subsequent entries of the same bucket + bucket.push([nodeId, nodeData]); + } else if (bucketIndex !== bucketIndex_) { + // New bucket + yield [bucketIndex, bucket]; + bucketIndex = bucketIndex_; + bucket = [[nodeId, nodeData]]; + } + } + // Yield the last bucket if it exists + if (bucketIndex != null) { + yield [bucketIndex, bucket]; + } + } finally { + // @ts-ignore + await bucketsDbIterator.end(); + } } - const bucketIndex = this.getBucketIndex(nodeId); - const bucketPath = [ - ...this.nodeGraphBucketsDbPath, - bucketIndex, - ] as unknown as KeyPath; - const bucket = await tran.get(bucketPath); - if (bucket == null) { - return; - } - delete bucket[nodeId]; - if (Object.keys(bucket).length === 0) { - await tran.del(bucketPath); - } else { - await tran.put(bucketPath, bucket); + } + + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) + public async resetBuckets( + nodeIdOwn: NodeId, + tran: DBTransaction, + ): Promise { + // Setup new space + const spaceNew = this.space === '0' ? 
'1' : '0'; + const nodeGraphMetaDbPathNew = [...this.nodeGraphDbPath, 'meta' + spaceNew]; + const nodeGraphBucketsDbPathNew = [ + ...this.nodeGraphDbPath, + 'buckets' + spaceNew, + ]; + const nodeGraphLastUpdatedDbPathNew = [ + ...this.nodeGraphDbPath, + 'index' + spaceNew, + ]; + // Clear the new space (in case it wasn't cleaned properly last time) + await tran.clear(nodeGraphMetaDbPathNew); + await tran.clear(nodeGraphBucketsDbPathNew); + await tran.clear(nodeGraphLastUpdatedDbPathNew); + // Iterating over all entries across all buckets + + for await (const [key, nodeData] of tran.iterator( + { valueAsBuffer: false }, + this.nodeGraphBucketsDbPath, + )) { + // The key is a combined bucket key and node ID + const { nodeId } = nodesUtils.parseBucketsDbKey(key as unknown as Buffer); + // If the new own node ID is one of the existing node IDs, it is just dropped + // We only map to the new bucket if it isn't one of the existing node IDs + if (nodeId.equals(nodeIdOwn)) { + continue; + } + const bucketIndexNew = nodesUtils.bucketIndex(nodeIdOwn, nodeId); + const bucketKeyNew = nodesUtils.bucketKey(bucketIndexNew); + const metaPathNew = [...nodeGraphMetaDbPathNew, bucketKeyNew]; + const bucketPathNew = [...nodeGraphBucketsDbPathNew, bucketKeyNew]; + const indexPathNew = [...nodeGraphLastUpdatedDbPathNew, bucketKeyNew]; + const countNew = (await tran.get([...metaPathNew, 'count'])) ?? 
0; + if (countNew < this.nodeBucketLimit) { + await tran.put([...metaPathNew, 'count'], countNew + 1); + } else { + let oldestIndexKey: Buffer | undefined = undefined; + let oldestNodeId: NodeId | undefined = undefined; + for await (const [key] of tran.iterator( + { + limit: 1, + }, + indexPathNew, + )) { + oldestIndexKey = key as unknown as Buffer; + ({ nodeId: oldestNodeId } = nodesUtils.parseLastUpdatedBucketDbKey( + key as unknown as Buffer, + )); + } + await tran.del([ + ...bucketPathNew, + nodesUtils.bucketDbKey(oldestNodeId!), + ]); + await tran.del([...indexPathNew, oldestIndexKey!]); + } + await tran.put( + [...bucketPathNew, nodesUtils.bucketDbKey(nodeId)], + nodeData, + ); + const lastUpdatedKey = nodesUtils.lastUpdatedBucketDbKey( + nodeData.lastUpdated, + nodeId, + ); + await tran.put( + [...indexPathNew, lastUpdatedKey], + nodesUtils.bucketDbKey(nodeId), + true, + ); } + // Swap to the new space + await tran.put([...this.nodeGraphDbPath, 'space'], spaceNew); + // Clear old space + await tran.clear(this.nodeGraphMetaDbPath); + await tran.clear(this.nodeGraphBucketsDbPath); + await tran.clear(this.nodeGraphLastUpdatedDbPath); + // Swap the spaces + this.space = spaceNew; + this.nodeGraphMetaDbPath = nodeGraphMetaDbPathNew; + this.nodeGraphBucketsDbPath = nodeGraphBucketsDbPathNew; + this.nodeGraphLastUpdatedDbPath = nodeGraphLastUpdatedDbPathNew; } - /** - * Find the correct index of the k-bucket to add a new node to (for this node's - * bucket database). Packs it as a lexicographic integer, such that the order - * of buckets in leveldb is numerical order. 
- */ - protected getBucketIndex(nodeId: NodeId): string { - const index = nodesUtils.calculateBucketIndex( - this.keyManager.getNodeId(), - nodeId, - ); - return lexi.pack(index, 'hex') as string; + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) + public async getBucketMeta( + bucketIndex: NodeBucketIndex, + tran: DBTransaction, + ): Promise { + if (bucketIndex < 0 || bucketIndex >= this.nodeIdBits) { + throw new nodesErrors.ErrorNodeGraphBucketIndex( + `bucketIndex must be between 0 and ${this.nodeIdBits - 1} inclusive`, + ); + } + const metaDomain = [ + ...this.nodeGraphMetaDbPath, + nodesUtils.bucketKey(bucketIndex), + ]; + const props = await Promise.all([ + tran.get([...metaDomain, 'count']), + ]); + const [count] = props; + // Bucket meta properties have defaults + return { + count: count ?? 0, + }; } - /** - * Returns all of the buckets in an array - */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async getAllBuckets(tran?: DBTransaction): Promise> { - if (tran == null) { - return this.withTransactionF(async (tran) => this.getAllBuckets(tran)); + public async getBucketMetaProp( + bucketIndex: NodeBucketIndex, + key: Key, + tran: DBTransaction, + ): Promise { + if (bucketIndex < 0 || bucketIndex >= this.nodeIdBits) { + throw new nodesErrors.ErrorNodeGraphBucketIndex( + `bucketIndex must be between 0 and ${this.nodeIdBits - 1} inclusive`, + ); } - const buckets: Array = []; - for await (const [, bucket] of tran.iterator( - { keys: false, valueAsBuffer: false }, - [...this.nodeGraphBucketsDbPath], - )) { - buckets.push(bucket); + const metaDomain = [ + ...this.nodeGraphMetaDbPath, + nodesUtils.bucketKey(bucketIndex), + ]; + // Bucket meta properties have defaults + let value; + switch (key) { + case 'count': + value = (await tran.get([...metaDomain, key])) ?? 0; + break; } - return buckets; + return value; } /** - * To be called on key renewal. Re-orders all nodes in all buckets with respect - * to the new node ID. 
- * NOTE: original nodes may be lost in this process. If they're redistributed - * to a newly full bucket, the least active nodes in the newly full bucket - * will be removed. + * Sets a bucket meta property + * This is protected because users cannot directly manipulate bucket meta */ - @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async refreshBuckets(tran?: DBTransaction): Promise { - if (tran == null) { - return this.withTransactionF(async (tran) => this.refreshBuckets(tran)); - } - // Get a local copy of all the buckets - const buckets = await this.getAllBuckets(tran); - // Wrap as a batch operation. We want to rollback if we encounter any - // errors (such that we don't clear the DB without re-adding the nodes) - // 1. Delete every bucket - for await (const [keyPath] of tran.iterator({ values: false }, [ - ...this.nodeGraphBucketsDbPath, - ])) { - const key = keyPath[0].toString(); - const hexBucketPath = [...this.nodeGraphBucketsDbPath, key]; - await tran.del(hexBucketPath); - } - const tempBuckets: Record = {}; - // 2. Re-add all the nodes from all buckets - for (const b of buckets) { - for (const n of Object.keys(b)) { - const nodeId = IdInternal.fromString(n); - const newIndex = this.getBucketIndex(nodeId); - let expectedBucket = tempBuckets[newIndex]; - // The following is more or less copied from setNodeOps - if (expectedBucket == null) { - expectedBucket = {}; - } - const bucketEntries = Object.entries(expectedBucket); - // Add the old node - expectedBucket[nodeId] = { - address: b[nodeId].address, - lastUpdated: b[nodeId].lastUpdated, - }; - // If, with the old node added, we exceed the limit - if (bucketEntries.length > this.maxNodesPerBucket) { - // Then, with the old node added, find the least active and remove - const leastActive = bucketEntries.reduce((prev, curr) => { - return prev[1].lastUpdated < curr[1].lastUpdated ? 
prev : curr; - }); - delete expectedBucket[leastActive[0]]; - } - // Add this reconstructed bucket (with old node) into the temp storage - tempBuckets[newIndex] = expectedBucket; - } - } - // Now that we've reconstructed all the buckets, perform batch operations - // on a bucket level (i.e. per bucket, instead of per node) - for (const bucketIndex in tempBuckets) { - const bucketPath = [ - ...this.nodeGraphBucketsDbPath, - bucketIndex, - ] as unknown as KeyPath; - await tran.put(bucketPath, tempBuckets[bucketIndex]); + protected async setBucketMetaProp( + bucketIndex: NodeBucketIndex, + key: Key, + value: NodeBucketMeta[Key], + tran: DBTransaction, + ): Promise { + const metaKey = [ + ...this.nodeGraphMetaDbPath, + nodesUtils.bucketKey(bucketIndex), + key, + ]; + await tran.put(metaKey, value); + return; + } + + /** + * Derive the bucket index of the k-buckets from the new `NodeId` + * The bucket key is the string encoded version of bucket index + * that preserves lexicographic order + */ + protected bucketIndex(nodeId: NodeId): [NodeBucketIndex, string] { + const nodeIdOwn = this.keyManager.getNodeId(); + if (nodeId.equals(nodeIdOwn)) { + throw new nodesErrors.ErrorNodeGraphSameNodeId(); } + const bucketIndex = nodesUtils.bucketIndex(nodeIdOwn, nodeId); + const bucketKey = nodesUtils.bucketKey(bucketIndex); + return [bucketIndex, bucketKey]; } } diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index 6adf77867..ac9d3a4a4 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -313,7 +313,7 @@ class NodeManager { nodeId: NodeId, tran?: DBTransaction, ): Promise { - return await this.nodeGraph.getNode(nodeId, tran); + return (await this.nodeGraph.getNode(nodeId, tran))?.address; } /** @@ -326,7 +326,7 @@ class NodeManager { targetNodeId: NodeId, tran?: DBTransaction, ): Promise { - return await this.nodeGraph.knowsNode(targetNodeId, tran); + return (await this.nodeGraph.getNode(targetNodeId, tran)) != null; } /** @@ -350,16 +350,16 @@ 
class NodeManager { return await this.nodeGraph.setNode(nodeId, nodeAddress, tran); } - /** - * Updates the node in the NodeGraph - */ - public async updateNode( - nodeId: NodeId, - nodeAddress?: NodeAddress, - tran?: DBTransaction, - ): Promise { - return await this.nodeGraph.updateNode(nodeId, nodeAddress, tran); - } + // /** + // * Updates the node in the NodeGraph + // */ + // public async updateNode( + // nodeId: NodeId, + // nodeAddress?: NodeAddress, + // tran?: DBTransaction, + // ): Promise { + // return await this.nodeGraph.updateNode(nodeId, nodeAddress, tran); + // } /** * Removes a node from the NodeGraph @@ -368,19 +368,21 @@ class NodeManager { return await this.nodeGraph.unsetNode(nodeId, tran); } - /** - * Gets all buckets from the NodeGraph - */ - public async getAllBuckets(tran?: DBTransaction): Promise> { - return await this.nodeGraph.getAllBuckets(tran); - } + // /** + // * Gets all buckets from the NodeGraph + // */ + // public async getAllBuckets(tran?: DBTransaction): Promise> { + // return await this.nodeGraph.getAllBuckets(tran); + // } + // FIXME /** * To be called on key renewal. Re-orders all nodes in all buckets with respect * to the new node ID. 
*/ public async refreshBuckets(tran?: DBTransaction): Promise { - return await this.nodeGraph.refreshBuckets(tran); + throw Error('fixme'); + // Return await this.nodeGraph.refreshBuckets(tran); } } diff --git a/src/nodes/errors.ts b/src/nodes/errors.ts index 1c491bde4..83a5597d4 100644 --- a/src/nodes/errors.ts +++ b/src/nodes/errors.ts @@ -37,6 +37,11 @@ class ErrorNodeGraphSameNodeId extends ErrorNodes { exitCode = sysexits.USAGE; } +class ErrorNodeGraphBucketIndex extends ErrorNodes { + static description: 'Bucket index is out of range'; + exitCode = sysexits.USAGE; +} + class ErrorNodeConnectionDestroyed extends ErrorNodes { static description = 'NodeConnection is destroyed'; exitCode = sysexits.USAGE; @@ -76,6 +81,7 @@ export { ErrorNodeGraphEmptyDatabase, ErrorNodeGraphOversizedBucket, ErrorNodeGraphSameNodeId, + ErrorNodeGraphBucketIndex, ErrorNodeConnectionDestroyed, ErrorNodeConnectionTimeout, ErrorNodeConnectionInfoNotExist, diff --git a/src/nodes/types.ts b/src/nodes/types.ts index ffb916851..683143e83 100644 --- a/src/nodes/types.ts +++ b/src/nodes/types.ts @@ -1,9 +1,13 @@ import type { Id } from '@matrixai/id'; -import type { Opaque } from '../types'; +import type { Opaque, NonFunctionProperties } from '../types'; import type { Host, Hostname, Port } from '../network/types'; import type { Claim, ClaimId } from '../claims/types'; import type { ChainData } from '../sigchain/types'; +// This should be a string +// actually cause it is a domain +type NodeGraphSpace = '0' | '1'; + type NodeId = Opaque<'NodeId', Id>; type NodeIdString = Opaque<'NodeIdString', string>; type NodeIdEncoded = Opaque<'NodeIdEncoded', string>; @@ -13,9 +17,43 @@ type NodeAddress = { port: Port; }; -type SeedNodes = Record; +type NodeBucketIndex = number; +// Type NodeBucket = Record; + +// TODO: +// No longer need to use NodeIdString +// It's an array, if you want to lookup +// It's ordered by the last updated date +// On the other hand, does this matter +// Not really? 
+// USE THIS TYPE INSTEAD +type NodeBucket = Array<[NodeId, NodeData]>; + +type NodeBucketMeta = { + count: number; +}; + +type NodeBucketMetaProps = NonFunctionProperties; + +// Just make the bucket entries also +// bucketIndex anot as a key +// but as the domain +// !!NodeGraph!!meta!!ff!!count type NodeData = { + address: NodeAddress; + lastUpdated: number; +}; + +// Type NodeBucketEntry = { +// address: NodeAddress; +// lastUpdated: Date; +// }; + +type SeedNodes = Record; + +// FIXME: should have a proper name +type NodeEntry = { id: NodeId; address: NodeAddress; distance: BigInt; @@ -41,16 +79,6 @@ type NodeInfo = { chain: ChainData; }; -type NodeBucketIndex = number; - -// The data type to be stored in each leveldb entry for the node table -type NodeBucket = { - [key: string]: { - address: NodeAddress; - lastUpdated: Date; - }; -}; - // Only 1 domain, so don't need a 'domain' value (like /gestalts/types.ts) type NodeGraphOp_ = { // Bucket index @@ -72,10 +100,15 @@ export type { NodeIdEncoded, NodeAddress, SeedNodes, - NodeData, NodeClaim, NodeInfo, NodeBucketIndex, + NodeBucketMeta, NodeBucket, + NodeData, + NodeEntry, + // NodeBucketEntry, + NodeGraphOp, + NodeGraphSpace, }; diff --git a/src/nodes/utils.ts b/src/nodes/utils.ts index 696e31d43..1db803381 100644 --- a/src/nodes/utils.ts +++ b/src/nodes/utils.ts @@ -1,29 +1,77 @@ -import type { NodeData, NodeId, NodeIdEncoded } from './types'; +import type { + NodeData, + NodeId, + NodeIdEncoded, + NodeBucket, + NodeIdString, + NodeBucketIndex, +} from './types'; +import { utils as dbUtils } from '@matrixai/db'; import { IdInternal } from '@matrixai/id'; -import { bytes2BigInt } from '../utils'; +import lexi from 'lexicographic-integer'; +import { bytes2BigInt, bufferSplit } from '../utils'; + +// FIXME: +const prefixBuffer = Buffer.from([33]); +// Const prefixBuffer = Buffer.from(dbUtils.prefix); /** - * Compute the distance between two nodes. 
- * distance = nodeId1 ^ nodeId2 - * where ^ = bitwise XOR operator + * Encodes the NodeId as a `base32hex` string */ -function calculateDistance(nodeId1: NodeId, nodeId2: NodeId): bigint { - const distance = nodeId1.map((byte, i) => byte ^ nodeId2[i]); - return bytes2BigInt(distance); +function encodeNodeId(nodeId: NodeId): NodeIdEncoded { + return nodeId.toMultibase('base32hex') as NodeIdEncoded; } /** - * Find the correct index of the k-bucket to add a new node to. + * Decodes an encoded NodeId string into a NodeId + */ +function decodeNodeId(nodeIdEncoded: any): NodeId | undefined { + if (typeof nodeIdEncoded !== 'string') { + return; + } + const nodeId = IdInternal.fromMultibase(nodeIdEncoded); + if (nodeId == null) { + return; + } + // All NodeIds are 32 bytes long + // The NodeGraph requires a fixed size for Node Ids + if (nodeId.length !== 32) { + return; + } + return nodeId; +} + +/** + * Calculate the bucket index that the target node should be located in * A node's k-buckets are organised such that for the ith k-bucket where * 0 <= i < nodeIdBits, the contacts in this ith bucket are known to adhere to * the following inequality: * 2^i <= distance (from current node) < 2^(i+1) + * This means lower buckets will have less nodes then the upper buckets. + * The highest bucket will contain half of all possible nodes. + * The lowest bucket will only contain 1 node. * * NOTE: because XOR is a commutative operation (i.e. a XOR b = b XOR a), the * order of the passed parameters is actually irrelevant. These variables are * purely named for communicating function purpose. + * + * NOTE: Kademlia literature generally talks about buckets with 1-based indexing + * and that the buckets are ordered from largest to smallest. This means the first + * 1th-bucket is far & large bucket, and the last 255th-bucket is the close bucket. + * This is reversed in our `NodeBucketIndex` encoding. 
This is so that lexicographic + * sort orders our buckets from closest bucket to farthest bucket. + * + * To convert from `NodeBucketIndex` to nth-bucket in Kademlia literature: + * + * | NodeBucketIndex | Nth-Bucket | + * | --------------- | ---------- | + * | 255 | 1 | farthest & largest + * | 254 | 2 | + * | ... | ... | + * | 1 | 254 | + * | 0 | 256 | closest & smallest */ -function calculateBucketIndex(sourceNode: NodeId, targetNode: NodeId): number { +function bucketIndex(sourceNode: NodeId, targetNode: NodeId): NodeBucketIndex { const distance = sourceNode.map((byte, i) => byte ^ targetNode[i]); const MSByteIndex = distance.findIndex((byte) => byte !== 0); if (MSByteIndex === -1) { @@ -37,48 +85,221 @@ function calculateBucketIndex(sourceNode: NodeId, targetNode: NodeId): number { } /** - * A sorting compareFn to sort an array of NodeData by increasing distance. + * Encodes bucket index to bucket sublevel key */ -function sortByDistance(a: NodeData, b: NodeData) { - if (a.distance > b.distance) { - return 1; - } else if (a.distance < b.distance) { - return -1; - } else { - return 0; +function bucketKey(bucketIndex: NodeBucketIndex): string { + return lexi.pack(bucketIndex, 'hex'); +} + +/** + * Creates key for buckets sublevel + */ +function bucketsDbKey(bucketIndex: NodeBucketIndex, nodeId: NodeId): Buffer { + return Buffer.concat([ + prefixBuffer, + Buffer.from(bucketKey(bucketIndex)), + prefixBuffer, + bucketDbKey(nodeId), + ]); +} + +/** + * Creates key for single bucket sublevel + */ +function bucketDbKey(nodeId: NodeId): Buffer { + return nodeId.toBuffer(); +} + +/** + * Creates key for buckets indexed by lastUpdated sublevel + */ +function lastUpdatedBucketsDbKey( + bucketIndex: NodeBucketIndex, + lastUpdated: number, + nodeId: NodeId, +): Buffer { + return Buffer.concat([ + prefixBuffer, + Buffer.from(bucketKey(bucketIndex)), + prefixBuffer, + lastUpdatedBucketDbKey(lastUpdated, nodeId), + ]); +} + +/** + * Creates key for single bucket indexed by 
lastUpdated sublevel + */ +function lastUpdatedBucketDbKey(lastUpdated: number, nodeId: NodeId): Buffer { + return Buffer.concat([ + Buffer.from(lexi.pack(lastUpdated, 'hex')), + Buffer.from('-'), + nodeId.toBuffer(), + ]); +} + +/** + * Parse the NodeGraph buckets sublevel key + * The keys look like `!!` + * It is assumed that the `!` is the sublevel prefix. + */ +function parseBucketsDbKey(keyBuffer: Buffer): { + bucketIndex: NodeBucketIndex; + bucketKey: string; + nodeId: NodeId; +} { + const [, bucketKeyBuffer, nodeIdBuffer] = bufferSplit( + keyBuffer, + prefixBuffer, + 3, + true, + ); + if (bucketKeyBuffer == null || nodeIdBuffer == null) { + throw new TypeError('Buffer is not an NodeGraph buckets key'); } + const bucketKey = bucketKeyBuffer.toString(); + const bucketIndex = lexi.unpack(bucketKey); + const nodeId = IdInternal.fromBuffer(nodeIdBuffer); + return { + bucketIndex, + bucketKey, + nodeId, + }; } /** - * Encodes the NodeId as a `base32hex` string + * Parse the NodeGraph bucket key + * The keys look like `` */ -function encodeNodeId(nodeId: NodeId): NodeIdEncoded { - return nodeId.toMultibase('base32hex') as NodeIdEncoded; +function parseBucketDbKey(keyBuffer: Buffer): NodeId { + const nodeId = IdInternal.fromBuffer(keyBuffer); + return nodeId; } /** - * Decodes an encoded NodeId string into a NodeId + * Parse the NodeGraph index sublevel key + * The keys look like `!!-` + * It is assumed that the `!` is the sublevel prefix. 
*/ -function decodeNodeId(nodeIdEncoded: any): NodeId | undefined { - if (typeof nodeIdEncoded !== 'string') { - return; +function parseLastUpdatedBucketsDbKey(keyBuffer: Buffer): { + bucketIndex: NodeBucketIndex; + bucketKey: string; + lastUpdated: number; + nodeId: NodeId; +} { + const [, bucketKeyBuffer, lastUpdatedBuffer] = bufferSplit( + keyBuffer, + prefixBuffer, + 3, + true, + ); + if (bucketKeyBuffer == null || lastUpdatedBuffer == null) { + throw new TypeError('Buffer is not an NodeGraph index key'); } - const nodeId = IdInternal.fromMultibase(nodeIdEncoded); - if (nodeId == null) { - return; + const bucketKey = bucketKeyBuffer.toString(); + const bucketIndex = lexi.unpack(bucketKey); + if (bucketIndex == null) { + throw new TypeError('Buffer is not an NodeGraph index key'); } - // All NodeIds are 32 bytes long - // The NodeGraph requires a fixed size for Node Ids - if (nodeId.length !== 32) { - return; + const { lastUpdated, nodeId } = + parseLastUpdatedBucketDbKey(lastUpdatedBuffer); + return { + bucketIndex, + bucketKey, + lastUpdated, + nodeId, + }; +} + +/** + * Parse the NodeGraph index bucket sublevel key + * The keys look like `-` + * It is assumed that the `!` is the sublevel prefix. + */ +function parseLastUpdatedBucketDbKey(keyBuffer: Buffer): { + lastUpdated: number; + nodeId: NodeId; +} { + const [lastUpdatedBuffer, nodeIdBuffer] = bufferSplit( + keyBuffer, + Buffer.from('-'), + 2, + true, + ); + if (lastUpdatedBuffer == null || nodeIdBuffer == null) { + throw new TypeError('Buffer is not an NodeGraph index bucket key'); + } + const lastUpdated = lexi.unpack(lastUpdatedBuffer.toString()); + if (lastUpdated == null) { + throw new TypeError('Buffer is not an NodeGraph index bucket key'); + } + const nodeId = IdInternal.fromBuffer(nodeIdBuffer); + return { + lastUpdated, + nodeId, + }; +} + +/** + * Compute the distance between two nodes. 
+ * distance = nodeId1 ^ nodeId2 + * where ^ = bitwise XOR operator + */ +function nodeDistance(nodeId1: NodeId, nodeId2: NodeId): bigint { + const distance = nodeId1.map((byte, i) => byte ^ nodeId2[i]); + return bytes2BigInt(distance); +} + +function bucketSortByDistance( + bucket: NodeBucket, + nodeId: NodeId, + order: 'asc' | 'desc' = 'asc', +): void { + const distances = {}; + if (order === 'asc') { + bucket.sort(([nodeId1], [nodeId2]) => { + const d1 = (distances[nodeId1] = + distances[nodeId1] ?? nodeDistance(nodeId, nodeId1)); + const d2 = (distances[nodeId2] = + distances[nodeId2] ?? nodeDistance(nodeId, nodeId2)); + if (d1 < d2) { + return -1; + } else if (d1 > d2) { + return 1; + } else { + return 0; + } + }); + } else { + bucket.sort(([nodeId1], [nodeId2]) => { + const d1 = (distances[nodeId1] = + distances[nodeId1] ?? nodeDistance(nodeId, nodeId1)); + const d2 = (distances[nodeId2] = + distances[nodeId2] ?? nodeDistance(nodeId, nodeId2)); + if (d1 > d2) { + return -1; + } else if (d1 < d2) { + return 1; + } else { + return 0; + } + }); } - return nodeId; } export { - calculateDistance, - calculateBucketIndex, - sortByDistance, + prefixBuffer, encodeNodeId, decodeNodeId, + bucketIndex, + bucketKey, + bucketsDbKey, + bucketDbKey, + lastUpdatedBucketsDbKey, + lastUpdatedBucketDbKey, + parseBucketsDbKey, + parseBucketDbKey, + parseLastUpdatedBucketsDbKey, + parseLastUpdatedBucketDbKey, + nodeDistance, + bucketSortByDistance, }; diff --git a/src/types.ts b/src/types.ts index 161181b8d..fae58ae01 100644 --- a/src/types.ts +++ b/src/types.ts @@ -86,6 +86,24 @@ interface FileSystem { type FileHandle = fs.promises.FileHandle; +type FunctionPropertyNames = { + [K in keyof T]: T[K] extends (...args: any[]) => any ? K : never; +}[keyof T]; + +/** + * Functional properties of an object + */ +type FunctionProperties = Pick>; + +type NonFunctionPropertyNames = { + [K in keyof T]: T[K] extends (...args: any[]) => any ? 
never : K; +}[keyof T]; + +/** + * Non-functional properties of an object + */ +type NonFunctionProperties = Pick>; + export type { POJO, Opaque, @@ -99,4 +117,6 @@ export type { Timer, FileSystem, FileHandle, + FunctionProperties, + NonFunctionProperties, }; diff --git a/src/utils/index.ts b/src/utils/index.ts index f50908aca..2ee8414ff 100644 --- a/src/utils/index.ts +++ b/src/utils/index.ts @@ -2,4 +2,5 @@ export { default as sysexits } from './sysexits'; export * from './utils'; export * from './matchers'; export * from './binary'; +export * from './random'; export * as errors from './errors'; diff --git a/src/utils/random.ts b/src/utils/random.ts new file mode 100644 index 000000000..fa0c3ecda --- /dev/null +++ b/src/utils/random.ts @@ -0,0 +1,11 @@ +/** + * Gets a random number between min (inc) and max (exc) + * This is not cryptographically-secure + */ +function getRandomInt(min: number, max: number) { + min = Math.ceil(min); + max = Math.floor(max); + return Math.floor(Math.random() * (max - min + 1)) + min; +} + +export { getRandomInt }; diff --git a/src/utils/utils.ts b/src/utils/utils.ts index 4d837ecbf..7c623e8dd 100644 --- a/src/utils/utils.ts +++ b/src/utils/utils.ts @@ -236,6 +236,67 @@ function arrayZipWithPadding( ]); } +async function asyncIterableArray( + iterable: AsyncIterable, +): Promise> { + const arr: Array = []; + for await (const item of iterable) { + arr.push(item); + } + return arr; +} + +function bufferSplit( + input: Buffer, + delimiter?: Buffer, + limit?: number, + remaining: boolean = false, +): Array { + const output: Array = []; + let delimiterOffset = 0; + let delimiterIndex = 0; + let i = 0; + if (delimiter != null) { + while (true) { + if (i === limit) break; + delimiterIndex = input.indexOf(delimiter, delimiterOffset); + if (delimiterIndex > -1) { + output.push(input.subarray(delimiterOffset, delimiterIndex)); + delimiterOffset = delimiterIndex + delimiter.byteLength; + } else { + const chunk = 
input.subarray(delimiterOffset); + output.push(chunk); + delimiterOffset += chunk.byteLength; + break; + } + i++; + } + } else { + for (; delimiterIndex < input.byteLength; ) { + if (i === limit) break; + delimiterIndex++; + const chunk = input.subarray(delimiterOffset, delimiterIndex); + output.push(chunk); + delimiterOffset += chunk.byteLength; + i++; + } + } + // If remaining, then the rest of the input including delimiters is extracted + if ( + remaining && + limit != null && + output.length > 0 && + delimiterIndex > -1 && + delimiterIndex <= input.byteLength + ) { + const inputRemaining = input.subarray( + delimiterIndex - output[output.length - 1].byteLength, + ); + output[output.length - 1] = inputRemaining; + } + return output; +} + function debounce

( f: (...params: P) => any, timeout: number = 0, @@ -266,5 +327,7 @@ export { arrayUnset, arrayZip, arrayZipWithPadding, + asyncIterableArray, + bufferSplit, debounce, }; diff --git a/src/validation/utils.ts b/src/validation/utils.ts index 3ce13f258..020c1f51a 100644 --- a/src/validation/utils.ts +++ b/src/validation/utils.ts @@ -165,7 +165,7 @@ function parseHostOrHostname(data: any): Host | Hostname { * Parses number into a Port * Data can be a string-number */ -function parsePort(data: any): Port { +function parsePort(data: any, connect: boolean = false): Port { if (typeof data === 'string') { try { data = parseInteger(data); @@ -176,10 +176,16 @@ function parsePort(data: any): Port { throw e; } } - if (!networkUtils.isPort(data)) { - throw new validationErrors.ErrorParse( - 'Port must be a number between 0 and 65535 inclusive', - ); + if (!networkUtils.isPort(data, connect)) { + if (!connect) { + throw new validationErrors.ErrorParse( + 'Port must be a number between 0 and 65535 inclusive', + ); + } else { + throw new validationErrors.ErrorParse( + 'Port must be a number between 1 and 65535 inclusive', + ); + } } return data; } diff --git a/test-iterator.ts b/test-iterator.ts new file mode 100644 index 000000000..82a21762c --- /dev/null +++ b/test-iterator.ts @@ -0,0 +1,31 @@ + + +function getYouG () { + console.log('ALREADY EXECUTED'); + return abc(); +} + +async function *abc() { + console.log('START'); + yield 1; + yield 2; + yield 3; +} + +async function main () { + + // we would want that you don't iterate it + + const g = getYouG(); + + await g.next(); + + // console.log('SUP'); + + // for await (const r of abc()) { + // console.log(r); + // } + +} + +main(); diff --git a/test-lexi.ts b/test-lexi.ts new file mode 100644 index 000000000..b48f9cea1 --- /dev/null +++ b/test-lexi.ts @@ -0,0 +1,4 @@ +import lexi from 'lexicographic-integer'; + + +console.log(lexi.pack(1646203779)); diff --git a/test-nodegraph.ts b/test-nodegraph.ts new file mode 100644 index 
000000000..33bd58bb7 --- /dev/null +++ b/test-nodegraph.ts @@ -0,0 +1,107 @@ +import type { NodeId, NodeAddress } from './src/nodes/types'; +import { DB } from '@matrixai/db'; +import { IdInternal } from '@matrixai/id'; +import * as keysUtils from './src/keys/utils'; +import * as nodesUtils from './src/nodes/utils'; +import NodeGraph from './src/nodes/NodeGraph'; +import KeyManager from './src/keys/KeyManager'; + +function generateRandomNodeId(readable: boolean = false): NodeId { + if (readable) { + const random = keysUtils.getRandomBytesSync(16).toString('hex'); + return IdInternal.fromString(random); + } else { + const random = keysUtils.getRandomBytesSync(32); + return IdInternal.fromBuffer(random); + } +} + +async function main () { + + const db = await DB.createDB({ + dbPath: './tmp/db' + }); + + const keyManager = await KeyManager.createKeyManager({ + keysPath: './tmp/keys', + password: 'abc123', + // fresh: true + }); + + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + fresh: true + }); + + for (let i = 0; i < 10; i++) { + await nodeGraph.setNode( + generateRandomNodeId(), + { + host: '127.0.0.1', + port: 55555 + } as NodeAddress + ); + } + + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets()) { + + // the bucket lengths are wrong + console.log( + 'BucketIndex', + bucketIndex, + 'Bucket Count', + bucket.length, + ); + + // console.log(bucket); + for (const [nodeId, nodeData] of bucket) { + // console.log('NODEID', nodeId); + // console.log('NODEDATA', nodeData); + // console.log(nodeData.address); + } + } + + for await (const [nodeId, nodeData] of nodeGraph.getNodes()) { + // console.log(nodeId, nodeData); + } + + const bucket = await nodeGraph.getBucket(255, 'lastUpdated'); + console.log(bucket.length); + + // console.log('OLD NODE ID', keyManager.getNodeId()); + // const newNodeId = generateRandomNodeId(); + // console.log('NEW NODE ID', newNodeId); + + // console.log('---------FIRST RESET--------'); + + // await 
nodeGraph.resetBuckets(newNodeId); + // for await (const [bucketIndex, bucket] of nodeGraph.getBuckets()) { + // console.log( + // 'BucketIndex', + // bucketIndex, + // 'Bucket Count', + // Object.keys(bucket).length + // ); + // } + + + // console.log('---------SECOND RESET--------'); + // const newNodeId2 = generateRandomNodeId(); + // await nodeGraph.resetBuckets(newNodeId2); + + // for await (const [bucketIndex, bucket] of nodeGraph.getBuckets()) { + // console.log( + // 'BucketIndex', + // bucketIndex, + // 'Bucket Count', + // Object.keys(bucket).length + // ); + // } + + await nodeGraph.stop(); + await keyManager.stop(); + await db.stop(); +} + +main(); diff --git a/test-nodeidgen.ts b/test-nodeidgen.ts new file mode 100644 index 000000000..2f79bddda --- /dev/null +++ b/test-nodeidgen.ts @@ -0,0 +1,44 @@ +import type { NodeId } from './src/nodes/types'; +import { IdInternal } from '@matrixai/id'; +import * as keysUtils from './src/keys/utils'; +import * as nodesUtils from './src/nodes/utils'; + +function generateRandomNodeId(readable: boolean = false): NodeId { + if (readable) { + const random = keysUtils.getRandomBytesSync(16).toString('hex'); + return IdInternal.fromString(random); + } else { + const random = keysUtils.getRandomBytesSync(32); + return IdInternal.fromBuffer(random); + } +} + +async function main () { + + const firstNodeId = generateRandomNodeId(); + + + let lastBucket = 0; + let penultimateBucket = 0; + let lowerBuckets = 0; + + for (let i = 0; i < 1000; i++) { + const nodeId = generateRandomNodeId(); + const bucketIndex = nodesUtils.bucketIndex(firstNodeId, nodeId); + if (bucketIndex === 255) { + lastBucket++; + } else if (bucketIndex === 254) { + penultimateBucket++; + } else { + lowerBuckets++; + } + } + + console.log(lastBucket); + console.log(penultimateBucket); + console.log(lowerBuckets); + + +} + +main(); diff --git a/test-order.ts b/test-order.ts new file mode 100644 index 000000000..f6046d6da --- /dev/null +++ b/test-order.ts @@ 
-0,0 +1,98 @@ +import { DB } from '@matrixai/db'; +import lexi from 'lexicographic-integer'; +import { getUnixtime, hex2Bytes } from './src/utils'; + +async function main () { + + const db = await DB.createDB({ + dbPath: './tmp/orderdb', + fresh: true + }); + + await db.put([], 'node1', 'value'); + await db.put([], 'node2', 'value'); + await db.put([], 'node3', 'value'); + await db.put([], 'node4', 'value'); + await db.put([], 'node5', 'value'); + await db.put([], 'node6', 'value'); + await db.put([], 'node7', 'value'); + + const now = new Date; + const t1 = new Date(now.getTime() + 1000 * 1); + const t2 = new Date(now.getTime() + 1000 * 2); + const t3 = new Date(now.getTime() + 1000 * 3); + const t4 = new Date(now.getTime() + 1000 * 4); + const t5 = new Date(now.getTime() + 1000 * 5); + const t6 = new Date(now.getTime() + 1000 * 6); + const t7 = new Date(now.getTime() + 1000 * 7); + + // so unix time is only what we really need to know + // further precision is unlikely + // and hex-packed time is shorter keys + // so it is likely faster + // the only issue is that unpacking requires + // converting hex into bytes, then into strings + + // console.log(t1.getTime()); + // console.log(getUnixtime(t1)); + // console.log(lexi.pack(getUnixtime(t1), 'hex')); + // console.log(lexi.pack(t1.getTime(), 'hex')); + // console.log(t1.toISOString()); + + + // buckets0!BUCKETINDEX!NODEID + // buckets0!BUCKETINDEX!date + + // Duplicate times that are put here + // But differentiate by the node1, node2 + await db.put([], lexi.pack(getUnixtime(t6), 'hex') + '-node1', 'value'); + await db.put([], lexi.pack(getUnixtime(t6), 'hex') + '-node2', 'value'); + + await db.put([], lexi.pack(getUnixtime(t1), 'hex') + '-node3', 'value'); + await db.put([], lexi.pack(getUnixtime(t4), 'hex') + '-node4', 'value'); + await db.put([], lexi.pack(getUnixtime(t3), 'hex') + '-node5', 'value'); + await db.put([], lexi.pack(getUnixtime(t2), 'hex') + '-node6', 'value'); + await db.put([], 
lexi.pack(getUnixtime(t5), 'hex') + '-node7', 'value'); + + // await db.put([], t6.toISOString() + '-node1', 'value'); + // await db.put([], t6.toISOString() + '-node2', 'value'); + + // await db.put([], t1.toISOString() + '-node3', 'value'); + // await db.put([], t4.toISOString() + '-node4', 'value'); + // await db.put([], t3.toISOString() + '-node5', 'value'); + // await db.put([], t2.toISOString() + '-node6', 'value'); + // await db.put([], t5.toISOString() + '-node7', 'value'); + + // Why did this require `-node3` + + // this will awlays get one or the other + + // ok so we if we want to say get a time + // or order it by time + // we are goingto have to create read stream over the bucket right? + // yea so we would have another sublevel, or at least a sublevel formed by the bucket + // one that is the bucket index + // so that would be the correct way to do it + + for await (const o of db.db.createReadStream({ + gte: lexi.pack(getUnixtime(t1), 'hex'), + limit: 1, + // keys: true, + // values: true, + // lte: lexi.pack(getUnixtime(t6)) + })) { + + console.log(o.key.toString()); + + } + + await db.stop(); + + + // so it works + // now if you give it something liek + + +} + +main(); diff --git a/test-sorting.ts b/test-sorting.ts new file mode 100644 index 000000000..1692fa83f --- /dev/null +++ b/test-sorting.ts @@ -0,0 +1,28 @@ +import * as testNodesUtils from './tests/nodes/utils'; + +const arr = [ + { a: 'abc', b: 3}, + { a: 'abc', b: 1}, + { a: 'abc', b: 0}, +]; + +arr.sort((a, b): number => { + if (a.b > b.b) { + return 1; + } else if (a.b < b.b) { + return -1; + } else { + return 0; + } +}); + +console.log(arr); + +const arr2 = [3, 1, 0]; + +arr2.sort(); + +console.log(arr2); + + +console.log(testNodesUtils.generateRandomNodeId()); diff --git a/test-split.ts b/test-split.ts new file mode 100644 index 000000000..ee06d75d6 --- /dev/null +++ b/test-split.ts @@ -0,0 +1,37 @@ + +function bufferSplit(input: Buffer, delimiter?: Buffer): Array { + const output: 
Array = []; + let delimiterIndex = 0; + let chunkIndex = 0; + if (delimiter != null) { + while (true) { + const i = input.indexOf( + delimiter, + delimiterIndex + ); + if (i > -1) { + output.push(input.subarray(chunkIndex, i)); + delimiterIndex = i + delimiter.byteLength; + chunkIndex = i + delimiter.byteLength; + } else { + output.push(input.subarray(chunkIndex)); + break; + } + } + } else { + for (let i = 0; i < input.byteLength; i++) { + output.push(input.subarray(i, i + 1)); + } + } + return output; +} + + +const b = Buffer.from('!a!!b!'); + +console.log(bufferSplit(b, Buffer.from('!!'))); +console.log(bufferSplit(b)); + +const s = '!a!!b!'; + +console.log(s.split('!!')); diff --git a/test-trie.ts b/test-trie.ts new file mode 100644 index 000000000..a17c4165d --- /dev/null +++ b/test-trie.ts @@ -0,0 +1,29 @@ +import * as utils from './src/utils'; +import * as nodesUtils from './src/nodes/utils'; + +// 110 +const ownNodeId = Buffer.from([6]); + +const i = 2; + +const maxDistance = utils.bigInt2Bytes(BigInt(2 ** i)); +const minDistance = utils.bigInt2Bytes(BigInt(2 ** (i - 1))); + +console.log('max distance', maxDistance, utils.bytes2Bits(maxDistance)); +console.log('min distance', minDistance, utils.bytes2Bits(minDistance)); + +// ownNodeId XOR maxdistance = GTE node id +const gte = ownNodeId.map((byte, i) => byte ^ maxDistance[i]); + +// ownNodeId XOR mindistance = LT node id +const lt = ownNodeId.map((byte, i) => byte ^ minDistance[i]); + +console.log('Lowest Distance Node (inc)', gte, utils.bytes2Bits(gte)); +console.log('Greatest Distance Node (exc)', lt, utils.bytes2Bits(lt)); + +// function nodeDistance(nodeId1: Buffer, nodeId2: Buffer): bigint { +// const distance = nodeId1.map((byte, i) => byte ^ nodeId2[i]); +// return utils.bytes2BigInt(distance); +// } + +// console.log(nodeDistance(ownNodeId, Buffer.from([0]))); diff --git a/tests/acl/ACL.test.ts b/tests/acl/ACL.test.ts index a671caf10..cd0658560 100644 --- a/tests/acl/ACL.test.ts +++ 
b/tests/acl/ACL.test.ts @@ -12,6 +12,7 @@ import * as aclErrors from '@/acl/errors'; import * as keysUtils from '@/keys/utils'; import * as vaultsUtils from '@/vaults/utils'; import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe(ACL.name, () => { const logger = new Logger(`${ACL.name} test`, LogLevel.WARN, [ @@ -19,14 +20,14 @@ describe(ACL.name, () => { ]); // Node Ids - const nodeIdX = testUtils.generateRandomNodeId(); - const nodeIdY = testUtils.generateRandomNodeId(); - const nodeIdG1First = testUtils.generateRandomNodeId(); - const nodeIdG1Second = testUtils.generateRandomNodeId(); - const nodeIdG1Third = testUtils.generateRandomNodeId(); - const nodeIdG1Fourth = testUtils.generateRandomNodeId(); - const nodeIdG2First = testUtils.generateRandomNodeId(); - const nodeIdG2Second = testUtils.generateRandomNodeId(); + const nodeIdX = testNodesUtils.generateRandomNodeId(); + const nodeIdY = testNodesUtils.generateRandomNodeId(); + const nodeIdG1First = testNodesUtils.generateRandomNodeId(); + const nodeIdG1Second = testNodesUtils.generateRandomNodeId(); + const nodeIdG1Third = testNodesUtils.generateRandomNodeId(); + const nodeIdG1Fourth = testNodesUtils.generateRandomNodeId(); + const nodeIdG2First = testNodesUtils.generateRandomNodeId(); + const nodeIdG2Second = testNodesUtils.generateRandomNodeId(); let dataDir: string; let db: DB; @@ -108,18 +109,30 @@ describe(ACL.name, () => { await expect(acl.setNodesPerm([], {} as Permission)).rejects.toThrow( aclErrors.ErrorACLNotRunning, ); + await expect(acl.setNodesPermOps([], {} as Permission)).rejects.toThrow( + aclErrors.ErrorACLNotRunning, + ); await expect(acl.setNodePerm(nodeIdX, {} as Permission)).rejects.toThrow( aclErrors.ErrorACLNotRunning, ); + await expect(acl.setNodePermOps(nodeIdX, {} as Permission)).rejects.toThrow( + aclErrors.ErrorACLNotRunning, + ); await expect(acl.unsetNodePerm(nodeIdX)).rejects.toThrow( aclErrors.ErrorACLNotRunning, ); + await 
expect(acl.unsetNodePermOps(nodeIdX)).rejects.toThrow( + aclErrors.ErrorACLNotRunning, + ); await expect(acl.unsetVaultPerms(1 as VaultId)).rejects.toThrow( aclErrors.ErrorACLNotRunning, ); await expect(acl.joinNodePerm(nodeIdX, [])).rejects.toThrow( aclErrors.ErrorACLNotRunning, ); + await expect(acl.joinNodePermOps(nodeIdX, [])).rejects.toThrow( + aclErrors.ErrorACLNotRunning, + ); await expect(acl.joinVaultPerms(1 as VaultId, [])).rejects.toThrow( aclErrors.ErrorACLNotRunning, ); diff --git a/tests/agent/utils.ts b/tests/agent/utils.ts index f2b896024..7712d0fa8 100644 --- a/tests/agent/utils.ts +++ b/tests/agent/utils.ts @@ -1,5 +1,4 @@ import type { Host, Port, ProxyConfig } from '@/network/types'; - import type { IAgentServiceServer } from '@/proto/js/polykey/v1/agent_service_grpc_pb'; import type { KeyManager } from '@/keys'; import type { VaultManager } from '@/vaults'; @@ -20,7 +19,7 @@ import { createAgentService, GRPCClientAgent, } from '@/agent'; -import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; async function openTestAgentServer({ keyManager, @@ -89,7 +88,7 @@ async function openTestAgentClient( new StreamHandler(), ]); return await GRPCClientAgent.createGRPCClientAgent({ - nodeId: nodeId ?? testUtils.generateRandomNodeId(), + nodeId: nodeId ?? 
testNodesUtils.generateRandomNodeId(), host: '127.0.0.1' as Host, port: port as Port, logger: logger, diff --git a/tests/bin/nodes/add.test.ts b/tests/bin/nodes/add.test.ts index 062cf6cdf..85b598786 100644 --- a/tests/bin/nodes/add.test.ts +++ b/tests/bin/nodes/add.test.ts @@ -11,11 +11,12 @@ import * as nodesUtils from '@/nodes/utils'; import * as keysUtils from '@/keys/utils'; import * as testBinUtils from '../utils'; import * as testUtils from '../../utils'; +import * as testNodesUtils from '../../nodes/utils'; describe('add', () => { const logger = new Logger('add test', LogLevel.WARN, [new StreamHandler()]); const password = 'helloworld'; - const validNodeId = testUtils.generateRandomNodeId(); + const validNodeId = testNodesUtils.generateRandomNodeId(); const invalidNodeId = IdInternal.fromString('INVALIDID'); const validHost = '0.0.0.0'; const invalidHost = 'INVALIDHOST'; diff --git a/tests/bin/vaults/vaults.test.ts b/tests/bin/vaults/vaults.test.ts index 52b5f4e4c..949f208ee 100644 --- a/tests/bin/vaults/vaults.test.ts +++ b/tests/bin/vaults/vaults.test.ts @@ -11,7 +11,7 @@ import * as vaultsUtils from '@/vaults/utils'; import sysexits from '@/utils/sysexits'; import NotificationsManager from '@/notifications/NotificationsManager'; import * as testBinUtils from '../utils'; -import * as testUtils from '../../utils'; +import * as testNodesUtils from '../../nodes/utils'; jest.mock('@/keys/utils', () => ({ ...jest.requireActual('@/keys/utils'), @@ -378,7 +378,7 @@ describe('CLI vaults', () => { mockedSendNotification.mockImplementation(async (_) => {}); const vaultId = await polykeyAgent.vaultManager.createVault(vaultName); const vaultIdEncoded = vaultsUtils.encodeVaultId(vaultId); - const targetNodeId = testUtils.generateRandomNodeId(); + const targetNodeId = testNodesUtils.generateRandomNodeId(); const targetNodeIdEncoded = nodesUtils.encodeNodeId(targetNodeId); await polykeyAgent.gestaltGraph.setNode({ id: nodesUtils.encodeNodeId(targetNodeId), @@ -418,7 
+418,7 @@ describe('CLI vaults', () => { ); const vaultIdEncoded1 = vaultsUtils.encodeVaultId(vaultId1); const vaultIdEncoded2 = vaultsUtils.encodeVaultId(vaultId2); - const targetNodeId = testUtils.generateRandomNodeId(); + const targetNodeId = testNodesUtils.generateRandomNodeId(); const targetNodeIdEncoded = nodesUtils.encodeNodeId(targetNodeId); await polykeyAgent.gestaltGraph.setNode({ id: nodesUtils.encodeNodeId(targetNodeId), @@ -489,7 +489,7 @@ describe('CLI vaults', () => { ); const vaultIdEncoded1 = vaultsUtils.encodeVaultId(vaultId1); const vaultIdEncoded2 = vaultsUtils.encodeVaultId(vaultId2); - const targetNodeId = testUtils.generateRandomNodeId(); + const targetNodeId = testNodesUtils.generateRandomNodeId(); const targetNodeIdEncoded = nodesUtils.encodeNodeId(targetNodeId); await polykeyAgent.gestaltGraph.setNode({ id: nodesUtils.encodeNodeId(targetNodeId), diff --git a/tests/claims/utils.test.ts b/tests/claims/utils.test.ts index f7c6e6410..069a6dcef 100644 --- a/tests/claims/utils.test.ts +++ b/tests/claims/utils.test.ts @@ -11,12 +11,13 @@ import * as claimsErrors from '@/claims/errors'; import { utils as keysUtils } from '@/keys'; import { utils as nodesUtils } from '@/nodes'; import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('claims/utils', () => { // Node Ids - const nodeId1 = testUtils.generateRandomNodeId(); + const nodeId1 = testNodesUtils.generateRandomNodeId(); const nodeId1Encoded = nodesUtils.encodeNodeId(nodeId1); - const nodeId2 = testUtils.generateRandomNodeId(); + const nodeId2 = testNodesUtils.generateRandomNodeId(); const nodeId2Encoded = nodesUtils.encodeNodeId(nodeId2); let publicKey: PublicKeyPem; diff --git a/tests/client/service/gestaltsDiscoveryByNode.test.ts b/tests/client/service/gestaltsDiscoveryByNode.test.ts index 7071428e6..e553a0693 100644 --- a/tests/client/service/gestaltsDiscoveryByNode.test.ts +++ b/tests/client/service/gestaltsDiscoveryByNode.test.ts @@ -26,6 +26,7 
@@ import * as clientUtils from '@/client/utils/utils'; import * as keysUtils from '@/keys/utils'; import * as nodesUtils from '@/nodes/utils'; import * as testUtils from '../../utils'; +import * as testNodesUtils from '../../nodes/utils'; describe('gestaltsDiscoveryByNode', () => { const logger = new Logger('gestaltsDiscoveryByNode test', LogLevel.WARN, [ @@ -35,7 +36,7 @@ describe('gestaltsDiscoveryByNode', () => { const authenticate = async (metaClient, metaServer = new Metadata()) => metaServer; const node: NodeInfo = { - id: nodesUtils.encodeNodeId(testUtils.generateRandomNodeId()), + id: nodesUtils.encodeNodeId(testNodesUtils.generateRandomNodeId()), chain: {}, }; let mockedGenerateKeyPair: jest.SpyInstance; diff --git a/tests/client/service/notificationsRead.test.ts b/tests/client/service/notificationsRead.test.ts index 73690a54d..d78bb5eaa 100644 --- a/tests/client/service/notificationsRead.test.ts +++ b/tests/client/service/notificationsRead.test.ts @@ -24,12 +24,13 @@ import * as keysUtils from '@/keys/utils'; import * as nodesUtils from '@/nodes/utils'; import * as clientUtils from '@/client/utils'; import * as testUtils from '../../utils'; +import * as testNodesUtils from '../../nodes/utils'; describe('notificationsRead', () => { const logger = new Logger('notificationsRead test', LogLevel.WARN, [ new StreamHandler(), ]); - const nodeIdSender = testUtils.generateRandomNodeId(); + const nodeIdSender = testNodesUtils.generateRandomNodeId(); const nodeIdSenderEncoded = nodesUtils.encodeNodeId(nodeIdSender); const password = 'helloworld'; const authenticate = async (metaClient, metaServer = new Metadata()) => diff --git a/tests/discovery/Discovery.test.ts b/tests/discovery/Discovery.test.ts index c11c8d000..1b6e0e120 100644 --- a/tests/discovery/Discovery.test.ts +++ b/tests/discovery/Discovery.test.ts @@ -237,7 +237,7 @@ describe('Discovery', () => { discovery.queueDiscoveryByIdentity('' as ProviderId, '' as IdentityId), 
).rejects.toThrow(discoveryErrors.ErrorDiscoveryNotRunning); await expect( - discovery.queueDiscoveryByNode(testUtils.generateRandomNodeId()), + discovery.queueDiscoveryByNode(testNodesUtils.generateRandomNodeId()), ).rejects.toThrow(discoveryErrors.ErrorDiscoveryNotRunning); }); test('discovery by node', async () => { diff --git a/tests/gestalts/GestaltGraph.test.ts b/tests/gestalts/GestaltGraph.test.ts index 4b69761ce..0953a2b4a 100644 --- a/tests/gestalts/GestaltGraph.test.ts +++ b/tests/gestalts/GestaltGraph.test.ts @@ -20,19 +20,19 @@ import * as gestaltsErrors from '@/gestalts/errors'; import * as gestaltsUtils from '@/gestalts/utils'; import * as keysUtils from '@/keys/utils'; import * as nodesUtils from '@/nodes/utils'; -import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('GestaltGraph', () => { const logger = new Logger('GestaltGraph Test', LogLevel.WARN, [ new StreamHandler(), ]); - const nodeIdABC = testUtils.generateRandomNodeId(); + const nodeIdABC = testNodesUtils.generateRandomNodeId(); const nodeIdABCEncoded = nodesUtils.encodeNodeId(nodeIdABC); - const nodeIdDEE = testUtils.generateRandomNodeId(); + const nodeIdDEE = testNodesUtils.generateRandomNodeId(); const nodeIdDEEEncoded = nodesUtils.encodeNodeId(nodeIdDEE); - const nodeIdDEF = testUtils.generateRandomNodeId(); + const nodeIdDEF = testNodesUtils.generateRandomNodeId(); const nodeIdDEFEncoded = nodesUtils.encodeNodeId(nodeIdDEF); - const nodeIdZZZ = testUtils.generateRandomNodeId(); + const nodeIdZZZ = testNodesUtils.generateRandomNodeId(); const nodeIdZZZEncoded = nodesUtils.encodeNodeId(nodeIdZZZ); let dataDir: string; diff --git a/tests/grpc/GRPCClient.test.ts b/tests/grpc/GRPCClient.test.ts index 31028187e..e18f301e6 100644 --- a/tests/grpc/GRPCClient.test.ts +++ b/tests/grpc/GRPCClient.test.ts @@ -17,7 +17,7 @@ import * as grpcErrors from '@/grpc/errors'; import * as clientUtils from '@/client/utils'; import * as utilsPB from 
'@/proto/js/polykey/v1/utils/utils_pb'; import * as utils from './utils'; -import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; import { expectRemoteError } from '../utils'; describe('GRPCClient', () => { @@ -62,7 +62,7 @@ describe('GRPCClient', () => { }, }); const keyManager = { - getNodeId: () => testUtils.generateRandomNodeId(), + getNodeId: () => testNodesUtils.generateRandomNodeId(), } as KeyManager; // Cheeky mocking. sessionManager = await SessionManager.createSessionManager({ db, diff --git a/tests/identities/IdentitiesManager.test.ts b/tests/identities/IdentitiesManager.test.ts index b7ca969b0..23000440b 100644 --- a/tests/identities/IdentitiesManager.test.ts +++ b/tests/identities/IdentitiesManager.test.ts @@ -17,7 +17,7 @@ import * as identitiesErrors from '@/identities/errors'; import * as keysUtils from '@/keys/utils'; import * as nodesUtils from '@/nodes/utils'; import TestProvider from './TestProvider'; -import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('IdentitiesManager', () => { const logger = new Logger('IdentitiesManager Test', LogLevel.WARN, [ @@ -219,7 +219,7 @@ describe('IdentitiesManager', () => { expect(identityDatas).toHaveLength(1); expect(identityDatas).not.toContainEqual(identityData); // Now publish a claim - const nodeIdSome = testUtils.generateRandomNodeId(); + const nodeIdSome = testNodesUtils.generateRandomNodeId(); const nodeIdSomeEncoded = nodesUtils.encodeNodeId(nodeIdSome); const signatures: Record = {}; signatures[nodeIdSome] = { diff --git a/tests/network/Proxy.test.ts b/tests/network/Proxy.test.ts index f199f7a0b..fc8055ea7 100644 --- a/tests/network/Proxy.test.ts +++ b/tests/network/Proxy.test.ts @@ -13,6 +13,7 @@ import * as keysUtils from '@/keys/utils'; import * as nodesUtils from '@/nodes/utils'; import { poll, promise, promisify, timerStart, timerStop } from '@/utils'; import * as testUtils from '../utils'; +import * as 
testNodesUtils from '../nodes/utils'; /** * Mock HTTP Connect Request @@ -110,11 +111,11 @@ describe(Proxy.name, () => { const logger = new Logger(`${Proxy.name} test`, LogLevel.WARN, [ new StreamHandler(), ]); - const nodeIdABC = testUtils.generateRandomNodeId(); + const nodeIdABC = testNodesUtils.generateRandomNodeId(); const nodeIdABCEncoded = nodesUtils.encodeNodeId(nodeIdABC); - const nodeIdSome = testUtils.generateRandomNodeId(); + const nodeIdSome = testNodesUtils.generateRandomNodeId(); const nodeIdSomeEncoded = nodesUtils.encodeNodeId(nodeIdSome); - const nodeIdRandom = testUtils.generateRandomNodeId(); + const nodeIdRandom = testNodesUtils.generateRandomNodeId(); const authToken = 'abc123'; let keyPairPem: KeyPairPem; let certPem: string; diff --git a/tests/nodes/NodeConnection.test.ts b/tests/nodes/NodeConnection.test.ts index ed80ab06a..c22475912 100644 --- a/tests/nodes/NodeConnection.test.ts +++ b/tests/nodes/NodeConnection.test.ts @@ -33,6 +33,7 @@ import * as GRPCErrors from '@/grpc/errors'; import * as nodesUtils from '@/nodes/utils'; import * as agentErrors from '@/agent/errors'; import * as grpcUtils from '@/grpc/utils'; +import * as testNodesUtils from './utils'; import * as testUtils from '../utils'; import * as grpcTestUtils from '../grpc/utils'; import * as agentTestUtils from '../agent/utils'; @@ -72,7 +73,7 @@ describe('${NodeConnection.name} test', () => { const password = 'password'; const node: NodeInfo = { - id: nodesUtils.encodeNodeId(testUtils.generateRandomNodeId()), + id: nodesUtils.encodeNodeId(testNodesUtils.generateRandomNodeId()), chain: {}, }; @@ -703,7 +704,7 @@ describe('${NodeConnection.name} test', () => { "should call `killSelf and throw if the server %s's during testUnaryFail", async (option) => { let nodeConnection: - | NodeConnection + | NodeConnection | undefined; let testProxy: Proxy | undefined; let testProcess: child_process.ChildProcessWithoutNullStreams | undefined; @@ -748,7 +749,7 @@ 
describe('${NodeConnection.name} test', () => { targetHost: testProxy.getProxyHost(), targetPort: testProxy.getProxyPort(), clientFactory: (args) => - grpcTestUtils.GRPCClientTest.createGRPCClientTest(args), + testGrpcUtils.GRPCClientTest.createGRPCClientTest(args), }); const client = nodeConnection.getClient(); @@ -773,7 +774,7 @@ describe('${NodeConnection.name} test', () => { "should call `killSelf and throw if the server %s's during testStreamFail", async (option) => { let nodeConnection: - | NodeConnection + | NodeConnection | undefined; let testProxy: Proxy | undefined; let testProcess: child_process.ChildProcessWithoutNullStreams | undefined; @@ -818,7 +819,7 @@ describe('${NodeConnection.name} test', () => { targetHost: testProxy.getProxyHost(), targetPort: testProxy.getProxyPort(), clientFactory: (args) => - grpcTestUtils.GRPCClientTest.createGRPCClientTest(args), + testGrpcUtils.GRPCClientTest.createGRPCClientTest(args), }); const client = nodeConnection.getClient(); diff --git a/tests/nodes/NodeConnectionManager.general.test.ts b/tests/nodes/NodeConnectionManager.general.test.ts index a6c3638cb..d21be106b 100644 --- a/tests/nodes/NodeConnectionManager.general.test.ts +++ b/tests/nodes/NodeConnectionManager.general.test.ts @@ -19,8 +19,7 @@ import * as keysUtils from '@/keys/utils'; import * as grpcUtils from '@/grpc/utils'; import * as nodesPB from '@/proto/js/polykey/v1/nodes/nodes_pb'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; -import * as nodesTestUtils from './utils'; -import * as testUtils from '../utils'; +import * as testNodesUtils from './utils'; describe(`${NodeConnectionManager.name} general test`, () => { const logger = new Logger( @@ -419,11 +418,11 @@ describe(`${NodeConnectionManager.name} general test`, () => { try { // Generate the node ID to find the closest nodes to (in bucket 100) const nodeId = keyManager.getNodeId(); - const nodeIdToFind = nodesTestUtils.generateNodeIdForBucket(nodeId, 100); + const 
nodeIdToFind = testNodesUtils.generateNodeIdForBucket(nodeId, 100); // Now generate and add 20 nodes that will be close to this node ID const addedClosestNodes: NodeData[] = []; for (let i = 1; i < 101; i += 5) { - const closeNodeId = nodesTestUtils.generateNodeIdForBucket( + const closeNodeId = testNodesUtils.generateNodeIdForBucket( nodeIdToFind, i, ); @@ -489,7 +488,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { // Now generate and add 20 nodes that will be close to this node ID const addedClosestNodes: NodeData[] = []; for (let i = 1; i < 101; i += 5) { - const closeNodeId = nodesTestUtils.generateNodeIdForBucket( + const closeNodeId = testNodesUtils.generateNodeIdForBucket( targetNodeId, i, ); @@ -551,8 +550,8 @@ describe(`${NodeConnectionManager.name} general test`, () => { // To test this we need to... // 2. call relayHolePunchMessage // 3. check that the relevant call was made. - const sourceNodeId = testUtils.generateRandomNodeId(); - const targetNodeId = testUtils.generateRandomNodeId(); + const sourceNodeId = testNodesUtils.generateRandomNodeId(); + const targetNodeId = testNodesUtils.generateRandomNodeId(); await nodeConnectionManager.sendHolePunchMessage( remoteNodeId1, sourceNodeId, @@ -588,7 +587,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { // To test this we need to... // 2. call relayHolePunchMessage // 3. check that the relevant call was made. 
- const sourceNodeId = testUtils.generateRandomNodeId(); + const sourceNodeId = testNodesUtils.generateRandomNodeId(); const relayMessage = new nodesPB.Relay(); relayMessage.setSrcId(nodesUtils.encodeNodeId(sourceNodeId)); relayMessage.setTargetId(nodesUtils.encodeNodeId(remoteNodeId1)); diff --git a/tests/nodes/NodeGraph.test.ts b/tests/nodes/NodeGraph.test.ts index 6b9eec700..6ea350cad 100644 --- a/tests/nodes/NodeGraph.test.ts +++ b/tests/nodes/NodeGraph.test.ts @@ -1,59 +1,46 @@ -import type { Host, Port } from '@/network/types'; -import type { NodeAddress, NodeData, NodeId } from '@/nodes/types'; +import type { + NodeId, + NodeData, + NodeAddress, + NodeBucket, + NodeBucketIndex, +} from '@/nodes/types'; import os from 'os'; import path from 'path'; import fs from 'fs'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { IdInternal } from '@matrixai/id'; -import NodeConnectionManager from '@/nodes/NodeConnectionManager'; import NodeGraph from '@/nodes/NodeGraph'; -import * as nodesErrors from '@/nodes/errors'; import KeyManager from '@/keys/KeyManager'; import * as keysUtils from '@/keys/utils'; -import Proxy from '@/network/Proxy'; import * as nodesUtils from '@/nodes/utils'; -import Sigchain from '@/sigchain/Sigchain'; -import * as nodesTestUtils from './utils'; +import * as nodesErrors from '@/nodes/errors'; +import * as utils from '@/utils'; +import * as testNodesUtils from './utils'; +import * as testUtils from '../utils'; describe(`${NodeGraph.name} test`, () => { - const localHost = '127.0.0.1' as Host; - const port = 0 as Port; const password = 'password'; - let nodeGraph: NodeGraph; - let nodeId: NodeId; - - const nodeId1 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 5, - ]); - const dummyNode = nodesUtils.decodeNodeId( - 'vi3et1hrpv2m2lrplcm7cu913kr45v51cak54vm68anlbvuf83ra0', - )!; - - const logger = new 
Logger(`${NodeGraph.name} test`, LogLevel.ERROR, [ + const logger = new Logger(`${NodeGraph.name} test`, LogLevel.WARN, [ new StreamHandler(), ]); - let proxy: Proxy; + let mockedGenerateKeyPair: jest.SpyInstance; + let mockedGenerateDeterministicKeyPair: jest.SpyInstance; let dataDir: string; let keyManager: KeyManager; + let dbKey: Buffer; + let dbPath: string; let db: DB; - let nodeConnectionManager: NodeConnectionManager; - let sigchain: Sigchain; - - const hostGen = (i: number) => `${i}.${i}.${i}.${i}` as Host; - - const mockedGenerateDeterministicKeyPair = jest.spyOn( - keysUtils, - 'generateDeterministicKeyPair', - ); - - beforeEach(async () => { - mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { - return keysUtils.generateKeyPair(bits); - }); - + beforeAll(async () => { + const globalKeyPair = await testUtils.setupGlobalKeypair(); + mockedGenerateKeyPair = jest + .spyOn(keysUtils, 'generateKeyPair') + .mockResolvedValue(globalKeyPair); + mockedGenerateDeterministicKeyPair = jest + .spyOn(keysUtils, 'generateDeterministicKeyPair') + .mockResolvedValue(globalKeyPair); dataDir = await fs.promises.mkdtemp( path.join(os.tmpdir(), 'polykey-test-'), ); @@ -63,559 +50,669 @@ describe(`${NodeGraph.name} test`, () => { keysPath, logger, }); - proxy = new Proxy({ - authToken: 'auth', - logger: logger, - }); - await proxy.start({ - serverHost: localHost, - serverPort: port, - tlsConfig: { - keyPrivatePem: keyManager.getRootKeyPairPem().privateKey, - certChainPem: await keyManager.getRootCertChainPem(), - }, + dbKey = await keysUtils.generateKey(); + dbPath = `${dataDir}/db`; + }); + afterAll(async () => { + await keyManager.stop(); + await fs.promises.rm(dataDir, { + force: true, + recursive: true, }); - const dbPath = `${dataDir}/db`; + mockedGenerateKeyPair.mockRestore(); + mockedGenerateDeterministicKeyPair.mockRestore(); + }); + beforeEach(async () => { db = await DB.createDB({ dbPath, logger, crypto: { - key: keyManager.dbKey, + key: dbKey, 
ops: { encrypt: keysUtils.encryptWithKey, decrypt: keysUtils.decryptWithKey, }, }, }); - sigchain = await Sigchain.createSigchain({ - keyManager: keyManager, - db: db, - logger: logger, - }); - nodeGraph = await NodeGraph.createNodeGraph({ + }); + afterEach(async () => { + await db.stop(); + await db.destroy(); + }); + test('get, set and unset node IDs', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ db, keyManager, logger, }); - nodeConnectionManager = new NodeConnectionManager({ - keyManager: keyManager, - nodeGraph: nodeGraph, - proxy: proxy, - logger: logger, + let nodeId1: NodeId; + do { + nodeId1 = testNodesUtils.generateRandomNodeId(); + } while (nodeId1.equals(keyManager.getNodeId())); + let nodeId2: NodeId; + do { + nodeId2 = testNodesUtils.generateRandomNodeId(); + } while (nodeId2.equals(keyManager.getNodeId())); + + await nodeGraph.setNode(nodeId1, { + host: '10.0.0.1', + port: 1234, + } as NodeAddress); + const nodeData1 = await nodeGraph.getNode(nodeId1); + expect(nodeData1).toStrictEqual({ + address: { + host: '10.0.0.1', + port: 1234, + }, + lastUpdated: expect.any(Number), }); - await nodeConnectionManager.start(); - // Retrieve the NodeGraph reference from NodeManager - nodeId = keyManager.getNodeId(); - }); - - afterEach(async () => { - await db.stop(); - await sigchain.stop(); - await nodeConnectionManager.stop(); - await nodeGraph.stop(); - await keyManager.stop(); - await proxy.stop(); - await fs.promises.rm(dataDir, { - force: true, - recursive: true, + await utils.sleep(1000); + await nodeGraph.setNode(nodeId2, { + host: 'abc.com', + port: 8978, + } as NodeAddress); + const nodeData2 = await nodeGraph.getNode(nodeId2); + expect(nodeData2).toStrictEqual({ + address: { + host: 'abc.com', + port: 8978, + }, + lastUpdated: expect.any(Number), }); + expect(nodeData2!.lastUpdated > nodeData1!.lastUpdated).toBe(true); + const nodes = await utils.asyncIterableArray(nodeGraph.getNodes()); + expect(nodes).toHaveLength(2); + 
expect(nodes).toContainEqual([ + nodeId1, + { + address: { + host: '10.0.0.1', + port: 1234, + }, + lastUpdated: expect.any(Number), + }, + ]); + expect(nodes).toContainEqual([ + nodeId2, + { + address: { + host: 'abc.com', + port: 8978, + }, + lastUpdated: expect.any(Number), + }, + ]); + await nodeGraph.unsetNode(nodeId1); + expect(await nodeGraph.getNode(nodeId1)).toBeUndefined(); + expect(await utils.asyncIterableArray(nodeGraph.getNodes())).toStrictEqual([ + [ + nodeId2, + { + address: { + host: 'abc.com', + port: 8978, + }, + lastUpdated: expect.any(Number), + }, + ], + ]); + await nodeGraph.unsetNode(nodeId2); + await nodeGraph.stop(); }); - - test('NodeGraph readiness', async () => { - const nodeGraph2 = await NodeGraph.createNodeGraph({ + test('get all nodes', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ db, keyManager, logger, }); - // @ts-ignore - await expect(nodeGraph2.destroy()).rejects.toThrow( - nodesErrors.ErrorNodeGraphRunning, - ); - // Should be a noop - await nodeGraph2.start(); - await nodeGraph2.stop(); - await nodeGraph2.destroy(); - await expect(async () => { - await nodeGraph2.start(); - }).rejects.toThrow(nodesErrors.ErrorNodeGraphDestroyed); - await expect(async () => { - await nodeGraph2.getBucket(0); - }).rejects.toThrow(nodesErrors.ErrorNodeGraphNotRunning); - await expect(async () => { - await nodeGraph2.getBucket(0); - }).rejects.toThrow(nodesErrors.ErrorNodeGraphNotRunning); - }); - test('knows node (true and false case)', async () => { - // Known node - const nodeAddress1: NodeAddress = { - host: '127.0.0.1' as Host, - port: 11111 as Port, - }; - await nodeGraph.setNode(nodeId1, nodeAddress1); - expect(await nodeGraph.knowsNode(nodeId1)).toBeTruthy(); - - // Unknown node - expect(await nodeGraph.knowsNode(dummyNode)).toBeFalsy(); - }); - test('finds correct node address', async () => { - // New node added - const newNode2Id = nodeId1; - const newNode2Address = { host: '227.1.1.1', port: 4567 } as 
NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - // Get node address - const foundAddress = await nodeGraph.getNode(newNode2Id); - expect(foundAddress).toEqual({ host: '227.1.1.1', port: 4567 }); - }); - test('unable to find node address', async () => { - // New node added - const newNode2Id = nodeId1; - const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - // Get node address (of non-existent node) - const foundAddress = await nodeGraph.getNode(dummyNode); - expect(foundAddress).toBeUndefined(); - }); - test('adds a single node into a bucket', async () => { - // New node added - const newNode2Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 1); - const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - // Check new node is in retrieved bucket from database - // bucketIndex = 1 as "NODEID1" XOR "NODEID2" = 3 - const bucket = await nodeGraph.getBucket(1); - expect(bucket).toBeDefined(); - expect(bucket![newNode2Id]).toEqual({ - address: { host: '227.1.1.1', port: 4567 }, - lastUpdated: expect.any(Date), + let nodeIds = Array.from({ length: 25 }, () => { + return testNodesUtils.generateRandomNodeId(); }); - }); - test('adds multiple nodes into the same bucket', async () => { - // Add 3 new nodes into bucket 4 - const bucketIndex = 4; - const newNode1Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 0, + nodeIds = nodeIds.filter( + (nodeId) => !nodeId.equals(keyManager.getNodeId()), ); - const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; - await nodeGraph.setNode(newNode1Id, newNode1Address); - - const newNode2Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 1, + let bucketIndexes: Array; + let nodes: Array<[NodeId, NodeData]>; + nodes = await utils.asyncIterableArray(nodeGraph.getNodes()); + expect(nodes).toHaveLength(0); 
+ for (const nodeId of nodeIds) { + await utils.sleep(100); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: 55555, + } as NodeAddress); + } + nodes = await utils.asyncIterableArray(nodeGraph.getNodes()); + expect(nodes).toHaveLength(25); + // Sorted by bucket indexes ascending + bucketIndexes = nodes.map(([nodeId]) => + nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId), ); - const newNode2Address = { host: '5.5.5.5', port: 5555 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - const newNode3Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 2, + expect( + bucketIndexes.slice(1).every((bucketIndex, i) => { + return bucketIndexes[i] <= bucketIndex; + }), + ).toBe(true); + // Sorted by bucket indexes ascending explicitly + nodes = await utils.asyncIterableArray(nodeGraph.getNodes('asc')); + bucketIndexes = nodes.map(([nodeId]) => + nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId), ); - const newNode3Address = { host: '6.6.6.6', port: 6666 } as NodeAddress; - await nodeGraph.setNode(newNode3Id, newNode3Address); - // Based on XOR values, all 3 nodes should appear in bucket 4 - const bucket = await nodeGraph.getBucket(4); - expect(bucket).toBeDefined(); - if (!bucket) fail('bucket should be defined, letting TS know'); - expect(bucket[newNode1Id]).toEqual({ - address: { host: '4.4.4.4', port: 4444 }, - lastUpdated: expect.any(Date), + expect( + bucketIndexes.slice(1).every((bucketIndex, i) => { + return bucketIndexes[i] <= bucketIndex; + }), + ).toBe(true); + nodes = await utils.asyncIterableArray(nodeGraph.getNodes('desc')); + expect(nodes).toHaveLength(25); + // Sorted by bucket indexes descending + bucketIndexes = nodes.map(([nodeId]) => + nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId), + ); + expect( + bucketIndexes.slice(1).every((bucketIndex, i) => { + return bucketIndexes[i] >= bucketIndex; + }), + ).toBe(true); + await nodeGraph.stop(); + }); + test('setting same node ID throws 
error', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, }); - expect(bucket[newNode2Id]).toEqual({ - address: { host: '5.5.5.5', port: 5555 }, - lastUpdated: expect.any(Date), + await expect( + nodeGraph.setNode(keyManager.getNodeId(), { + host: '127.0.0.1', + port: 55555, + } as NodeAddress), + ).rejects.toThrow(nodesErrors.ErrorNodeGraphSameNodeId); + await nodeGraph.stop(); + }); + test('get bucket with 1 node', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, }); - expect(bucket[newNode3Id]).toEqual({ - address: { host: '6.6.6.6', port: 6666 }, - lastUpdated: expect.any(Date), + let nodeId: NodeId; + do { + nodeId = testNodesUtils.generateRandomNodeId(); + } while (nodeId.equals(keyManager.getNodeId())); + // Set one node + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: 55555, + } as NodeAddress); + const bucketIndex = nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId); + const bucket = await nodeGraph.getBucket(bucketIndex); + expect(bucket).toHaveLength(1); + expect(bucket[0]).toStrictEqual([ + nodeId, + { + address: { + host: '127.0.0.1', + port: 55555, + }, + lastUpdated: expect.any(Number), + }, + ]); + expect(await nodeGraph.getBucketMeta(bucketIndex)).toStrictEqual({ + count: 1, }); - }); - test('adds a single node into different buckets', async () => { - // New node for bucket 3 - const newNode1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 3); - const newNode1Address = { host: '1.1.1.1', port: 1111 } as NodeAddress; - await nodeGraph.setNode(newNode1Id, newNode1Address); - // New node for bucket 255 (the highest possible bucket) - const newNode2Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 255); - const newNode2Address = { host: '2.2.2.2', port: 2222 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - const bucket3 = await nodeGraph.getBucket(3); - const bucket351 = await nodeGraph.getBucket(255); - 
if (bucket3 && bucket351) { - expect(bucket3[newNode1Id]).toEqual({ - address: { host: '1.1.1.1', port: 1111 }, - lastUpdated: expect.any(Date), - }); - expect(bucket351[newNode2Id]).toEqual({ - address: { host: '2.2.2.2', port: 2222 }, - lastUpdated: expect.any(Date), - }); - } else { - // Should be unreachable - fail('Bucket undefined'); - } - }); - test('deletes a single node (and removes bucket)', async () => { - // New node for bucket 2 - const newNode1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 2); - const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; - await nodeGraph.setNode(newNode1Id, newNode1Address); - - // Check the bucket is there first - const bucket = await nodeGraph.getBucket(2); - if (bucket) { - expect(bucket[newNode1Id]).toEqual({ - address: { host: '4.4.4.4', port: 4444 }, - lastUpdated: expect.any(Date), - }); + // Adjacent bucket should be empty + let bucketIndex_: number; + if (bucketIndex >= nodeId.length * 8 - 1) { + bucketIndex_ = bucketIndex - 1; + } else if (bucketIndex === 0) { + bucketIndex_ = bucketIndex + 1; } else { - // Should be unreachable - fail('Bucket undefined'); + bucketIndex_ = bucketIndex + 1; } - - // Delete the node - await nodeGraph.unsetNode(newNode1Id); - // Check bucket no longer exists - const newBucket = await nodeGraph.getBucket(2); - expect(newBucket).toBeUndefined(); + expect(await nodeGraph.getBucket(bucketIndex_)).toHaveLength(0); + expect(await nodeGraph.getBucketMeta(bucketIndex_)).toStrictEqual({ + count: 0, + }); + await nodeGraph.stop(); }); - test('deletes a single node (and retains remainder of bucket)', async () => { - // Add 3 new nodes into bucket 4 - const bucketIndex = 4; - const newNode1Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 0, - ); - const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; - await nodeGraph.setNode(newNode1Id, newNode1Address); - - const newNode2Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - 
bucketIndex, - 1, + test('get bucket with multiple nodes', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + // Contiguous node IDs starting from 0 + let nodeIds = Array.from({ length: 25 }, (_, i) => + IdInternal.create( + utils.bigInt2Bytes(BigInt(i), keyManager.getNodeId().byteLength), + ), ); - const newNode2Address = { host: '5.5.5.5', port: 5555 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - const newNode3Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 2, + nodeIds = nodeIds.filter( + (nodeId) => !nodeId.equals(keyManager.getNodeId()), ); - const newNode3Address = { host: '6.6.6.6', port: 6666 } as NodeAddress; - await nodeGraph.setNode(newNode3Id, newNode3Address); - // Based on XOR values, all 3 nodes should appear in bucket 4 - const bucket = await nodeGraph.getBucket(bucketIndex); - if (bucket) { - expect(bucket[newNode1Id]).toEqual({ - address: { host: '4.4.4.4', port: 4444 }, - lastUpdated: expect.any(Date), - }); - expect(bucket[newNode2Id]).toEqual({ - address: { host: '5.5.5.5', port: 5555 }, - lastUpdated: expect.any(Date), - }); - expect(bucket[newNode3Id]).toEqual({ - address: { host: '6.6.6.6', port: 6666 }, - lastUpdated: expect.any(Date), - }); - } else { - // Should be unreachable - fail('Bucket undefined'); + for (const nodeId of nodeIds) { + await utils.sleep(100); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: 55555, + } as NodeAddress); } - - // Delete the node - await nodeGraph.unsetNode(newNode1Id); - // Check node no longer exists in the bucket - const newBucket = await nodeGraph.getBucket(bucketIndex); - if (newBucket) { - expect(newBucket[newNode1Id]).toBeUndefined(); - expect(bucket[newNode2Id]).toEqual({ - address: { host: '5.5.5.5', port: 5555 }, - lastUpdated: expect.any(Date), - }); - expect(bucket[newNode3Id]).toEqual({ - address: { host: '6.6.6.6', port: 6666 }, - lastUpdated: expect.any(Date), - }); 
+ // Use first and last buckets because node IDs may be split between buckets + const bucketIndexFirst = nodesUtils.bucketIndex( + keyManager.getNodeId(), + nodeIds[0], + ); + const bucketIndexLast = nodesUtils.bucketIndex( + keyManager.getNodeId(), + nodeIds[nodeIds.length - 1], + ); + const bucketFirst = await nodeGraph.getBucket(bucketIndexFirst); + const bucketLast = await nodeGraph.getBucket(bucketIndexLast); + let bucket: NodeBucket; + let bucketIndex: NodeBucketIndex; + if (bucketFirst.length >= bucketLast.length) { + bucket = bucketFirst; + bucketIndex = bucketIndexFirst; } else { - // Should be unreachable - fail('New bucket undefined'); + bucket = bucketLast; + bucketIndex = bucketIndexLast; } - }); - test('enforces k-bucket size, removing least active node when a new node is discovered', async () => { - // Add k nodes to the database (importantly, they all go into the same bucket) - const bucketIndex = 59; - // Keep a record of the first node ID that we added - const firstNodeId = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, + expect(bucket.length > 1).toBe(true); + let bucketNodeIds = bucket.map(([nodeId]) => nodeId); + // The node IDs must be sorted lexicographically + expect( + bucketNodeIds.slice(1).every((nodeId, i) => { + return Buffer.compare(bucketNodeIds[i], nodeId) < 1; + }), + ).toBe(true); + // Sort by node ID asc + bucket = await nodeGraph.getBucket(bucketIndex, 'nodeId', 'asc'); + bucketNodeIds = bucket.map(([nodeId]) => nodeId); + expect( + bucketNodeIds.slice(1).every((nodeId, i) => { + return Buffer.compare(bucketNodeIds[i], nodeId) < 0; + }), + ).toBe(true); + // Sort by node ID desc + bucket = await nodeGraph.getBucket(bucketIndex, 'nodeId', 'desc'); + bucketNodeIds = bucket.map(([nodeId]) => nodeId); + expect( + bucketNodeIds.slice(1).every((nodeId, i) => { + return Buffer.compare(bucketNodeIds[i], nodeId) > 0; + }), + ).toBe(true); + // Sort by distance asc + bucket = await nodeGraph.getBucket(bucketIndex, 
'distance', 'asc'); + let bucketDistances = bucket.map(([nodeId]) => + nodesUtils.nodeDistance(keyManager.getNodeId(), nodeId), ); - for (let i = 1; i <= nodeGraph.maxNodesPerBucket; i++) { - // Add the current node ID - const nodeAddress = { - host: hostGen(i), - port: i as Port, - }; - await nodeGraph.setNode( - nodesTestUtils.generateNodeIdForBucket(nodeId, bucketIndex, i), - nodeAddress, - ); - // Increment the current node ID + expect( + bucketDistances.slice(1).every((distance, i) => { + return bucketDistances[i] <= distance; + }), + ).toBe(true); + // Sort by distance desc + bucket = await nodeGraph.getBucket(bucketIndex, 'distance', 'desc'); + bucketDistances = bucket.map(([nodeId]) => + nodesUtils.nodeDistance(keyManager.getNodeId(), nodeId), + ); + expect( + bucketDistances.slice(1).every((distance, i) => { + return bucketDistances[i] >= distance; + }), + ).toBe(true); + // Sort by lastUpdated asc + bucket = await nodeGraph.getBucket(bucketIndex, 'lastUpdated', 'asc'); + let bucketLastUpdateds = bucket.map(([, nodeData]) => nodeData.lastUpdated); + expect( + bucketLastUpdateds.slice(1).every((lastUpdated, i) => { + return bucketLastUpdateds[i] <= lastUpdated; + }), + ).toBe(true); + bucket = await nodeGraph.getBucket(bucketIndex, 'lastUpdated', 'desc'); + bucketLastUpdateds = bucket.map(([, nodeData]) => nodeData.lastUpdated); + expect( + bucketLastUpdateds.slice(1).every((lastUpdated, i) => { + return bucketLastUpdateds[i] >= lastUpdated; + }), + ).toBe(true); + await nodeGraph.stop(); + }); + test('get all buckets', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const now = utils.getUnixtime(); + for (let i = 0; i < 50; i++) { + await utils.sleep(50); + await nodeGraph.setNode(testNodesUtils.generateRandomNodeId(), { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); } - // All of these nodes are in bucket 59 - const originalBucket = await 
nodeGraph.getBucket(bucketIndex); - if (originalBucket) { - expect(Object.keys(originalBucket).length).toBe( - nodeGraph.maxNodesPerBucket, - ); - } else { - // Should be unreachable - fail('Bucket undefined'); + let bucketIndex_ = -1; + // Ascending order + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets( + 'nodeId', + 'asc', + )) { + expect(bucketIndex > bucketIndex_).toBe(true); + bucketIndex_ = bucketIndex; + expect(bucket.length > 0).toBe(true); + expect(bucket.length <= nodeGraph.nodeBucketLimit).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe( + bucketIndex, + ); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + const bucketNodeIds = bucket.map(([nodeId]) => nodeId); + expect( + bucketNodeIds.slice(1).every((nodeId, i) => { + return Buffer.compare(bucketNodeIds[i], nodeId) < 0; + }), + ).toBe(true); } - - // Attempt to add a new node into this full bucket (increment the last node - // ID that was added) - const newNodeId = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - nodeGraph.maxNodesPerBucket + 1, - ); - const newNodeAddress = { host: '0.0.0.1' as Host, port: 1234 as Port }; - await nodeGraph.setNode(newNodeId, newNodeAddress); - - const finalBucket = await nodeGraph.getBucket(bucketIndex); - if (finalBucket) { - // We should still have a full bucket (but no more) - expect(Object.keys(finalBucket).length).toEqual( - nodeGraph.maxNodesPerBucket, - ); - // Ensure that this new node is in the bucket - expect(finalBucket[newNodeId]).toEqual({ - address: newNodeAddress, - lastUpdated: expect.any(Date), - }); - // NODEID1 should have been removed from this bucket (as this was the least active) - // The first node added should 
have been removed from this bucket (as this - // was the least active, purely because it was inserted first) - expect(finalBucket[firstNodeId]).toBeUndefined(); - } else { - // Should be unreachable - fail('Bucket undefined'); + // There must have been at least 1 bucket + expect(bucketIndex_).not.toBe(-1); + // Descending order + bucketIndex_ = keyManager.getNodeId().length * 8; + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets( + 'nodeId', + 'desc', + )) { + expect(bucketIndex < bucketIndex_).toBe(true); + bucketIndex_ = bucketIndex; + expect(bucket.length > 0).toBe(true); + expect(bucket.length <= nodeGraph.nodeBucketLimit).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe( + bucketIndex, + ); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + const bucketNodeIds = bucket.map(([nodeId]) => nodeId); + expect( + bucketNodeIds.slice(1).every((nodeId, i) => { + return Buffer.compare(bucketNodeIds[i], nodeId) > 0; + }), + ).toBe(true); } - }); - test('enforces k-bucket size, retaining all nodes if adding a pre-existing node', async () => { - // Add k nodes to the database (importantly, they all go into the same bucket) - const bucketIndex = 59; - const currNodeId = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - ); - // Keep a record of the first node ID that we added - // const firstNodeId = currNodeId; - let increment = 1; - for (let i = 1; i <= nodeGraph.maxNodesPerBucket; i++) { - // Add the current node ID - const nodeAddress = { - host: hostGen(i), - port: i as Port, - }; - await nodeGraph.setNode( - nodesTestUtils.generateNodeIdForBucket(nodeId, bucketIndex, increment), - nodeAddress, + 
expect(bucketIndex_).not.toBe(keyManager.getNodeId().length * 8); + // Distance ascending order + // Lower distance buckets first + bucketIndex_ = -1; + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets( + 'distance', + 'asc', + )) { + expect(bucketIndex > bucketIndex_).toBe(true); + bucketIndex_ = bucketIndex; + expect(bucket.length > 0).toBe(true); + expect(bucket.length <= nodeGraph.nodeBucketLimit).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe( + bucketIndex, + ); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + const bucketDistances = bucket.map(([nodeId]) => + nodesUtils.nodeDistance(keyManager.getNodeId(), nodeId), ); - // Increment the current node ID - skip for the last one to keep currNodeId - // as the last added node ID - if (i !== nodeGraph.maxNodesPerBucket) { - increment++; + // It's the LAST bucket that fails this + expect( + bucketDistances.slice(1).every((distance, i) => { + return bucketDistances[i] <= distance; + }), + ).toBe(true); + } + // Distance descending order + // Higher distance buckets first + bucketIndex_ = keyManager.getNodeId().length * 8; + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets( + 'distance', + 'desc', + )) { + expect(bucketIndex < bucketIndex_).toBe(true); + bucketIndex_ = bucketIndex; + expect(bucket.length > 0).toBe(true); + expect(bucket.length <= nodeGraph.nodeBucketLimit).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe( + bucketIndex, + ); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + 
expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); } + const bucketDistances = bucket.map(([nodeId]) => + nodesUtils.nodeDistance(keyManager.getNodeId(), nodeId), + ); + expect( + bucketDistances.slice(1).every((distance, i) => { + return bucketDistances[i] >= distance; + }), + ).toBe(true); } - // All of these nodes are in bucket 59 - const originalBucket = await nodeGraph.getBucket(bucketIndex); - if (originalBucket) { - expect(Object.keys(originalBucket).length).toBe( - nodeGraph.maxNodesPerBucket, + // Last updated ascending order + // Bucket index is ascending + bucketIndex_ = -1; + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets( + 'lastUpdated', + 'asc', + )) { + expect(bucketIndex > bucketIndex_).toBe(true); + bucketIndex_ = bucketIndex; + expect(bucket.length > 0).toBe(true); + expect(bucket.length <= nodeGraph.nodeBucketLimit).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe( + bucketIndex, + ); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + const bucketLastUpdateds = bucket.map( + ([, nodeData]) => nodeData.lastUpdated, ); - } else { - // Should be unreachable - fail('Bucket undefined'); + expect( + bucketLastUpdateds.slice(1).every((lastUpdated, i) => { + return bucketLastUpdateds[i] <= lastUpdated; + }), + ).toBe(true); } - - // If we tried to re-add the first node, it would simply remove the original - // first node, as this is the "least active" - // We instead want to check that we don't mistakenly delete a node if we're - // updating an existing one - // So, re-add the last node - const newLastAddress: NodeAddress = { - host: '30.30.30.30' as Host, - port: 30 as Port, - }; - await 
nodeGraph.setNode(currNodeId, newLastAddress); - - const finalBucket = await nodeGraph.getBucket(bucketIndex); - if (finalBucket) { - // We should still have a full bucket - expect(Object.keys(finalBucket).length).toEqual( - nodeGraph.maxNodesPerBucket, + // Last updated descending order + // Bucket index is descending + bucketIndex_ = keyManager.getNodeId().length * 8; + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets( + 'lastUpdated', + 'desc', + )) { + expect(bucketIndex < bucketIndex_).toBe(true); + bucketIndex_ = bucketIndex; + expect(bucket.length > 0).toBe(true); + expect(bucket.length <= nodeGraph.nodeBucketLimit).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe( + bucketIndex, + ); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + const bucketLastUpdateds = bucket.map( + ([, nodeData]) => nodeData.lastUpdated, ); - // Ensure that this new node is in the bucket - expect(finalBucket[currNodeId]).toEqual({ - address: newLastAddress, - lastUpdated: expect.any(Date), - }); - } else { - // Should be unreachable - fail('Bucket undefined'); + expect( + bucketLastUpdateds.slice(1).every((lastUpdated, i) => { + return bucketLastUpdateds[i] >= lastUpdated; + }), + ).toBe(true); } + await nodeGraph.stop(); }); - test('retrieves all buckets (in expected lexicographic order)', async () => { - // Bucket 0 is expected to never have any nodes (as nodeId XOR 0 = nodeId) - // Bucket 1 (minimum): - - const node1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 1); - const node1Address = { host: '1.1.1.1', port: 1111 } as NodeAddress; - await nodeGraph.setNode(node1Id, node1Address); - - // Bucket 4 (multiple nodes in 1 bucket): - const node41Id = 
nodesTestUtils.generateNodeIdForBucket(nodeId, 4); - const node41Address = { host: '41.41.41.41', port: 4141 } as NodeAddress; - await nodeGraph.setNode(node41Id, node41Address); - const node42Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 4, 1); - const node42Address = { host: '42.42.42.42', port: 4242 } as NodeAddress; - await nodeGraph.setNode(node42Id, node42Address); - - // Bucket 10 (lexicographic ordering - should appear after 2): - const node10Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 10); - const node10Address = { host: '10.10.10.10', port: 1010 } as NodeAddress; - await nodeGraph.setNode(node10Id, node10Address); - - // Bucket 255 (maximum): - const node255Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 255); - const node255Address = { - host: '255.255.255.255', - port: 255, - } as NodeAddress; - await nodeGraph.setNode(node255Id, node255Address); - - const buckets = await nodeGraph.getAllBuckets(); - expect(buckets.length).toBe(4); - // Buckets should be returned in lexicographic ordering (using hex keys to - // ensure the bucket indexes are in numberical order) - expect(buckets).toEqual([ - { - [node1Id]: { - address: { host: '1.1.1.1', port: 1111 }, - lastUpdated: expect.any(String), - }, - }, - { - [node41Id]: { - address: { host: '41.41.41.41', port: 4141 }, - lastUpdated: expect.any(String), - }, - [node42Id]: { - address: { host: '42.42.42.42', port: 4242 }, - lastUpdated: expect.any(String), - }, - }, - { - [node10Id]: { - address: { host: '10.10.10.10', port: 1010 }, - lastUpdated: expect.any(String), - }, - }, - { - [node255Id]: { - address: { host: '255.255.255.255', port: 255 }, - lastUpdated: expect.any(String), - }, - }, - ]); - }); - test( - 'refreshes buckets', - async () => { - const initialNodes: Record = {}; - // Generate and add some nodes - for (let i = 1; i < 255; i += 20) { - const newNodeId = nodesTestUtils.generateNodeIdForBucket( - keyManager.getNodeId(), - i, - ); - const nodeAddress = { - host: hostGen(i), - 
port: i as Port, - }; - await nodeGraph.setNode(newNodeId, nodeAddress); - initialNodes[newNodeId] = { - id: newNodeId, - address: nodeAddress, - distance: nodesUtils.calculateDistance( - keyManager.getNodeId(), - newNodeId, - ), - }; + test('reset buckets', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const now = utils.getUnixtime(); + for (let i = 0; i < 100; i++) { + await nodeGraph.setNode(testNodesUtils.generateRandomNodeId(), { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); + } + const buckets0 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + // Reset the buckets according to the new node ID + // Note that this should normally be only executed when the key manager NodeID changes + // This means methods that use the KeyManager's node ID cannot be used here in this test + const nodeIdNew1 = testNodesUtils.generateRandomNodeId(); + await nodeGraph.resetBuckets(nodeIdNew1); + const buckets1 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + expect(buckets1.length > 0).toBe(true); + for (const [bucketIndex, bucket] of buckets1) { + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(nodeIdNew1, nodeId)).toBe(bucketIndex); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); } - - // Renew the keypair - await keyManager.renewRootKeyPair('newPassword'); - // Reset the test's node ID state - nodeId = keyManager.getNodeId(); - // Refresh the buckets - await nodeGraph.refreshBuckets(); - - // Get all the new buckets, and expect that each node is in the correct bucket - const newBuckets = await nodeGraph.getAllBuckets(); - let nodeCount = 0; - for (const b of newBuckets) { - for 
(const n of Object.keys(b)) { - const nodeId = IdInternal.fromString(n); - // Check that it was a node in the original DB - expect(initialNodes[nodeId]).toBeDefined(); - // Check it's in the correct bucket - const expectedIndex = nodesUtils.calculateBucketIndex( - keyManager.getNodeId(), - nodeId, - ); - const expectedBucket = await nodeGraph.getBucket(expectedIndex); - expect(expectedBucket).toBeDefined(); - expect(expectedBucket![nodeId]).toBeDefined(); - // Check it has the correct address - expect(b[nodeId].address).toEqual(initialNodes[nodeId].address); - nodeCount++; + } + expect(buckets1).not.toStrictEqual(buckets0); + // Resetting again should change the space + const nodeIdNew2 = testNodesUtils.generateRandomNodeId(); + await nodeGraph.resetBuckets(nodeIdNew2); + const buckets2 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + expect(buckets2.length > 0).toBe(true); + for (const [bucketIndex, bucket] of buckets2) { + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(nodeIdNew2, nodeId)).toBe(bucketIndex); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + } + expect(buckets2).not.toStrictEqual(buckets1); + // Resetting to the same NodeId results in the same bucket structure + await nodeGraph.resetBuckets(nodeIdNew2); + const buckets3 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + expect(buckets3).toStrictEqual(buckets2); + // Resetting to an existing NodeId + const nodeIdExisting = buckets3[0][1][0][0]; + let nodeIdExistingFound = false; + await nodeGraph.resetBuckets(nodeIdExisting); + const buckets4 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + expect(buckets4.length > 0).toBe(true); + for (const [bucketIndex, bucket] of buckets4) { 
+ expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + if (nodeId.equals(nodeIdExisting)) { + nodeIdExistingFound = true; } + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(nodeIdExisting, nodeId)).toBe( + bucketIndex, + ); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); } - // We had less than k (20) nodes, so we expect that all nodes will be re-added - // If we had more than k nodes, we may lose some of them (because the nodes - // may be re-added to newly full buckets) - expect(Object.keys(initialNodes).length).toEqual(nodeCount); - }, - global.defaultTimeout * 4, - ); - test('updates node', async () => { - // New node added - const node1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 2); - const node1Address = { host: '1.1.1.1', port: 1 } as NodeAddress; - await nodeGraph.setNode(node1Id, node1Address); - - // Check new node is in retrieved bucket from database - const bucket = await nodeGraph.getBucket(2); - const time1 = bucket![node1Id].lastUpdated; - - // Update node and check that time is later - const newNode1Address = { host: '2.2.2.2', port: 2 } as NodeAddress; - await nodeGraph.updateNode(node1Id, newNode1Address); - - const bucket2 = await nodeGraph.getBucket(2); - const time2 = bucket2![node1Id].lastUpdated; - expect(bucket2![node1Id].address).toEqual(newNode1Address); - expect(time1 < time2).toBeTruthy(); + } + expect(buckets4).not.toStrictEqual(buckets3); + // The existing node ID should not be put into the NodeGraph + expect(nodeIdExistingFound).toBe(false); + await nodeGraph.stop(); + }); + test('reset buckets is persistent', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const now = utils.getUnixtime(); + for (let i = 0; i < 100; i++) { + await 
nodeGraph.setNode(testNodesUtils.generateRandomNodeId(), { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); + } + const nodeIdNew1 = testNodesUtils.generateRandomNodeId(); + await nodeGraph.resetBuckets(nodeIdNew1); + await nodeGraph.stop(); + await nodeGraph.start(); + const buckets1 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + expect(buckets1.length > 0).toBe(true); + for (const [bucketIndex, bucket] of buckets1) { + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(nodeIdNew1, nodeId)).toBe(bucketIndex); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + } + const nodeIdNew2 = testNodesUtils.generateRandomNodeId(); + await nodeGraph.resetBuckets(nodeIdNew2); + await nodeGraph.stop(); + await nodeGraph.start(); + const buckets2 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + expect(buckets2.length > 0).toBe(true); + for (const [bucketIndex, bucket] of buckets2) { + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(nodeIdNew2, nodeId)).toBe(bucketIndex); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + } + expect(buckets2).not.toStrictEqual(buckets1); + await nodeGraph.stop(); }); }); diff --git a/tests/nodes/NodeGraph.test.ts.old b/tests/nodes/NodeGraph.test.ts.old new file mode 100644 index 000000000..1960c02d3 --- /dev/null +++ b/tests/nodes/NodeGraph.test.ts.old @@ -0,0 +1,624 @@ +import type { Host, Port } from 
'@/network/types'; +import type { NodeAddress, NodeData, NodeId } from '@/nodes/types'; +import os from 'os'; +import path from 'path'; +import fs from 'fs'; +import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; +import { DB } from '@matrixai/db'; +import { IdInternal } from '@matrixai/id'; +import NodeConnectionManager from '@/nodes/NodeConnectionManager'; +import NodeGraph from '@/nodes/NodeGraph'; +import * as nodesErrors from '@/nodes/errors'; +import KeyManager from '@/keys/KeyManager'; +import * as keysUtils from '@/keys/utils'; +import ForwardProxy from '@/network/ForwardProxy'; +import ReverseProxy from '@/network/ReverseProxy'; +import * as nodesUtils from '@/nodes/utils'; +import Sigchain from '@/sigchain/Sigchain'; +import * as nodesTestUtils from './utils'; + +describe(`${NodeGraph.name} test`, () => { + const password = 'password'; + let nodeGraph: NodeGraph; + let nodeId: NodeId; + + const nodeId1 = IdInternal.create([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 5, + ]); + const dummyNode = nodesUtils.decodeNodeId( + 'vi3et1hrpv2m2lrplcm7cu913kr45v51cak54vm68anlbvuf83ra0', + )!; + + const logger = new Logger(`${NodeGraph.name} test`, LogLevel.ERROR, [ + new StreamHandler(), + ]); + let fwdProxy: ForwardProxy; + let revProxy: ReverseProxy; + let dataDir: string; + let keyManager: KeyManager; + let db: DB; + let nodeConnectionManager: NodeConnectionManager; + let sigchain: Sigchain; + + const hostGen = (i: number) => `${i}.${i}.${i}.${i}` as Host; + + const mockedGenerateDeterministicKeyPair = jest.spyOn( + keysUtils, + 'generateDeterministicKeyPair', + ); + + beforeEach(async () => { + mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { + return keysUtils.generateKeyPair(bits); + }); + + dataDir = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'polykey-test-'), + ); + const keysPath = `${dataDir}/keys`; + keyManager = await KeyManager.createKeyManager({ + 
password, + keysPath, + logger, + }); + fwdProxy = new ForwardProxy({ + authToken: 'auth', + logger: logger, + }); + + revProxy = new ReverseProxy({ + logger: logger, + }); + + await fwdProxy.start({ + tlsConfig: { + keyPrivatePem: keyManager.getRootKeyPairPem().privateKey, + certChainPem: await keyManager.getRootCertChainPem(), + }, + }); + const dbPath = `${dataDir}/db`; + db = await DB.createDB({ + dbPath, + logger, + crypto: { + key: keyManager.dbKey, + ops: { + encrypt: keysUtils.encryptWithKey, + decrypt: keysUtils.decryptWithKey, + }, + }, + }); + sigchain = await Sigchain.createSigchain({ + keyManager: keyManager, + db: db, + logger: logger, + }); + nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + nodeConnectionManager = new NodeConnectionManager({ + keyManager: keyManager, + nodeGraph: nodeGraph, + fwdProxy: fwdProxy, + revProxy: revProxy, + logger: logger, + }); + await nodeConnectionManager.start(); + // Retrieve the NodeGraph reference from NodeManager + nodeId = keyManager.getNodeId(); + }); + + afterEach(async () => { + await db.stop(); + await sigchain.stop(); + await nodeConnectionManager.stop(); + await nodeGraph.stop(); + await keyManager.stop(); + await fwdProxy.stop(); + await fs.promises.rm(dataDir, { + force: true, + recursive: true, + }); + }); + + test('NodeGraph readiness', async () => { + const nodeGraph2 = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + // @ts-ignore + await expect(nodeGraph2.destroy()).rejects.toThrow( + nodesErrors.ErrorNodeGraphRunning, + ); + // Should be a noop + await nodeGraph2.start(); + await nodeGraph2.stop(); + await nodeGraph2.destroy(); + await expect(async () => { + await nodeGraph2.start(); + }).rejects.toThrow(nodesErrors.ErrorNodeGraphDestroyed); + await expect(async () => { + await nodeGraph2.getBucket(0); + }).rejects.toThrow(nodesErrors.ErrorNodeGraphNotRunning); + await expect(async () => { + await nodeGraph2.getBucket(0); + 
}).rejects.toThrow(nodesErrors.ErrorNodeGraphNotRunning); + }); + test('knows node (true and false case)', async () => { + // Known node + const nodeAddress1: NodeAddress = { + host: '127.0.0.1' as Host, + port: 11111 as Port, + }; + await nodeGraph.setNode(nodeId1, nodeAddress1); + expect(await nodeGraph.knowsNode(nodeId1)).toBeTruthy(); + + // Unknown node + expect(await nodeGraph.knowsNode(dummyNode)).toBeFalsy(); + }); + test('finds correct node address', async () => { + // New node added + const newNode2Id = nodeId1; + const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; + await nodeGraph.setNode(newNode2Id, newNode2Address); + + // Get node address + const foundAddress = await nodeGraph.getNode(newNode2Id); + expect(foundAddress).toEqual({ host: '227.1.1.1', port: 4567 }); + }); + test('unable to find node address', async () => { + // New node added + const newNode2Id = nodeId1; + const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; + await nodeGraph.setNode(newNode2Id, newNode2Address); + + // Get node address (of non-existent node) + const foundAddress = await nodeGraph.getNode(dummyNode); + expect(foundAddress).toBeUndefined(); + }); + test('adds a single node into a bucket', async () => { + // New node added + const newNode2Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 1); + const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; + await nodeGraph.setNode(newNode2Id, newNode2Address); + + // Check new node is in retrieved bucket from database + // bucketIndex = 1 as "NODEID1" XOR "NODEID2" = 3 + const bucket = await nodeGraph.getBucket(1); + expect(bucket).toBeDefined(); + expect(bucket![newNode2Id]).toEqual({ + address: { host: '227.1.1.1', port: 4567 }, + lastUpdated: expect.any(Date), + }); + }); + test('adds multiple nodes into the same bucket', async () => { + // Add 3 new nodes into bucket 4 + const bucketIndex = 4; + const newNode1Id = nodesTestUtils.generateNodeIdForBucket( + 
nodeId, + bucketIndex, + 0, + ); + const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; + await nodeGraph.setNode(newNode1Id, newNode1Address); + + const newNode2Id = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + 1, + ); + const newNode2Address = { host: '5.5.5.5', port: 5555 } as NodeAddress; + await nodeGraph.setNode(newNode2Id, newNode2Address); + + const newNode3Id = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + 2, + ); + const newNode3Address = { host: '6.6.6.6', port: 6666 } as NodeAddress; + await nodeGraph.setNode(newNode3Id, newNode3Address); + // Based on XOR values, all 3 nodes should appear in bucket 4 + const bucket = await nodeGraph.getBucket(4); + expect(bucket).toBeDefined(); + if (!bucket) fail('bucket should be defined, letting TS know'); + expect(bucket[newNode1Id]).toEqual({ + address: { host: '4.4.4.4', port: 4444 }, + lastUpdated: expect.any(Date), + }); + expect(bucket[newNode2Id]).toEqual({ + address: { host: '5.5.5.5', port: 5555 }, + lastUpdated: expect.any(Date), + }); + expect(bucket[newNode3Id]).toEqual({ + address: { host: '6.6.6.6', port: 6666 }, + lastUpdated: expect.any(Date), + }); + }); + test('adds a single node into different buckets', async () => { + // New node for bucket 3 + const newNode1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 3); + const newNode1Address = { host: '1.1.1.1', port: 1111 } as NodeAddress; + await nodeGraph.setNode(newNode1Id, newNode1Address); + // New node for bucket 255 (the highest possible bucket) + const newNode2Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 255); + const newNode2Address = { host: '2.2.2.2', port: 2222 } as NodeAddress; + await nodeGraph.setNode(newNode2Id, newNode2Address); + + const bucket3 = await nodeGraph.getBucket(3); + const bucket351 = await nodeGraph.getBucket(255); + if (bucket3 && bucket351) { + expect(bucket3[newNode1Id]).toEqual({ + address: { host: '1.1.1.1', port: 1111 }, + lastUpdated: 
expect.any(Date), + }); + expect(bucket351[newNode2Id]).toEqual({ + address: { host: '2.2.2.2', port: 2222 }, + lastUpdated: expect.any(Date), + }); + } else { + // Should be unreachable + fail('Bucket undefined'); + } + }); + test('deletes a single node (and removes bucket)', async () => { + // New node for bucket 2 + const newNode1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 2); + const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; + await nodeGraph.setNode(newNode1Id, newNode1Address); + + // Check the bucket is there first + const bucket = await nodeGraph.getBucket(2); + if (bucket) { + expect(bucket[newNode1Id]).toEqual({ + address: { host: '4.4.4.4', port: 4444 }, + lastUpdated: expect.any(Date), + }); + } else { + // Should be unreachable + fail('Bucket undefined'); + } + + // Delete the node + await nodeGraph.unsetNode(newNode1Id); + // Check bucket no longer exists + const newBucket = await nodeGraph.getBucket(2); + expect(newBucket).toBeUndefined(); + }); + test('deletes a single node (and retains remainder of bucket)', async () => { + // Add 3 new nodes into bucket 4 + const bucketIndex = 4; + const newNode1Id = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + 0, + ); + const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; + await nodeGraph.setNode(newNode1Id, newNode1Address); + + const newNode2Id = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + 1, + ); + const newNode2Address = { host: '5.5.5.5', port: 5555 } as NodeAddress; + await nodeGraph.setNode(newNode2Id, newNode2Address); + + const newNode3Id = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + 2, + ); + const newNode3Address = { host: '6.6.6.6', port: 6666 } as NodeAddress; + await nodeGraph.setNode(newNode3Id, newNode3Address); + // Based on XOR values, all 3 nodes should appear in bucket 4 + const bucket = await nodeGraph.getBucket(bucketIndex); + if (bucket) { + 
expect(bucket[newNode1Id]).toEqual({ + address: { host: '4.4.4.4', port: 4444 }, + lastUpdated: expect.any(Date), + }); + expect(bucket[newNode2Id]).toEqual({ + address: { host: '5.5.5.5', port: 5555 }, + lastUpdated: expect.any(Date), + }); + expect(bucket[newNode3Id]).toEqual({ + address: { host: '6.6.6.6', port: 6666 }, + lastUpdated: expect.any(Date), + }); + } else { + // Should be unreachable + fail('Bucket undefined'); + } + + // Delete the node + await nodeGraph.unsetNode(newNode1Id); + // Check node no longer exists in the bucket + const newBucket = await nodeGraph.getBucket(bucketIndex); + if (newBucket) { + expect(newBucket[newNode1Id]).toBeUndefined(); + expect(bucket[newNode2Id]).toEqual({ + address: { host: '5.5.5.5', port: 5555 }, + lastUpdated: expect.any(Date), + }); + expect(bucket[newNode3Id]).toEqual({ + address: { host: '6.6.6.6', port: 6666 }, + lastUpdated: expect.any(Date), + }); + } else { + // Should be unreachable + fail('New bucket undefined'); + } + }); + test('enforces k-bucket size, removing least active node when a new node is discovered', async () => { + // Add k nodes to the database (importantly, they all go into the same bucket) + const bucketIndex = 59; + // Keep a record of the first node ID that we added + const firstNodeId = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + ); + for (let i = 1; i <= nodeGraph.maxNodesPerBucket; i++) { + // Add the current node ID + const nodeAddress = { + host: hostGen(i), + port: i as Port, + }; + await nodeGraph.setNode( + nodesTestUtils.generateNodeIdForBucket(nodeId, bucketIndex, i), + nodeAddress, + ); + // Increment the current node ID + } + // All of these nodes are in bucket 59 + const originalBucket = await nodeGraph.getBucket(bucketIndex); + if (originalBucket) { + expect(Object.keys(originalBucket).length).toBe( + nodeGraph.maxNodesPerBucket, + ); + } else { + // Should be unreachable + fail('Bucket undefined'); + } + + // Attempt to add a new node into this full 
bucket (increment the last node + // ID that was added) + const newNodeId = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + nodeGraph.maxNodesPerBucket + 1, + ); + const newNodeAddress = { host: '0.0.0.1' as Host, port: 1234 as Port }; + await nodeGraph.setNode(newNodeId, newNodeAddress); + + const finalBucket = await nodeGraph.getBucket(bucketIndex); + if (finalBucket) { + // We should still have a full bucket (but no more) + expect(Object.keys(finalBucket).length).toEqual( + nodeGraph.maxNodesPerBucket, + ); + // Ensure that this new node is in the bucket + expect(finalBucket[newNodeId]).toEqual({ + address: newNodeAddress, + lastUpdated: expect.any(Date), + }); + // NODEID1 should have been removed from this bucket (as this was the least active) + // The first node added should have been removed from this bucket (as this + // was the least active, purely because it was inserted first) + expect(finalBucket[firstNodeId]).toBeUndefined(); + } else { + // Should be unreachable + fail('Bucket undefined'); + } + }); + test('enforces k-bucket size, retaining all nodes if adding a pre-existing node', async () => { + // Add k nodes to the database (importantly, they all go into the same bucket) + const bucketIndex = 59; + const currNodeId = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + ); + // Keep a record of the first node ID that we added + // const firstNodeId = currNodeId; + let increment = 1; + for (let i = 1; i <= nodeGraph.maxNodesPerBucket; i++) { + // Add the current node ID + const nodeAddress = { + host: hostGen(i), + port: i as Port, + }; + await nodeGraph.setNode( + nodesTestUtils.generateNodeIdForBucket(nodeId, bucketIndex, increment), + nodeAddress, + ); + // Increment the current node ID - skip for the last one to keep currNodeId + // as the last added node ID + if (i !== nodeGraph.maxNodesPerBucket) { + increment++; + } + } + // All of these nodes are in bucket 59 + const originalBucket = await 
nodeGraph.getBucket(bucketIndex); + if (originalBucket) { + expect(Object.keys(originalBucket).length).toBe( + nodeGraph.maxNodesPerBucket, + ); + } else { + // Should be unreachable + fail('Bucket undefined'); + } + + // If we tried to re-add the first node, it would simply remove the original + // first node, as this is the "least active" + // We instead want to check that we don't mistakenly delete a node if we're + // updating an existing one + // So, re-add the last node + const newLastAddress: NodeAddress = { + host: '30.30.30.30' as Host, + port: 30 as Port, + }; + await nodeGraph.setNode(currNodeId, newLastAddress); + + const finalBucket = await nodeGraph.getBucket(bucketIndex); + if (finalBucket) { + // We should still have a full bucket + expect(Object.keys(finalBucket).length).toEqual( + nodeGraph.maxNodesPerBucket, + ); + // Ensure that this new node is in the bucket + expect(finalBucket[currNodeId]).toEqual({ + address: newLastAddress, + lastUpdated: expect.any(Date), + }); + } else { + // Should be unreachable + fail('Bucket undefined'); + } + }); + test('retrieves all buckets (in expected lexicographic order)', async () => { + // Bucket 0 is expected to never have any nodes (as nodeId XOR 0 = nodeId) + // Bucket 1 (minimum): + + const node1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 1); + const node1Address = { host: '1.1.1.1', port: 1111 } as NodeAddress; + await nodeGraph.setNode(node1Id, node1Address); + + // Bucket 4 (multiple nodes in 1 bucket): + const node41Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 4); + const node41Address = { host: '41.41.41.41', port: 4141 } as NodeAddress; + await nodeGraph.setNode(node41Id, node41Address); + const node42Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 4, 1); + const node42Address = { host: '42.42.42.42', port: 4242 } as NodeAddress; + await nodeGraph.setNode(node42Id, node42Address); + + // Bucket 10 (lexicographic ordering - should appear after 2): + const node10Id = 
nodesTestUtils.generateNodeIdForBucket(nodeId, 10); + const node10Address = { host: '10.10.10.10', port: 1010 } as NodeAddress; + await nodeGraph.setNode(node10Id, node10Address); + + // Bucket 255 (maximum): + const node255Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 255); + const node255Address = { + host: '255.255.255.255', + port: 255, + } as NodeAddress; + await nodeGraph.setNode(node255Id, node255Address); + + const buckets = await nodeGraph.getAllBuckets(); + expect(buckets.length).toBe(4); + // Buckets should be returned in lexicographic ordering (using hex keys to + // ensure the bucket indexes are in numberical order) + expect(buckets).toEqual([ + { + [node1Id]: { + address: { host: '1.1.1.1', port: 1111 }, + lastUpdated: expect.any(String), + }, + }, + { + [node41Id]: { + address: { host: '41.41.41.41', port: 4141 }, + lastUpdated: expect.any(String), + }, + [node42Id]: { + address: { host: '42.42.42.42', port: 4242 }, + lastUpdated: expect.any(String), + }, + }, + { + [node10Id]: { + address: { host: '10.10.10.10', port: 1010 }, + lastUpdated: expect.any(String), + }, + }, + { + [node255Id]: { + address: { host: '255.255.255.255', port: 255 }, + lastUpdated: expect.any(String), + }, + }, + ]); + }); + test( + 'refreshes buckets', + async () => { + const initialNodes: Record = {}; + // Generate and add some nodes + for (let i = 1; i < 255; i += 20) { + const newNodeId = nodesTestUtils.generateNodeIdForBucket( + keyManager.getNodeId(), + i, + ); + const nodeAddress = { + host: hostGen(i), + port: i as Port, + }; + await nodeGraph.setNode(newNodeId, nodeAddress); + initialNodes[newNodeId] = { + id: newNodeId, + address: nodeAddress, + distance: nodesUtils.calculateDistance( + keyManager.getNodeId(), + newNodeId, + ), + }; + } + + // Renew the keypair + await keyManager.renewRootKeyPair('newPassword'); + // Reset the test's node ID state + nodeId = keyManager.getNodeId(); + // Refresh the buckets + await nodeGraph.refreshBuckets(); + + // Get all the 
new buckets, and expect that each node is in the correct bucket + const newBuckets = await nodeGraph.getAllBuckets(); + let nodeCount = 0; + for (const b of newBuckets) { + for (const n of Object.keys(b)) { + const nodeId = IdInternal.fromString(n); + // Check that it was a node in the original DB + expect(initialNodes[nodeId]).toBeDefined(); + // Check it's in the correct bucket + const expectedIndex = nodesUtils.calculateBucketIndex( + keyManager.getNodeId(), + nodeId, + ); + const expectedBucket = await nodeGraph.getBucket(expectedIndex); + expect(expectedBucket).toBeDefined(); + expect(expectedBucket![nodeId]).toBeDefined(); + // Check it has the correct address + expect(b[nodeId].address).toEqual(initialNodes[nodeId].address); + nodeCount++; + } + } + // We had less than k (20) nodes, so we expect that all nodes will be re-added + // If we had more than k nodes, we may lose some of them (because the nodes + // may be re-added to newly full buckets) + expect(Object.keys(initialNodes).length).toEqual(nodeCount); + }, + global.defaultTimeout * 4, + ); + test('updates node', async () => { + // New node added + const node1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 2); + const node1Address = { host: '1.1.1.1', port: 1 } as NodeAddress; + await nodeGraph.setNode(node1Id, node1Address); + + // Check new node is in retrieved bucket from database + const bucket = await nodeGraph.getBucket(2); + const time1 = bucket![node1Id].lastUpdated; + + // Update node and check that time is later + const newNode1Address = { host: '2.2.2.2', port: 2 } as NodeAddress; + await nodeGraph.updateNode(node1Id, newNode1Address); + + const bucket2 = await nodeGraph.getBucket(2); + const time2 = bucket2![node1Id].lastUpdated; + expect(bucket2![node1Id].address).toEqual(newNode1Address); + expect(time1 < time2).toBeTruthy(); + }); +}); diff --git a/tests/nodes/utils.test.ts b/tests/nodes/utils.test.ts index ee1aeadc4..59d565812 100644 --- a/tests/nodes/utils.test.ts +++ 
b/tests/nodes/utils.test.ts @@ -1,48 +1,69 @@ import type { NodeId } from '@/nodes/types'; +import os from 'os'; +import path from 'path'; +import fs from 'fs'; +import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; +import lexi from 'lexicographic-integer'; import { IdInternal } from '@matrixai/id'; +import { DB } from '@matrixai/db'; import * as nodesUtils from '@/nodes/utils'; +import * as keysUtils from '@/keys/utils'; +import * as utils from '@/utils'; +import * as testNodesUtils from './utils'; -describe('Nodes utils', () => { - test('basic distance calculation', async () => { - const nodeId1 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 5, - ]); - const nodeId2 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 23, 0, - 0, 0, 0, 0, 0, 0, 0, 1, - ]); - - const distance = nodesUtils.calculateDistance(nodeId1, nodeId2); - expect(distance).toEqual(316912758671486456376015716356n); +describe('nodes/utils', () => { + const logger = new Logger(`nodes/utils test`, LogLevel.WARN, [ + new StreamHandler(), + ]); + let dataDir: string; + let db: DB; + beforeEach(async () => { + dataDir = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'polykey-test-'), + ); + const dbKey = await keysUtils.generateKey(); + const dbPath = `${dataDir}/db`; + db = await DB.createDB({ + dbPath, + logger, + crypto: { + key: dbKey, + ops: { + encrypt: keysUtils.encryptWithKey, + decrypt: keysUtils.decryptWithKey, + }, + }, + }); }); - test('calculates correct first bucket (bucket 0)', async () => { - // "1" XOR "0" = distance of 1 - // Therefore, bucket 0 - const nodeId1 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, - ]); - const nodeId2 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, - ]); - const bucketIndex = 
nodesUtils.calculateBucketIndex(nodeId1, nodeId2); - expect(bucketIndex).toBe(0); + afterEach(async () => { + await db.stop(); + await fs.promises.rm(dataDir, { + force: true, + recursive: true, + }); }); - test('calculates correct arbitrary bucket (bucket 63)', async () => { - const nodeId1 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 255, 0, 0, 0, 0, 0, 0, 0, - ]); - const nodeId2 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, - ]); - const bucketIndex = nodesUtils.calculateBucketIndex(nodeId1, nodeId2); - expect(bucketIndex).toBe(63); + test('calculating bucket index from the same node ID', () => { + const nodeId1 = IdInternal.create([0]); + const nodeId2 = IdInternal.create([0]); + const distance = nodesUtils.nodeDistance(nodeId1, nodeId2); + expect(distance).toBe(0n); + expect(() => nodesUtils.bucketIndex(nodeId1, nodeId2)).toThrow(RangeError); + }); + test('calculating bucket index 0', () => { + // Distance is calculated based on XOR operation + // 1 ^ 0 == 1 + // Distance of 1 is bucket 0 + const nodeId1 = IdInternal.create([1]); + const nodeId2 = IdInternal.create([0]); + const distance = nodesUtils.nodeDistance(nodeId1, nodeId2); + const bucketIndex = nodesUtils.bucketIndex(nodeId1, nodeId2); + expect(distance).toBe(1n); + expect(bucketIndex).toBe(0); + // Triangle inequality 2^i <= distance < 2^(i + 1) + expect(2 ** bucketIndex <= distance).toBe(true); + expect(distance < 2 ** (bucketIndex + 1)).toBe(true); }); - test('calculates correct last bucket (bucket 255)', async () => { + test('calculating bucket index 255', () => { const nodeId1 = IdInternal.create([ 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -51,7 +72,103 @@ describe('Nodes utils', () => { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]); - const 
bucketIndex = nodesUtils.calculateBucketIndex(nodeId1, nodeId2); + const distance = nodesUtils.nodeDistance(nodeId1, nodeId2); + const bucketIndex = nodesUtils.bucketIndex(nodeId1, nodeId2); expect(bucketIndex).toBe(255); + // Triangle inequality 2^i <= distance < 2^(i + 1) + expect(2 ** bucketIndex <= distance).toBe(true); + expect(distance < 2 ** (bucketIndex + 1)).toBe(true); + }); + test('calculating bucket index randomly', () => { + for (let i = 0; i < 1000; i++) { + const nodeId1 = testNodesUtils.generateRandomNodeId(); + const nodeId2 = testNodesUtils.generateRandomNodeId(); + if (nodeId1.equals(nodeId2)) { + continue; + } + const distance = nodesUtils.nodeDistance(nodeId1, nodeId2); + const bucketIndex = nodesUtils.bucketIndex(nodeId1, nodeId2); + // Triangle inequality 2^i <= distance < 2^(i + 1) + expect(2 ** bucketIndex <= distance).toBe(true); + expect(distance < 2 ** (bucketIndex + 1)).toBe(true); + } + }); + test('parse NodeGraph buckets db key', async () => { + const bucketsDb = await db.level('buckets'); + const data: Array<{ + bucketIndex: number; + bucketKey: string; + nodeId: NodeId; + key: Buffer; + }> = []; + for (let i = 0; i < 1000; i++) { + const bucketIndex = Math.floor(Math.random() * (255 + 1)); + const bucketKey = nodesUtils.bucketKey(bucketIndex); + const nodeId = testNodesUtils.generateRandomNodeId(); + data.push({ + bucketIndex, + bucketKey, + nodeId, + key: Buffer.concat([Buffer.from(bucketKey), nodeId]), + }); + const bucketDomain = ['buckets', bucketKey]; + await db.put(bucketDomain, nodesUtils.bucketDbKey(nodeId), null); + } + // LevelDB will store keys in lexicographic order + // Use the key property as a concatenated buffer of the bucket key and node ID + data.sort((a, b) => Buffer.compare(a.key, b.key)); + let i = 0; + for await (const key of bucketsDb.createKeyStream()) { + const { bucketIndex, bucketKey, nodeId } = nodesUtils.parseBucketsDbKey( + key as Buffer, + ); + expect(bucketIndex).toBe(data[i].bucketIndex); + 
expect(bucketKey).toBe(data[i].bucketKey); + expect(nodeId.equals(data[i].nodeId)).toBe(true); + i++; + } + }); + test('parse NodeGraph lastUpdated buckets db key', async () => { + const lastUpdatedDb = await db.level('lastUpdated'); + const data: Array<{ + bucketIndex: number; + bucketKey: string; + lastUpdated: number; + nodeId: NodeId; + key: Buffer; + }> = []; + for (let i = 0; i < 1000; i++) { + const bucketIndex = Math.floor(Math.random() * (255 + 1)); + const bucketKey = lexi.pack(bucketIndex, 'hex'); + const lastUpdated = utils.getUnixtime(); + const nodeId = testNodesUtils.generateRandomNodeId(); + const lastUpdatedKey = nodesUtils.lastUpdatedBucketDbKey( + lastUpdated, + nodeId, + ); + data.push({ + bucketIndex, + bucketKey, + lastUpdated, + nodeId, + key: Buffer.concat([Buffer.from(bucketKey), lastUpdatedKey]), + }); + const lastUpdatedDomain = ['lastUpdated', bucketKey]; + await db.put(lastUpdatedDomain, lastUpdatedKey, null); + } + // LevelDB will store keys in lexicographic order + // Use the key property as a concatenated buffer of + // the bucket key and last updated and node ID + data.sort((a, b) => Buffer.compare(a.key, b.key)); + let i = 0; + for await (const key of lastUpdatedDb.createKeyStream()) { + const { bucketIndex, bucketKey, lastUpdated, nodeId } = + nodesUtils.parseLastUpdatedBucketsDbKey(key as Buffer); + expect(bucketIndex).toBe(data[i].bucketIndex); + expect(bucketKey).toBe(data[i].bucketKey); + expect(lastUpdated).toBe(data[i].lastUpdated); + expect(nodeId.equals(data[i].nodeId)).toBe(true); + i++; + } }); }); diff --git a/tests/nodes/utils.ts b/tests/nodes/utils.ts index fca9ad53b..e6c603e14 100644 --- a/tests/nodes/utils.ts +++ b/tests/nodes/utils.ts @@ -1,9 +1,27 @@ import type { NodeId, NodeAddress } from '@/nodes/types'; - import type PolykeyAgent from '@/PolykeyAgent'; import { IdInternal } from '@matrixai/id'; +import * as keysUtils from '@/keys/utils'; import { bigInt2Bytes } from '@/utils'; +/** + * Generate random `NodeId` 
+ * If `readable` is `true`, then it will generate a `NodeId` where + * its binary string form will only contain hex characters + * However the `NodeId` will not be uniformly random as it will not cover + * the full space of possible node IDs + * Prefer to keep `readable` `false` if possible to ensure tests are robust + */ +function generateRandomNodeId(readable: boolean = false): NodeId { + if (readable) { + const random = keysUtils.getRandomBytesSync(16).toString('hex'); + return IdInternal.fromString(random); + } else { + const random = keysUtils.getRandomBytesSync(32); + return IdInternal.fromBuffer(random); + } +} + /** * Generate a deterministic NodeId for a specific bucket given an existing NodeId * This requires solving the bucket index (`i`) and distance equation: @@ -61,4 +79,4 @@ async function nodesConnect(localNode: PolykeyAgent, remoteNode: PolykeyAgent) { } as NodeAddress); } -export { generateNodeIdForBucket, nodesConnect }; +export { generateRandomNodeId, generateNodeIdForBucket, nodesConnect }; diff --git a/tests/notifications/utils.test.ts b/tests/notifications/utils.test.ts index 5a3b8a617..fa6373e38 100644 --- a/tests/notifications/utils.test.ts +++ b/tests/notifications/utils.test.ts @@ -2,16 +2,15 @@ import type { Notification, NotificationData } from '@/notifications/types'; import type { VaultActions, VaultName } from '@/vaults/types'; import { createPublicKey } from 'crypto'; import { EmbeddedJWK, jwtVerify, exportJWK } from 'jose'; - import * as keysUtils from '@/keys/utils'; import * as notificationsUtils from '@/notifications/utils'; import * as notificationsErrors from '@/notifications/errors'; import * as vaultsUtils from '@/vaults/utils'; import * as nodesUtils from '@/nodes/utils'; -import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('Notifications utils', () => { - const nodeId = testUtils.generateRandomNodeId(); + const nodeId = testNodesUtils.generateRandomNodeId(); const 
nodeIdEncoded = nodesUtils.encodeNodeId(nodeId); const vaultId = vaultsUtils.generateVaultId(); const vaultIdEncoded = vaultsUtils.encodeVaultId(vaultId); @@ -206,7 +205,7 @@ describe('Notifications utils', () => { }); test('validates correct notifications', async () => { - const nodeIdOther = testUtils.generateRandomNodeId(); + const nodeIdOther = testNodesUtils.generateRandomNodeId(); const nodeIdOtherEncoded = nodesUtils.encodeNodeId(nodeIdOther); const generalNotification: Notification = { data: { diff --git a/tests/sigchain/Sigchain.test.ts b/tests/sigchain/Sigchain.test.ts index e53a4c67f..e35a3c20a 100644 --- a/tests/sigchain/Sigchain.test.ts +++ b/tests/sigchain/Sigchain.test.ts @@ -13,6 +13,7 @@ import * as sigchainErrors from '@/sigchain/errors'; import * as nodesUtils from '@/nodes/utils'; import * as keysUtils from '@/keys/utils'; import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('Sigchain', () => { const logger = new Logger('Sigchain Test', LogLevel.WARN, [ @@ -20,25 +21,25 @@ describe('Sigchain', () => { ]); const password = 'password'; const srcNodeIdEncoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); const nodeId2Encoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); const nodeId3Encoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); const nodeIdAEncoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); const nodeIdBEncoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); const nodeIdCEncoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); const nodeIdDEncoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + 
testNodesUtils.generateRandomNodeId(), ); let mockedGenerateKeyPair: jest.SpyInstance; @@ -344,7 +345,9 @@ describe('Sigchain', () => { // Add 10 claims for (let i = 1; i <= 5; i++) { - const node2 = nodesUtils.encodeNodeId(testUtils.generateRandomNodeId()); + const node2 = nodesUtils.encodeNodeId( + testNodesUtils.generateRandomNodeId(), + ); node2s.push(node2); const nodeLink: ClaimData = { type: 'node', @@ -393,7 +396,9 @@ describe('Sigchain', () => { for (let i = 1; i <= 30; i++) { // If even, add a node link if (i % 2 === 0) { - const node2 = nodesUtils.encodeNodeId(testUtils.generateRandomNodeId()); + const node2 = nodesUtils.encodeNodeId( + testNodesUtils.generateRandomNodeId(), + ); nodes[i] = node2; const nodeLink: ClaimData = { type: 'node', diff --git a/tests/status/Status.test.ts b/tests/status/Status.test.ts index 311f89a11..0b0744002 100644 --- a/tests/status/Status.test.ts +++ b/tests/status/Status.test.ts @@ -6,15 +6,15 @@ import path from 'path'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import config from '@/config'; import { Status, errors as statusErrors } from '@/status'; -import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('Status', () => { const logger = new Logger(`${Status.name} Test`, LogLevel.WARN, [ new StreamHandler(), ]); - const nodeId1 = testUtils.generateRandomNodeId(); - const nodeId2 = testUtils.generateRandomNodeId(); - const nodeId3 = testUtils.generateRandomNodeId(); + const nodeId1 = testNodesUtils.generateRandomNodeId(); + const nodeId2 = testNodesUtils.generateRandomNodeId(); + const nodeId3 = testNodesUtils.generateRandomNodeId(); let dataDir: string; beforeEach(async () => { dataDir = await fs.promises.mkdtemp(path.join(os.tmpdir(), 'status-test-')); diff --git a/tests/utils.ts b/tests/utils.ts index 311743565..ea2d11ff9 100644 --- a/tests/utils.ts +++ b/tests/utils.ts @@ -71,113 +71,115 @@ async function setupGlobalKeypair() { } } -/** - * Setup the 
global agent - * Use this in beforeAll, and use the closeGlobalAgent in afterAll - * This is expected to be executed by multiple worker processes - * Uses a references directory as a reference count - * Uses fd-lock to serialise access - * This means all test modules using this will be serialised - * Any beforeAll must use globalThis.maxTimeout - * Tips for usage: - * * Do not restart this global agent - * * Ensure client-side side-effects are removed at the end of each test - * * Ensure server-side side-effects are removed at the end of each test - */ +// FIXME: what is going on here? is this getting removed? +// /** +// * Setup the global agent +// * Use this in beforeAll, and use the closeGlobalAgent in afterAll +// * This is expected to be executed by multiple worker processes +// * Uses a references directory as a reference count +// * Uses fd-lock to serialise access +// * This means all test modules using this will be serialised +// * Any beforeAll must use globalThis.maxTimeout +// * Tips for usage: +// * * Do not restart this global agent +// * * Ensure client-side side-effects are removed at the end of each test +// * * Ensure server-side side-effects are removed at the end of each test +// */ async function setupGlobalAgent( logger: Logger = new Logger(setupGlobalAgent.name, LogLevel.WARN, [ new StreamHandler(), ]), -) { - const globalAgentPassword = 'password'; - const globalAgentDir = path.join(globalThis.dataDir, 'agent'); - // The references directory will act like our reference count - await fs.promises.mkdir(path.join(globalAgentDir, 'references'), { - recursive: true, - }); - const pid = process.pid.toString(); - // Plus 1 to the reference count - await fs.promises.writeFile(path.join(globalAgentDir, 'references', pid), ''); - const globalAgentLock = await fs.promises.open( - path.join(globalThis.dataDir, 'agent.lock'), - fs.constants.O_WRONLY | fs.constants.O_CREAT, - ); - while (!lock(globalAgentLock.fd)) { - await sleep(1000); - } - const 
status = new Status({ - statusPath: path.join(globalAgentDir, config.defaults.statusBase), - statusLockPath: path.join(globalAgentDir, config.defaults.statusLockBase), - fs, - }); - let statusInfo = await status.readStatus(); - if (statusInfo == null || statusInfo.status === 'DEAD') { - await PolykeyAgent.createPolykeyAgent({ - password: globalAgentPassword, - nodePath: globalAgentDir, - networkConfig: { - proxyHost: '127.0.0.1' as Host, - forwardHost: '127.0.0.1' as Host, - agentHost: '127.0.0.1' as Host, - clientHost: '127.0.0.1' as Host, - }, - keysConfig: { - rootKeyPairBits: 2048, - }, - seedNodes: {}, // Explicitly no seed nodes on startup - logger, - }); - statusInfo = await status.readStatus(); - } - return { - globalAgentDir, - globalAgentPassword, - globalAgentStatus: statusInfo as StatusLive, - globalAgentClose: async () => { - // Closing the global agent cannot be done in the globalTeardown - // This is due to a sequence of reasons: - // 1. The global agent is not started as a separate process - // 2. Because we need to be able to mock dependencies - // 3. This means it is part of a jest worker process - // 4. Which will block termination of the jest worker process - // 5. Therefore globalTeardown will never get to execute - // 6. The global agent is not part of globalSetup - // 7. Because not all tests need the global agent - // 8. 
Therefore setupGlobalAgent is lazy and executed by jest worker processes - try { - await fs.promises.rm(path.join(globalAgentDir, 'references', pid)); - // If the references directory is not empty - // there are other processes still using the global agent - try { - await fs.promises.rmdir(path.join(globalAgentDir, 'references')); - } catch (e) { - if (e.code === 'ENOTEMPTY') { - return; - } - throw e; - } - // Stopping may occur in a different jest worker process - // therefore we cannot rely on pkAgent, but instead use GRPC - const statusInfo = (await status.readStatus()) as StatusLive; - const grpcClient = await GRPCClientClient.createGRPCClientClient({ - nodeId: statusInfo.data.nodeId, - host: statusInfo.data.clientHost, - port: statusInfo.data.clientPort, - tlsConfig: { keyPrivatePem: undefined, certChainPem: undefined }, - logger, - }); - const emptyMessage = new utilsPB.EmptyMessage(); - const meta = clientUtils.encodeAuthFromPassword(globalAgentPassword); - // This is asynchronous - await grpcClient.agentStop(emptyMessage, meta); - await grpcClient.destroy(); - await status.waitFor('DEAD'); - } finally { - lock.unlock(globalAgentLock.fd); - await globalAgentLock.close(); - } - }, - }; +): Promise { + throw Error('not implemented'); + // Const globalAgentPassword = 'password'; + // const globalAgentDir = path.join(globalThis.dataDir, 'agent'); + // // The references directory will act like our reference count + // await fs.promises.mkdir(path.join(globalAgentDir, 'references'), { + // recursive: true, + // }); + // const pid = process.pid.toString(); + // // Plus 1 to the reference count + // await fs.promises.writeFile(path.join(globalAgentDir, 'references', pid), ''); + // const globalAgentLock = await fs.promises.open( + // path.join(globalThis.dataDir, 'agent.lock'), + // fs.constants.O_WRONLY | fs.constants.O_CREAT, + // ); + // while (!lock(globalAgentLock.fd)) { + // await sleep(1000); + // } + // const status = new Status({ + // statusPath: 
path.join(globalAgentDir, config.defaults.statusBase), + // statusLockPath: path.join(globalAgentDir, config.defaults.statusLockBase), + // fs, + // }); + // let statusInfo = await status.readStatus(); + // if (statusInfo == null || statusInfo.status === 'DEAD') { + // await PolykeyAgent.createPolykeyAgent({ + // password: globalAgentPassword, + // nodePath: globalAgentDir, + // networkConfig: { + // proxyHost: '127.0.0.1' as Host, + // forwardHost: '127.0.0.1' as Host, + // agentHost: '127.0.0.1' as Host, + // clientHost: '127.0.0.1' as Host, + // }, + // keysConfig: { + // rootKeyPairBits: 2048, + // }, + // seedNodes: {}, // Explicitly no seed nodes on startup + // logger, + // }); + // statusInfo = await status.readStatus(); + // } + // return { + // globalAgentDir, + // globalAgentPassword, + // globalAgentStatus: statusInfo as StatusLive, + // globalAgentClose: async () => { + // // Closing the global agent cannot be done in the globalTeardown + // // This is due to a sequence of reasons: + // // 1. The global agent is not started as a separate process + // // 2. Because we need to be able to mock dependencies + // // 3. This means it is part of a jest worker process + // // 4. Which will block termination of the jest worker process + // // 5. Therefore globalTeardown will never get to execute + // // 6. The global agent is not part of globalSetup + // // 7. Because not all tests need the global agent + // // 8. 
Therefore setupGlobalAgent is lazy and executed by jest worker processes + // try { + // await fs.promises.rm(path.join(globalAgentDir, 'references', pid)); + // // If the references directory is not empty + // // there are other processes still using the global agent + // try { + // await fs.promises.rmdir(path.join(globalAgentDir, 'references')); + // } catch (e) { + // if (e.code === 'ENOTEMPTY') { + // return; + // } + // throw e; + // } + // // Stopping may occur in a different jest worker process + // // therefore we cannot rely on pkAgent, but instead use GRPC + // const statusInfo = (await status.readStatus()) as StatusLive; + // const grpcClient = await GRPCClientClient.createGRPCClientClient({ + // nodeId: statusInfo.data.nodeId, + // host: statusInfo.data.clientHost, + // port: statusInfo.data.clientPort, + // tlsConfig: { keyPrivatePem: undefined, certChainPem: undefined }, + // logger, + // }); + // const emptyMessage = new utilsPB.EmptyMessage(); + // const meta = clientUtils.encodeAuthFromPassword(globalAgentPassword); + // // This is asynchronous + // await grpcClient.agentStop(emptyMessage, meta); + // await grpcClient.destroy(); + // await status.waitFor('DEAD'); + // } finally { + // lock.unlock(globalAgentLock.fd); + // await globalAgentLock.close(); + // } + // }, + // }; } function generateRandomNodeId(): NodeId { diff --git a/tests/vaults/VaultOps.test.ts b/tests/vaults/VaultOps.test.ts index 81e061cd3..c766ddd74 100644 --- a/tests/vaults/VaultOps.test.ts +++ b/tests/vaults/VaultOps.test.ts @@ -14,6 +14,7 @@ import * as vaultOps from '@/vaults/VaultOps'; import * as vaultsUtils from '@/vaults/utils'; import * as keysUtils from '@/keys/utils'; import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('VaultOps', () => { const logger = new Logger('VaultOps', LogLevel.WARN, [new StreamHandler()]); @@ -27,7 +28,7 @@ describe('VaultOps', () => { let vaultsDbPath: LevelPath; const dummyKeyManager = { 
getNodeId: () => { - return testUtils.generateRandomNodeId(); + return testNodesUtils.generateRandomNodeId(); }, } as KeyManager; From c2b77b4e9c09f9dcddbfe52ce03ecc1674329132 Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Tue, 15 Mar 2022 17:55:20 +1100 Subject: [PATCH 03/39] feat: added `NodeGraph.getClosestNodes()` Implemented `getClosestNodes()` and relevant tests in `NodeGraph.test.ts`. Relates to #212 --- .../service/nodesClosestLocalNodesGet.ts | 21 +- src/nodes/NodeConnectionManager.ts | 57 +-- src/nodes/NodeGraph.ts | 136 ++++++- .../NodeConnectionManager.general.test.ts | 124 ------- tests/nodes/NodeGraph.test.ts | 341 +++++++++++++++++- 5 files changed, 489 insertions(+), 190 deletions(-) diff --git a/src/agent/service/nodesClosestLocalNodesGet.ts b/src/agent/service/nodesClosestLocalNodesGet.ts index 5c2c0e204..844aa0253 100644 --- a/src/agent/service/nodesClosestLocalNodesGet.ts +++ b/src/agent/service/nodesClosestLocalNodesGet.ts @@ -1,6 +1,6 @@ import type * as grpc from '@grpc/grpc-js'; +import type { NodeGraph } from '../../nodes'; import type { DB } from '@matrixai/db'; -import type NodeConnectionManager from '../../nodes/NodeConnectionManager'; import type { NodeId } from '../../nodes/types'; import type Logger from '@matrixai/logger'; import * as grpcUtils from '../../grpc/utils'; @@ -16,11 +16,11 @@ import * as agentUtils from '../utils'; * to some provided node ID. 
*/ function nodesClosestLocalNodesGet({ - nodeConnectionManager, + nodeGraph, db, logger, }: { - nodeConnectionManager: NodeConnectionManager; + nodeGraph: NodeGraph; db: DB; logger: Logger; }) { @@ -47,21 +47,16 @@ function nodesClosestLocalNodesGet({ ); // Get all local nodes that are closest to the target node from the request const closestNodes = await db.withTransactionF( - async (tran) => - await nodeConnectionManager.getClosestLocalNodes( - nodeId, - undefined, - tran, - ), + async (tran) => await nodeGraph.getClosestNodes(nodeId, tran), ); - for (const node of closestNodes) { + for (const [nodeId, nodeData] of closestNodes) { const addressMessage = new nodesPB.Address(); - addressMessage.setHost(node.address.host); - addressMessage.setPort(node.address.port); + addressMessage.setHost(nodeData.address.host); + addressMessage.setPort(nodeData.address.port); // Add the node to the response's map (mapping of node ID -> node address) response .getNodeTableMap() - .set(nodesUtils.encodeNodeId(node.id), addressMessage); + .set(nodesUtils.encodeNodeId(nodeId), addressMessage); } callback(null, response); return; diff --git a/src/nodes/NodeConnectionManager.ts b/src/nodes/NodeConnectionManager.ts index 5c1b34cb7..fc5c99ff8 100644 --- a/src/nodes/NodeConnectionManager.ts +++ b/src/nodes/NodeConnectionManager.ts @@ -383,49 +383,7 @@ class NodeConnectionManager { return address; } - /** - * Finds the set of nodes (of size k) known by the current node (i.e. in its - * bucket's database) that have the smallest distance to the target node (i.e. - * are closest to the target node). - * i.e. FIND_NODE RPC from Kademlia spec - * - * Used by the RPC service. 
- * - * @param targetNodeId the node ID to find other nodes closest to it - * @param numClosest the number of the closest nodes to return (by default, returns - * according to the maximum number of nodes per bucket) - * @param tran - * @returns a mapping containing exactly k nodeIds -> nodeAddresses (unless the - * current node has less than k nodes in all of its buckets, in which case it - * returns all nodes it has knowledge of) - */ - @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - public async getClosestLocalNodes( - targetNodeId: NodeId, - numClosest: number = this.nodeGraph.maxNodesPerBucket, - tran?: DBTransaction, - ): Promise> { - // Retrieve all nodes from buckets in database - const buckets = await this.nodeGraph.getAllBuckets(tran); - // Iterate over all the nodes in each bucket - const distanceToNodes: Array = []; - buckets.forEach(function (bucket) { - for (const nodeIdString of Object.keys(bucket)) { - // Compute the distance from the node, and add it to the array - const nodeId = IdInternal.fromString(nodeIdString); - distanceToNodes.push({ - id: nodeId, - address: bucket[nodeId].address, - distance: nodesUtils.calculateDistance(nodeId, targetNodeId), - }); - } - }); - // Sort the array (based on the distance at index 1) - distanceToNodes.sort(nodesUtils.sortByDistance); - // Return the closest k nodes (i.e. the first k), or all nodes if < k in array - return distanceToNodes.slice(0, numClosest); - } - + // FIXME: getClosestNodes was moved to NodeGraph? that needs to be updated. /** * Attempts to locate a target node in the network (using Kademlia). 
* Adds all discovered, active nodes to the current node's database (up to k @@ -447,7 +405,7 @@ class NodeConnectionManager { // Let foundTarget: boolean = false; let foundAddress: NodeAddress | undefined = undefined; // Get the closest alpha nodes to the target node (set as shortlist) - const shortlist: Array = await this.getClosestLocalNodes( + const shortlist = await this.nodeGraph.getClosestNodes( targetNodeId, this.initialClosestNodes, ); @@ -470,8 +428,9 @@ class NodeConnectionManager { if (nextNode == null) { break; } + const [nextNodeId, nextNodeAddress] = nextNode; // Skip if the node has already been contacted - if (contacted[nextNode.id]) { + if (contacted[nextNodeId]) { continue; } // Connect to the node (check if pre-existing connection exists, otherwise @@ -479,16 +438,16 @@ class NodeConnectionManager { try { // Add the node to the database so that we can find its address in // call to getConnectionToNode - await this.nodeGraph.setNode(nextNode.id, nextNode.address); - await this.getConnection(nextNode.id); + await this.nodeGraph.setNode(nextNodeId, nextNodeAddress.address); + await this.getConnection(nextNodeId); } catch (e) { // If we can't connect to the node, then skip it continue; } - contacted[nextNode.id] = true; + contacted[nextNodeId] = true; // Ask the node to get their own closest nodes to the target const foundClosest = await this.getRemoteNodeClosestNodes( - nextNode.id, + nextNodeId, targetNodeId, ); // Check to see if any of these are the target node. At the same time, add diff --git a/src/nodes/NodeGraph.ts b/src/nodes/NodeGraph.ts index 9fa404896..34fa5c56a 100644 --- a/src/nodes/NodeGraph.ts +++ b/src/nodes/NodeGraph.ts @@ -105,7 +105,7 @@ class NodeGraph { // Bucket metadata sublevel: `!meta!! -> value` this.nodeGraphMetaDbPath = [...this.nodeGraphDbPath, 'meta' + space]; // Bucket sublevel: `!buckets!! 
-> NodeData` - // The BucketIndex can range from 0 to NodeId bitsize minus 1 + // The BucketIndex can range from 0 to NodeId bit-size minus 1 // So 256 bits means 256 buckets of 0 to 255 this.nodeGraphBucketsDbPath = [...this.nodeGraphDbPath, 'buckets' + space]; // Last updated sublevel: `!lastUpdated!!- -> NodeId` @@ -166,7 +166,7 @@ class NodeGraph { } /** - * Get all nodes + * Get all nodes. * Nodes are always sorted by `NodeBucketIndex` first * Then secondly by the node IDs * The `order` parameter applies to both, for example possible sorts: @@ -340,7 +340,7 @@ class NodeGraph { } /** - * Gets all buckets + * Gets all buckets. * Buckets are always sorted by `NodeBucketIndex` first * Then secondly by the `sort` parameter * The `order` parameter applies to both, for example possible sorts: @@ -582,6 +582,136 @@ class NodeGraph { return value; } + /** + * Finds the set of nodes (of size k) known by the current node (i.e. in its + * buckets' database) that have the smallest distance to the target node (i.e. + * are closest to the target node). + * i.e. FIND_NODE RPC from Kademlia spec + * + * Used by the RPC service. + * + * @param nodeId the node ID to find other nodes closest to it + * @param limit the number of the closest nodes to return (by default, returns + * according to the maximum number of nodes per bucket) + * @param tran + * @returns a mapping containing exactly k nodeIds -> nodeAddresses (unless the + * current node has less than k nodes in all of its buckets, in which case it + * returns all nodes it has knowledge of) + */ + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) + public async getClosestNodes( + nodeId: NodeId, + limit: number = this.nodeBucketLimit, + tran: DBTransaction, + ): Promise { + // Buckets map to the target node in the following way; + // 1. 0, 1, ..., T-1 -> T + // 2. T -> 0, 1, ..., T-1 + // 3. T+1, T+2, ..., 255 are unchanged + // We need to obtain nodes in the following bucket order + // 1. T + // 2. 
iterate over 0 ---> T-1 + // 3. iterate over T+1 ---> K + // Need to work out the relevant bucket to start from + const startingBucket = nodesUtils.bucketIndex( + this.keyManager.getNodeId(), + nodeId, + ); + // Getting the whole target's bucket first + const nodeIds: NodeBucket = await this.getBucket( + startingBucket, + undefined, + undefined, + tran, + ); + // We need to iterate over the key stream + // When streaming we want all nodes in the starting bucket + // The keys takes the form `!(lexpack bucketId)!(nodeId)` + // We can just use `!(lexpack bucketId)` to start from + // Less than `!(bucketId 101)!` gets us buckets 100 and lower + // greater than `!(bucketId 99)!` gets up buckets 100 and greater + const prefix = Buffer.from([33]); // Code for `!` prefix + if (nodeIds.length < limit) { + // Just before target bucket + const bucketId = Buffer.from(nodesUtils.bucketKey(startingBucket)); + const endKeyLower = Buffer.concat([prefix, bucketId, prefix]); + const remainingLimit = limit - nodeIds.length; + // Iterate over lower buckets + tran.iterator( + { + lt: endKeyLower, + limit: remainingLimit, + valueAsBuffer: false, + }, + this.nodeGraphBucketsDbPath, + ); + for await (const [key, nodeData] of tran.iterator( + { + lt: endKeyLower, + limit: remainingLimit, + valueAsBuffer: false, + }, + this.nodeGraphBucketsDbPath, + )) { + const info = nodesUtils.parseBucketsDbKey(key as unknown as Buffer); + nodeIds.push([info.nodeId, nodeData]); + } + } + if (nodeIds.length < limit) { + // Just after target bucket + const bucketId = Buffer.from(nodesUtils.bucketKey(startingBucket + 1)); + const startKeyUpper = Buffer.concat([prefix, bucketId, prefix]); + const remainingLimit = limit - nodeIds.length; + // Iterate over ids further away + tran.iterator( + { + gt: startKeyUpper, + limit: remainingLimit, + }, + this.nodeGraphBucketsDbPath, + ); + for await (const [key, nodeData] of tran.iterator( + { + gt: startKeyUpper, + limit: remainingLimit, + valueAsBuffer: false, + }, + 
this.nodeGraphBucketsDbPath, + )) { + const info = nodesUtils.parseBucketsDbKey(key as unknown as Buffer); + nodeIds.push([info.nodeId, nodeData]); + } + } + // If no nodes were found, return nothing + if (nodeIds.length === 0) return []; + // Need to get the whole of the last bucket + const lastBucketIndex = nodesUtils.bucketIndex( + this.keyManager.getNodeId(), + nodeIds[nodeIds.length - 1][0], + ); + const lastBucket = await this.getBucket( + lastBucketIndex, + undefined, + undefined, + tran, + ); + // Pop off elements of the same bucket to avoid duplicates + let element = nodeIds.pop(); + while ( + element != null && + nodesUtils.bucketIndex(this.keyManager.getNodeId(), element[0]) === + lastBucketIndex + ) { + element = nodeIds.pop(); + } + if (element != null) nodeIds.push(element); + // Adding last bucket to the list + nodeIds.push(...lastBucket); + + nodesUtils.bucketSortByDistance(nodeIds, nodeId, 'asc'); + return nodeIds.slice(0, limit); + } + /** * Sets a bucket meta property * This is protected because users cannot directly manipulate bucket meta diff --git a/tests/nodes/NodeConnectionManager.general.test.ts b/tests/nodes/NodeConnectionManager.general.test.ts index d21be106b..24986923b 100644 --- a/tests/nodes/NodeConnectionManager.general.test.ts +++ b/tests/nodes/NodeConnectionManager.general.test.ts @@ -74,7 +74,6 @@ describe(`${NodeConnectionManager.name} general test`, () => { let keyManager: KeyManager; let db: DB; let proxy: Proxy; - let nodeGraph: NodeGraph; let remoteNode1: PolykeyAgent; @@ -336,129 +335,6 @@ describe(`${NodeConnectionManager.name} general test`, () => { }, global.failedConnectionTimeout * 2, ); - test('finds a single closest node', async () => { - // NodeConnectionManager under test - const nodeConnectionManager = new NodeConnectionManager({ - keyManager, - nodeGraph, - proxy, - logger: nodeConnectionManagerLogger, - }); - await nodeConnectionManager.start(); - try { - // New node added - const newNode2Id = nodeId1; - const 
newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - // Find the closest nodes to some node, NODEID3 - const closest = await nodeConnectionManager.getClosestLocalNodes(nodeId3); - expect(closest).toContainEqual({ - id: newNode2Id, - distance: 121n, - address: { host: '227.1.1.1', port: 4567 }, - }); - } finally { - await nodeConnectionManager.stop(); - } - }); - test('finds 3 closest nodes', async () => { - const nodeConnectionManager = new NodeConnectionManager({ - keyManager, - nodeGraph, - proxy, - logger: nodeConnectionManagerLogger, - }); - await nodeConnectionManager.start(); - try { - // Add 3 nodes - await nodeGraph.setNode(nodeId1, { - host: '2.2.2.2', - port: 2222, - } as NodeAddress); - await nodeGraph.setNode(nodeId2, { - host: '3.3.3.3', - port: 3333, - } as NodeAddress); - await nodeGraph.setNode(nodeId3, { - host: '4.4.4.4', - port: 4444, - } as NodeAddress); - - // Find the closest nodes to some node, NODEID4 - const closest = await nodeConnectionManager.getClosestLocalNodes(nodeId3); - expect(closest.length).toBe(5); - expect(closest).toContainEqual({ - id: nodeId3, - distance: 0n, - address: { host: '4.4.4.4', port: 4444 }, - }); - expect(closest).toContainEqual({ - id: nodeId2, - distance: 116n, - address: { host: '3.3.3.3', port: 3333 }, - }); - expect(closest).toContainEqual({ - id: nodeId1, - distance: 121n, - address: { host: '2.2.2.2', port: 2222 }, - }); - } finally { - await nodeConnectionManager.stop(); - } - }); - test('finds the 20 closest nodes', async () => { - const nodeConnectionManager = new NodeConnectionManager({ - keyManager, - nodeGraph, - proxy, - logger: nodeConnectionManagerLogger, - }); - await nodeConnectionManager.start(); - try { - // Generate the node ID to find the closest nodes to (in bucket 100) - const nodeId = keyManager.getNodeId(); - const nodeIdToFind = testNodesUtils.generateNodeIdForBucket(nodeId, 100); - // Now generate and add 20 
nodes that will be close to this node ID - const addedClosestNodes: NodeData[] = []; - for (let i = 1; i < 101; i += 5) { - const closeNodeId = testNodesUtils.generateNodeIdForBucket( - nodeIdToFind, - i, - ); - const nodeAddress = { - host: (i + '.' + i + '.' + i + '.' + i) as Host, - port: i as Port, - }; - await nodeGraph.setNode(closeNodeId, nodeAddress); - addedClosestNodes.push({ - id: closeNodeId, - address: nodeAddress, - distance: nodesUtils.calculateDistance(nodeIdToFind, closeNodeId), - }); - } - // Now create and add 10 more nodes that are far away from this node - for (let i = 1; i <= 10; i++) { - const farNodeId = nodeIdGenerator(i); - const nodeAddress = { - host: `${i}.${i}.${i}.${i}` as Host, - port: i as Port, - }; - await nodeGraph.setNode(farNodeId, nodeAddress); - } - - // Find the closest nodes to the original generated node ID - const closest = await nodeConnectionManager.getClosestLocalNodes( - nodeIdToFind, - ); - // We should always only receive k nodes - expect(closest.length).toBe(nodeGraph.maxNodesPerBucket); - // Retrieved closest nodes should be exactly the same as the ones we added - expect(closest).toEqual(addedClosestNodes); - } finally { - await nodeConnectionManager.stop(); - } - }); test('receives 20 closest local nodes from connected target', async () => { let serverPKAgent: PolykeyAgent | undefined; let nodeConnectionManager: NodeConnectionManager | undefined; diff --git a/tests/nodes/NodeGraph.test.ts b/tests/nodes/NodeGraph.test.ts index 6ea350cad..abf5534cd 100644 --- a/tests/nodes/NodeGraph.test.ts +++ b/tests/nodes/NodeGraph.test.ts @@ -172,7 +172,7 @@ describe(`${NodeGraph.name} test`, () => { (nodeId) => !nodeId.equals(keyManager.getNodeId()), ); let bucketIndexes: Array; - let nodes: Array<[NodeId, NodeData]>; + let nodes: NodeBucket; nodes = await utils.asyncIterableArray(nodeGraph.getNodes()); expect(nodes).toHaveLength(0); for (const nodeId of nodeIds) { @@ -715,4 +715,343 @@ describe(`${NodeGraph.name} test`, () => 
{ expect(buckets2).not.toStrictEqual(buckets1); await nodeGraph.stop(); }); + test('get closest nodes, 40 nodes lower than target, take 20', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const baseNodeId = keyManager.getNodeId(); + const nodeIds: NodeBucket = []; + // Add 1 node to each bucket + for (let i = 0; i < 40; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 50 + i, + i, + ); + nodeIds.push([nodeId, {} as NodeData]); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); + } + const targetNodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 100, + 2, + ); + const result = await nodeGraph.getClosestNodes(targetNodeId, 20); + nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); + const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); + const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); + // Are the closest nodes out of all of the nodes + expect(a.slice(0, b.length)).toEqual(b); + + // Check that the list is strictly ascending + const closestNodeDistances = result.map(([nodeId]) => + nodesUtils.nodeDistance(targetNodeId, nodeId), + ); + expect( + closestNodeDistances.slice(1).every((distance, i) => { + return closestNodeDistances[i] < distance; + }), + ).toBe(true); + await nodeGraph.stop(); + }); + test('get closest nodes, 15 nodes lower than target, take 20', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const baseNodeId = keyManager.getNodeId(); + const nodeIds: NodeBucket = []; + // Add 1 node to each bucket + for (let i = 0; i < 15; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 50 + i, + i, + ); + nodeIds.push([nodeId, {} as NodeData]); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); + } + const targetNodeId = 
testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 100, + 2, + ); + const result = await nodeGraph.getClosestNodes(targetNodeId); + nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); + const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); + const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); + // Are the closest nodes out of all of the nodes + expect(a.slice(0, b.length)).toEqual(b); + + // Check that the list is strictly ascending + const closestNodeDistances = result.map(([nodeId]) => + nodesUtils.nodeDistance(targetNodeId, nodeId), + ); + expect( + closestNodeDistances.slice(1).every((distance, i) => { + return closestNodeDistances[i] < distance; + }), + ).toBe(true); + await nodeGraph.stop(); + }); + test('get closest nodes, 10 nodes lower than target, 30 nodes above, take 20', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const baseNodeId = keyManager.getNodeId(); + const nodeIds: NodeBucket = []; + // Add 1 node to each bucket + for (let i = 0; i < 40; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 90 + i, + i, + ); + nodeIds.push([nodeId, {} as NodeData]); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); + } + const targetNodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 100, + 2, + ); + const result = await nodeGraph.getClosestNodes(targetNodeId); + nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); + const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); + const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); + // Are the closest nodes out of all of the nodes + expect(a.slice(0, b.length)).toEqual(b); + + // Check that the list is strictly ascending + const closestNodeDistances = result.map(([nodeId]) => + nodesUtils.nodeDistance(targetNodeId, nodeId), + ); + expect( + closestNodeDistances.slice(1).every((distance, i) => { + return 
closestNodeDistances[i] < distance; + }), + ).toBe(true); + await nodeGraph.stop(); + }); + test('get closest nodes, 10 nodes lower than target, 30 nodes above, take 5', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const baseNodeId = keyManager.getNodeId(); + const nodeIds: NodeBucket = []; + // Add 1 node to each bucket + for (let i = 0; i < 40; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 90 + i, + i, + ); + nodeIds.push([nodeId, {} as NodeData]); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); + } + const targetNodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 100, + 2, + ); + const result = await nodeGraph.getClosestNodes(targetNodeId, 5); + nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); + const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); + const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); + // Are the closest nodes out of all of the nodes + expect(a.slice(0, b.length)).toEqual(b); + + // Check that the list is strictly ascending + const closestNodeDistances = result.map(([nodeId]) => + nodesUtils.nodeDistance(targetNodeId, nodeId), + ); + expect( + closestNodeDistances.slice(1).every((distance, i) => { + return closestNodeDistances[i] < distance; + }), + ).toBe(true); + await nodeGraph.stop(); + }); + test('get closest nodes, 5 nodes lower than target, 10 nodes above, take 20', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const baseNodeId = keyManager.getNodeId(); + const nodeIds: NodeBucket = []; + // Add 1 node to each bucket + for (let i = 0; i < 15; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 95 + i, + i, + ); + nodeIds.push([nodeId, {} as NodeData]); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as 
NodeAddress); + } + const targetNodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 100, + 2, + ); + const result = await nodeGraph.getClosestNodes(targetNodeId); + nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); + const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); + const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); + // Are the closest nodes out of all of the nodes + expect(a.slice(0, b.length)).toEqual(b); + + // Check that the list is strictly ascending + const closestNodeDistances = result.map(([nodeId]) => + nodesUtils.nodeDistance(targetNodeId, nodeId), + ); + expect( + closestNodeDistances.slice(1).every((distance, i) => { + return closestNodeDistances[i] < distance; + }), + ).toBe(true); + await nodeGraph.stop(); + }); + test('get closest nodes, 40 nodes above target, take 20', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const baseNodeId = keyManager.getNodeId(); + const nodeIds: NodeBucket = []; + // Add 1 node to each bucket + for (let i = 0; i < 40; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 101 + i, + i, + ); + nodeIds.push([nodeId, {} as NodeData]); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); + } + const targetNodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 100, + 2, + ); + const result = await nodeGraph.getClosestNodes(targetNodeId); + nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); + const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); + const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); + // Are the closest nodes out of all of the nodes + expect(a.slice(0, b.length)).toEqual(b); + + // Check that the list is strictly ascending + const closestNodeDistances = result.map(([nodeId]) => + nodesUtils.nodeDistance(targetNodeId, nodeId), + ); + expect( + closestNodeDistances.slice(1).every((distance, i) => 
{ + return closestNodeDistances[i] < distance; + }), + ).toBe(true); + await nodeGraph.stop(); + }); + test('get closest nodes, 15 nodes above target, take 20', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const baseNodeId = keyManager.getNodeId(); + const nodeIds: NodeBucket = []; + // Add 1 node to each bucket + for (let i = 0; i < 15; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 101 + i, + i, + ); + nodeIds.push([nodeId, {} as NodeData]); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); + } + const targetNodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 100, + 2, + ); + const result = await nodeGraph.getClosestNodes(targetNodeId); + nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); + const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); + const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); + // Are the closest nodes out of all of the nodes + expect(a.slice(0, b.length)).toEqual(b); + + // Check that the list is strictly ascending + const closestNodeDistances = result.map(([nodeId]) => + nodesUtils.nodeDistance(targetNodeId, nodeId), + ); + expect( + closestNodeDistances.slice(1).every((distance, i) => { + return closestNodeDistances[i] < distance; + }), + ).toBe(true); + await nodeGraph.stop(); + }); + test('get closest nodes, no nodes, take 20', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const baseNodeId = keyManager.getNodeId(); + const nodeIds: NodeBucket = []; + const targetNodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 100, + 2, + ); + const result = await nodeGraph.getClosestNodes(targetNodeId); + nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); + const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); + const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); + // 
Are the closest nodes out of all of the nodes + expect(a.slice(0, b.length)).toEqual(b); + + // Check that the list is strictly ascending + const closestNodeDistances = result.map(([nodeId]) => + nodesUtils.nodeDistance(targetNodeId, nodeId), + ); + expect( + closestNodeDistances.slice(1).every((distance, i) => { + return closestNodeDistances[i] < distance; + }), + ).toBe(true); + await nodeGraph.stop(); + }); }); From 6518e58064e7c831893eb5446f59af8e9cc16996 Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Thu, 17 Mar 2022 17:32:01 +1100 Subject: [PATCH 04/39] fix: `NodeManager.setNode` Properly handles adding new node when bucket is full Logic of adding nodes has been split between `NodeManager` and `NodeGraph`. The `NodeGraph.setNode` just handles adding a node to the bucket where the `NodeManager.setNode` contains the logic of when to add the node Relates #359 --- .../service/nodesClosestLocalNodesGet.ts | 3 +- src/client/GRPCClientClient.ts | 11 +- src/client/service/nodesAdd.ts | 1 + src/nodes/NodeConnectionManager.ts | 10 +- src/nodes/NodeGraph.ts | 153 +++++++++++---- src/nodes/NodeManager.ts | 67 ++++++- src/nodes/utils.ts | 3 - tests/acl/ACL.test.ts | 13 -- tests/nodes/NodeConnection.test.ts | 8 +- tests/nodes/NodeManager.test.ts | 181 ++++++++++++++++++ 10 files changed, 375 insertions(+), 75 deletions(-) diff --git a/src/agent/service/nodesClosestLocalNodesGet.ts b/src/agent/service/nodesClosestLocalNodesGet.ts index 844aa0253..36a172b12 100644 --- a/src/agent/service/nodesClosestLocalNodesGet.ts +++ b/src/agent/service/nodesClosestLocalNodesGet.ts @@ -47,7 +47,8 @@ function nodesClosestLocalNodesGet({ ); // Get all local nodes that are closest to the target node from the request const closestNodes = await db.withTransactionF( - async (tran) => await nodeGraph.getClosestNodes(nodeId, tran), + async (tran) => + await nodeGraph.getClosestNodes(nodeId, undefined, tran), ); for (const [nodeId, nodeData] of closestNodes) { const addressMessage = new 
nodesPB.Address(); diff --git a/src/client/GRPCClientClient.ts b/src/client/GRPCClientClient.ts index 78b13ec9d..2b1b905db 100644 --- a/src/client/GRPCClientClient.ts +++ b/src/client/GRPCClientClient.ts @@ -3,7 +3,7 @@ import type { ClientReadableStream } from '@grpc/grpc-js/build/src/call'; import type { AsyncGeneratorReadableStreamClient } from '../grpc/types'; import type { Session } from '../sessions'; import type { NodeId } from '../nodes/types'; -import type { Host, Port, TLSConfig, ProxyConfig } from '../network/types'; +import type { Host, Port, ProxyConfig, TLSConfig } from '../network/types'; import type * as utilsPB from '../proto/js/polykey/v1/utils/utils_pb'; import type * as agentPB from '../proto/js/polykey/v1/agent/agent_pb'; import type * as vaultsPB from '../proto/js/polykey/v1/vaults/vaults_pb'; @@ -68,7 +68,7 @@ class GRPCClientClient extends GRPCClient { interceptors, logger, }); - const grpcClientClient = new GRPCClientClient({ + return new GRPCClientClient({ client, nodeId, host, @@ -80,7 +80,6 @@ class GRPCClientClient extends GRPCClient { destroyCallback, logger, }); - return grpcClientClient; } public async destroy() { @@ -905,6 +904,12 @@ class GRPCClientClient extends GRPCClient { public nodesGetAll(...args) { return grpcUtils.promisifyUnaryCall( this.client, + { + nodeId: this.nodeId, + host: this.host, + port: this.port, + command: this.identitiesAuthenticate.name, + }, this.client.nodesGetAll, )(...args); } diff --git a/src/client/service/nodesAdd.ts b/src/client/service/nodesAdd.ts index 079d2eee2..0884a0f0b 100644 --- a/src/client/service/nodesAdd.ts +++ b/src/client/service/nodesAdd.ts @@ -67,6 +67,7 @@ function nodesAdd({ host, port, } as NodeAddress, + undefined, tran, ), ); diff --git a/src/nodes/NodeConnectionManager.ts b/src/nodes/NodeConnectionManager.ts index fc5c99ff8..c20f093f3 100644 --- a/src/nodes/NodeConnectionManager.ts +++ b/src/nodes/NodeConnectionManager.ts @@ -10,9 +10,7 @@ import type { NodeId, NodeIdString, 
SeedNodes, - NodeEntry, } from './types'; -import type { DBTransaction } from '@matrixai/db'; import { withF } from '@matrixai/resources'; import Logger from '@matrixai/logger'; import { ready, StartStop } from '@matrixai/async-init/dist/StartStop'; @@ -103,7 +101,7 @@ class NodeConnectionManager { this.logger.info(`Starting ${this.constructor.name}`); for (const nodeIdEncoded in this.seedNodes) { const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded)!; - await this.nodeGraph.setNode(nodeId, this.seedNodes[nodeIdEncoded]); + await this.nodeGraph.setNode(nodeId, this.seedNodes[nodeIdEncoded]); // FIXME: also fine implicit transactions } this.logger.info(`Started ${this.constructor.name}`); } @@ -243,6 +241,7 @@ class NodeConnectionManager { )}`, ); // Creating the connection and set in map + // FIXME: this is fine, just use the implicit tran. fix this when adding optional transactions const targetAddress = await this.findNode(targetNodeId); // If the stored host is not a valid host (IP address), // then we assume it to be a hostname @@ -363,6 +362,7 @@ class NodeConnectionManager { * Retrieves the node address. If an entry doesn't exist in the db, then * proceeds to locate it using Kademlia. 
* @param targetNodeId Id of the node we are tying to find + * @param tran */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) public async findNode(targetNodeId: NodeId): Promise { @@ -405,6 +405,7 @@ class NodeConnectionManager { // Let foundTarget: boolean = false; let foundAddress: NodeAddress | undefined = undefined; // Get the closest alpha nodes to the target node (set as shortlist) + // FIXME: no tran const shortlist = await this.nodeGraph.getClosestNodes( targetNodeId, this.initialClosestNodes, @@ -438,6 +439,7 @@ class NodeConnectionManager { try { // Add the node to the database so that we can find its address in // call to getConnectionToNode + // FIXME: no tran await this.nodeGraph.setNode(nextNodeId, nextNodeAddress.address); await this.getConnection(nextNodeId); } catch (e) { @@ -458,6 +460,7 @@ class NodeConnectionManager { continue; } if (nodeId.equals(targetNodeId)) { + // FIXME: no tran await this.nodeGraph.setNode(nodeId, nodeData.address); foundAddress = nodeData.address; // We have found the target node, so we can stop trying to look for it @@ -556,6 +559,7 @@ class NodeConnectionManager { ); for (const [nodeId, nodeData] of nodes) { // FIXME: this should be the `nodeManager.setNode` + // FIXME: no tran needed await this.nodeGraph.setNode(nodeId, nodeData.address); } } diff --git a/src/nodes/NodeGraph.ts b/src/nodes/NodeGraph.ts index 34fa5c56a..0bf30f3ae 100644 --- a/src/nodes/NodeGraph.ts +++ b/src/nodes/NodeGraph.ts @@ -154,8 +154,14 @@ class NodeGraph { @ready(new nodesErrors.ErrorNodeGraphNotRunning()) public async getNode( nodeId: NodeId, - tran: DBTransaction, + tran?: DBTransaction, ): Promise { + if (tran == null) { + return this.db.withTransactionF(async (tran) => + this.getNode(nodeId, tran), + ); + } + const [bucketIndex] = this.bucketIndex(nodeId); const bucketDomain = [ ...this.nodeGraphBucketsDbPath, @@ -176,8 +182,15 @@ class NodeGraph { @ready(new nodesErrors.ErrorNodeGraphNotRunning()) public async *getNodes( 
order: 'asc' | 'desc' = 'asc', - tran: DBTransaction, + tran?: DBTransaction, ): AsyncGenerator<[NodeId, NodeData]> { + if (tran == null) { + const getNodes = (tran) => this.getNodes(order, tran); + return yield* this.db.withTransactionG(async function* (tran) { + return yield* getNodes(tran); + }); + } + for await (const [key, nodeData] of tran.iterator( { reverse: order !== 'asc', @@ -190,12 +203,25 @@ class NodeGraph { } } + /** + * Will add a node to the node graph and increment the bucket count. + * If the node already existed it will be updated. + * @param nodeId NodeId to add to the NodeGraph + * @param nodeAddress Address information to add + * @param tran + */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) public async setNode( nodeId: NodeId, nodeAddress: NodeAddress, - tran: DBTransaction, + tran?: DBTransaction, ): Promise { + if (tran == null) { + return this.db.withTransactionF(async (tran) => + this.setNode(nodeId, nodeAddress, tran), + ); + } + const [bucketIndex, bucketKey] = this.bucketIndex(nodeId); const lastUpdatedPath = [...this.nodeGraphLastUpdatedDbPath, bucketKey]; const bucketPath = [...this.nodeGraphBucketsDbPath, bucketKey]; @@ -203,57 +229,67 @@ class NodeGraph { ...bucketPath, nodesUtils.bucketDbKey(nodeId), ]); - // If this is a new entry, check the bucket limit - if (nodeData == null) { - const count = await this.getBucketMetaProp(bucketIndex, 'count', tran); - if (count < this.nodeBucketLimit) { - // Increment the bucket count - await this.setBucketMetaProp(bucketIndex, 'count', count + 1, tran); - } else { - // Remove the oldest entry in the bucket - let oldestLastUpdatedKey: Buffer; - let oldestNodeId: NodeId; - for await (const [key] of tran.iterator( - { - limit: 1, - values: false, - }, - this.nodeGraphLastUpdatedDbPath, - )) { - oldestLastUpdatedKey = key as unknown as Buffer; - ({ nodeId: oldestNodeId } = nodesUtils.parseLastUpdatedBucketDbKey( - key as unknown as Buffer, - )); - } - await tran.del([...bucketPath, 
oldestNodeId!.toBuffer()]); - await tran.del([...lastUpdatedPath, oldestLastUpdatedKey!]); - } - } else { - // This is an existing entry, so the index entry must be reset + if (nodeData != null) { + // If the node already exists we want to remove the old `lastUpdated` const lastUpdatedKey = nodesUtils.lastUpdatedBucketDbKey( nodeData.lastUpdated, nodeId, ); await tran.del([...lastUpdatedPath, lastUpdatedKey]); + } else { + // It didn't exist so we want to increment the bucket count + const count = await this.getBucketMetaProp(bucketIndex, 'count', tran); + await this.setBucketMetaProp(bucketIndex, 'count', count + 1, tran); } const lastUpdated = getUnixtime(); await tran.put([...bucketPath, nodesUtils.bucketDbKey(nodeId)], { address: nodeAddress, lastUpdated, }); - const lastUpdatedKey = nodesUtils.lastUpdatedBucketDbKey( + const newLastUpdatedKey = nodesUtils.lastUpdatedBucketDbKey( lastUpdated, nodeId, ); await tran.put( - [...lastUpdatedPath, lastUpdatedKey], + [...lastUpdatedPath, newLastUpdatedKey], nodesUtils.bucketDbKey(nodeId), true, ); } @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async unsetNode(nodeId: NodeId, tran: DBTransaction): Promise { + public async getOldestNode( + bucketIndex: number, + tran?: DBTransaction, + ): Promise { + if (tran == null) { + return this.db.withTransactionF(async (tran) => + this.getOldestNode(bucketIndex, tran), + ); + } + + const bucketKey = nodesUtils.bucketKey(bucketIndex); + // Remove the oldest entry in the bucket + let oldestNodeId: NodeId | undefined; + for await (const [key] of tran.iterator({ limit: 1 }, [ + ...this.nodeGraphLastUpdatedDbPath, + bucketKey, + ])) { + ({ nodeId: oldestNodeId } = nodesUtils.parseLastUpdatedBucketDbKey( + key as unknown as Buffer, + )); + } + return oldestNodeId; + } + + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) + public async unsetNode(nodeId: NodeId, tran?: DBTransaction): Promise { + if (tran == null) { + return this.db.withTransactionF(async (tran) => + 
this.unsetNode(nodeId, tran), + ); + } + const [bucketIndex, bucketKey] = this.bucketIndex(nodeId); const bucketPath = [...this.nodeGraphBucketsDbPath, bucketKey]; const lastUpdatedPath = [...this.nodeGraphLastUpdatedDbPath, bucketKey]; @@ -284,8 +320,14 @@ class NodeGraph { bucketIndex: NodeBucketIndex, sort: 'nodeId' | 'distance' | 'lastUpdated' = 'nodeId', order: 'asc' | 'desc' = 'asc', - tran: DBTransaction, + tran?: DBTransaction, ): Promise { + if (tran == null) { + return this.db.withTransactionF(async (tran) => + this.getBucket(bucketIndex, sort, order, tran), + ); + } + if (bucketIndex < 0 || bucketIndex >= this.nodeIdBits) { throw new nodesErrors.ErrorNodeGraphBucketIndex( `bucketIndex must be between 0 and ${this.nodeIdBits - 1} inclusive`, @@ -355,8 +397,15 @@ class NodeGraph { public async *getBuckets( sort: 'nodeId' | 'distance' | 'lastUpdated' = 'nodeId', order: 'asc' | 'desc' = 'asc', - tran: DBTransaction, + tran?: DBTransaction, ): AsyncGenerator<[NodeBucketIndex, NodeBucket]> { + if (tran == null) { + const getBuckets = (tran) => this.getBuckets(sort, order, tran); + return yield* this.db.withTransactionG(async function* (tran) { + return yield* getBuckets(tran); + }); + } + let bucketIndex: NodeBucketIndex | undefined = undefined; let bucket: NodeBucket = []; if (sort === 'nodeId' || sort === 'distance') { @@ -448,8 +497,14 @@ class NodeGraph { @ready(new nodesErrors.ErrorNodeGraphNotRunning()) public async resetBuckets( nodeIdOwn: NodeId, - tran: DBTransaction, + tran?: DBTransaction, ): Promise { + if (tran == null) { + return this.db.withTransactionF(async (tran) => + this.resetBuckets(nodeIdOwn, tran), + ); + } + // Setup new space const spaceNew = this.space === '0' ? 
'1' : '0'; const nodeGraphMetaDbPathNew = [...this.nodeGraphDbPath, 'meta' + spaceNew]; @@ -536,8 +591,14 @@ class NodeGraph { @ready(new nodesErrors.ErrorNodeGraphNotRunning()) public async getBucketMeta( bucketIndex: NodeBucketIndex, - tran: DBTransaction, + tran?: DBTransaction, ): Promise { + if (tran == null) { + return this.db.withTransactionF(async (tran) => + this.getBucketMeta(bucketIndex, tran), + ); + } + if (bucketIndex < 0 || bucketIndex >= this.nodeIdBits) { throw new nodesErrors.ErrorNodeGraphBucketIndex( `bucketIndex must be between 0 and ${this.nodeIdBits - 1} inclusive`, @@ -561,8 +622,14 @@ class NodeGraph { public async getBucketMetaProp( bucketIndex: NodeBucketIndex, key: Key, - tran: DBTransaction, + tran?: DBTransaction, ): Promise { + if (tran == null) { + return this.db.withTransactionF(async (tran) => + this.getBucketMetaProp(bucketIndex, key, tran), + ); + } + if (bucketIndex < 0 || bucketIndex >= this.nodeIdBits) { throw new nodesErrors.ErrorNodeGraphBucketIndex( `bucketIndex must be between 0 and ${this.nodeIdBits - 1} inclusive`, @@ -602,8 +669,14 @@ class NodeGraph { public async getClosestNodes( nodeId: NodeId, limit: number = this.nodeBucketLimit, - tran: DBTransaction, + tran?: DBTransaction, ): Promise { + if (tran == null) { + return this.db.withTransactionF(async (tran) => + this.getClosestNodes(nodeId, limit, tran), + ); + } + // Buckets map to the target node in the following way; // 1. 0, 1, ..., T-1 -> T // 2. 
T -> 0, 1, ..., T-1 @@ -736,7 +809,7 @@ class NodeGraph { * The bucket key is the string encoded version of bucket index * that preserves lexicographic order */ - protected bucketIndex(nodeId: NodeId): [NodeBucketIndex, string] { + public bucketIndex(nodeId: NodeId): [NodeBucketIndex, string] { const nodeIdOwn = this.keyManager.getNodeId(); if (nodeId.equals(nodeIdOwn)) { throw new nodesErrors.ErrorNodeGraphSameNodeId(); diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index ac9d3a4a4..d5b905cbc 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -53,6 +53,10 @@ class NodeManager { * Determines whether a node in the Polykey network is online. * @return true if online, false if offline */ + // FIXME: We shouldn't be trying to find the node just to ping it + // since we are usually pinging it during the find procedure anyway. + // I think we should be providing the address of what we're trying to ping, + // possibly make it an optional parameter? public async pingNode(targetNodeId: NodeId): Promise { const targetAddress: NodeAddress = await this.nodeConnectionManager.findNode(targetNodeId); @@ -311,7 +315,7 @@ class NodeManager { */ public async getNodeAddress( nodeId: NodeId, - tran?: DBTransaction, + tran: DBTransaction, ): Promise { return (await this.nodeGraph.getNode(nodeId, tran))?.address; } @@ -324,7 +328,7 @@ class NodeManager { */ public async knowsNode( targetNodeId: NodeId, - tran?: DBTransaction, + tran: DBTransaction, ): Promise { return (await this.nodeGraph.getNode(targetNodeId, tran)) != null; } @@ -334,22 +338,68 @@ class NodeManager { */ public async getBucket( bucketIndex: number, - tran?: DBTransaction, + tran: DBTransaction, ): Promise { - return await this.nodeGraph.getBucket(bucketIndex, tran); + return await this.nodeGraph.getBucket( + bucketIndex, + undefined, + undefined, + tran, + ); } /** - * Sets a node in the NodeGraph + * Adds a node to the node graph. + * Updates the node if the node already exists. 
+ * */ public async setNode( nodeId: NodeId, nodeAddress: NodeAddress, - tran?: DBTransaction, + force = false, + tran: DBTransaction, ): Promise { - return await this.nodeGraph.setNode(nodeId, nodeAddress, tran); + // When adding a node we need to handle 3 cases + // 1. The node already exists. We need to update it's last updated field + // 2. The node doesn't exist and bucket has room. + // We need to add the node to the bucket + // 3. The node doesn't exist and the bucket is full. + // We need to ping the oldest node. If the ping succeeds we need to update + // the lastUpdated of the oldest node and drop the new one. If the ping + // fails we delete the old node and add in the new one. + const nodeData = await this.nodeGraph.getNode(nodeId, tran); + // If this is a new entry, check the bucket limit + const [bucketIndex] = this.nodeGraph.bucketIndex(nodeId); + const count = await this.nodeGraph.getBucketMetaProp( + bucketIndex, + 'count', + tran, + ); + if (nodeData != null || count < this.nodeGraph.nodeBucketLimit) { + // Either already exists or has room in the bucket + // We want to add or update the node + await this.nodeGraph.setNode(nodeId, nodeAddress, tran); + } else { + // We want to add a node but the bucket is full + // We need to ping the oldest node + const oldestNodeId = (await this.nodeGraph.getOldestNode( + bucketIndex, + tran, + ))!; + if ((await this.pingNode(oldestNodeId)) && !force) { + // The node responded, we need to update it's info and drop the new node + const oldestNode = (await this.nodeGraph.getNode(oldestNodeId, tran))!; + await this.nodeGraph.setNode(oldestNodeId, oldestNode.address, tran); + } else { + // The node could not be contacted or force was set, + // we drop it in favor of the new node + await this.nodeGraph.unsetNode(oldestNodeId, tran); + await this.nodeGraph.setNode(nodeId, nodeAddress, tran); + } + } } + // FIXME // /** // * Updates the node in the NodeGraph // */ @@ -364,10 +414,11 @@ class NodeManager { /** * Removes 
a node from the NodeGraph */ - public async unsetNode(nodeId: NodeId, tran?: DBTransaction): Promise { + public async unsetNode(nodeId: NodeId, tran: DBTransaction): Promise { return await this.nodeGraph.unsetNode(nodeId, tran); } + // FIXME // /** // * Gets all buckets from the NodeGraph // */ diff --git a/src/nodes/utils.ts b/src/nodes/utils.ts index 1db803381..76bb4058a 100644 --- a/src/nodes/utils.ts +++ b/src/nodes/utils.ts @@ -1,12 +1,9 @@ import type { - NodeData, NodeId, NodeIdEncoded, NodeBucket, - NodeIdString, NodeBucketIndex, } from './types'; -import { utils as dbUtils } from '@matrixai/db'; import { IdInternal } from '@matrixai/id'; import lexi from 'lexicographic-integer'; import { bytes2BigInt, bufferSplit } from '../utils'; diff --git a/tests/acl/ACL.test.ts b/tests/acl/ACL.test.ts index cd0658560..ec4020a1b 100644 --- a/tests/acl/ACL.test.ts +++ b/tests/acl/ACL.test.ts @@ -11,7 +11,6 @@ import ACL from '@/acl/ACL'; import * as aclErrors from '@/acl/errors'; import * as keysUtils from '@/keys/utils'; import * as vaultsUtils from '@/vaults/utils'; -import * as testUtils from '../utils'; import * as testNodesUtils from '../nodes/utils'; describe(ACL.name, () => { @@ -109,30 +108,18 @@ describe(ACL.name, () => { await expect(acl.setNodesPerm([], {} as Permission)).rejects.toThrow( aclErrors.ErrorACLNotRunning, ); - await expect(acl.setNodesPermOps([], {} as Permission)).rejects.toThrow( - aclErrors.ErrorACLNotRunning, - ); await expect(acl.setNodePerm(nodeIdX, {} as Permission)).rejects.toThrow( aclErrors.ErrorACLNotRunning, ); - await expect(acl.setNodePermOps(nodeIdX, {} as Permission)).rejects.toThrow( - aclErrors.ErrorACLNotRunning, - ); await expect(acl.unsetNodePerm(nodeIdX)).rejects.toThrow( aclErrors.ErrorACLNotRunning, ); - await expect(acl.unsetNodePermOps(nodeIdX)).rejects.toThrow( - aclErrors.ErrorACLNotRunning, - ); await expect(acl.unsetVaultPerms(1 as VaultId)).rejects.toThrow( aclErrors.ErrorACLNotRunning, ); await 
expect(acl.joinNodePerm(nodeIdX, [])).rejects.toThrow( aclErrors.ErrorACLNotRunning, ); - await expect(acl.joinNodePermOps(nodeIdX, [])).rejects.toThrow( - aclErrors.ErrorACLNotRunning, - ); await expect(acl.joinVaultPerms(1 as VaultId, [])).rejects.toThrow( aclErrors.ErrorACLNotRunning, ); diff --git a/tests/nodes/NodeConnection.test.ts b/tests/nodes/NodeConnection.test.ts index c22475912..3ce2e7183 100644 --- a/tests/nodes/NodeConnection.test.ts +++ b/tests/nodes/NodeConnection.test.ts @@ -704,7 +704,7 @@ describe('${NodeConnection.name} test', () => { "should call `killSelf and throw if the server %s's during testUnaryFail", async (option) => { let nodeConnection: - | NodeConnection + | NodeConnection | undefined; let testProxy: Proxy | undefined; let testProcess: child_process.ChildProcessWithoutNullStreams | undefined; @@ -749,7 +749,7 @@ describe('${NodeConnection.name} test', () => { targetHost: testProxy.getProxyHost(), targetPort: testProxy.getProxyPort(), clientFactory: (args) => - testGrpcUtils.GRPCClientTest.createGRPCClientTest(args), + grpcTestUtils.GRPCClientTest.createGRPCClientTest(args), }); const client = nodeConnection.getClient(); @@ -774,7 +774,7 @@ describe('${NodeConnection.name} test', () => { "should call `killSelf and throw if the server %s's during testStreamFail", async (option) => { let nodeConnection: - | NodeConnection + | NodeConnection | undefined; let testProxy: Proxy | undefined; let testProcess: child_process.ChildProcessWithoutNullStreams | undefined; @@ -819,7 +819,7 @@ describe('${NodeConnection.name} test', () => { targetHost: testProxy.getProxyHost(), targetPort: testProxy.getProxyPort(), clientFactory: (args) => - testGrpcUtils.GRPCClientTest.createGRPCClientTest(args), + grpcTestUtils.GRPCClientTest.createGRPCClientTest(args), }); const client = nodeConnection.getClient(); diff --git a/tests/nodes/NodeManager.test.ts b/tests/nodes/NodeManager.test.ts index 0ac96ec27..d74df5c6a 100644 --- a/tests/nodes/NodeManager.test.ts 
+++ b/tests/nodes/NodeManager.test.ts @@ -18,6 +18,7 @@ import Sigchain from '@/sigchain/Sigchain'; import * as claimsUtils from '@/claims/utils'; import { promisify, sleep } from '@/utils'; import * as nodesUtils from '@/nodes/utils'; +import * as nodesTestUtils from './utils'; describe(`${NodeManager.name} test`, () => { const password = 'password'; @@ -423,4 +424,184 @@ describe(`${NodeManager.name} test`, () => { expect(chainData).toContain(nodesUtils.encodeNodeId(yNodeId)); }); }); + test('should add a node when bucket has room', async () => { + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: {} as NodeConnectionManager, + logger, + }); + const localNodeId = keyManager.getNodeId(); + const bucketIndex = 100; + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + ); + await nodeManager.setNode(nodeId, {} as NodeAddress); + + // Checking bucket + const bucket = await nodeManager.getBucket(bucketIndex); + expect(bucket).toHaveLength(1); + }); + test('should update a node if node exists', async () => { + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: {} as NodeConnectionManager, + logger, + }); + const localNodeId = keyManager.getNodeId(); + const bucketIndex = 100; + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + ); + await nodeManager.setNode(nodeId, { + host: '' as Host, + port: 11111 as Port, + }); + + const nodeData = (await nodeGraph.getNode(nodeId))!; + await sleep(1100); + + // Should update the node + await nodeManager.setNode(nodeId, { + host: '' as Host, + port: 22222 as Port, + }); + + const newNodeData = (await nodeGraph.getNode(nodeId))!; + expect(newNodeData.address.port).not.toEqual(nodeData.address.port); + expect(newNodeData.lastUpdated).not.toEqual(nodeData.lastUpdated); + }); + test('should not add node if bucket is full 
and old node is alive', async () => { + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: {} as NodeConnectionManager, + logger, + }); + const localNodeId = keyManager.getNodeId(); + const bucketIndex = 100; + // Creating 20 nodes in bucket + for (let i = 1; i <= 20; i++) { + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + i, + ); + await nodeManager.setNode(nodeId, { port: i } as NodeAddress); + } + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + ); + // Mocking ping + const nodeManagerPingMock = jest.spyOn(NodeManager.prototype, 'pingNode'); + nodeManagerPingMock.mockResolvedValue(true); + const oldestNodeId = await nodeGraph.getOldestNode(bucketIndex); + const oldestNode = await nodeGraph.getNode(oldestNodeId!); + // Waiting for a second to tick over + await sleep(1100); + // Adding a new node with bucket full + await nodeManager.setNode(nodeId, { port: 55555 } as NodeAddress); + // Bucket still contains max nodes + const bucket = await nodeManager.getBucket(bucketIndex); + expect(bucket).toHaveLength(nodeGraph.nodeBucketLimit); + // New node was not added + const node = await nodeGraph.getNode(nodeId); + expect(node).toBeUndefined(); + // Oldest node was updated + const oldestNodeNew = await nodeGraph.getNode(oldestNodeId!); + expect(oldestNodeNew!.lastUpdated).not.toEqual(oldestNode!.lastUpdated); + nodeManagerPingMock.mockRestore(); + }); + test('should add node if bucket is full, old node is alive and force is set', async () => { + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: {} as NodeConnectionManager, + logger, + }); + const localNodeId = keyManager.getNodeId(); + const bucketIndex = 100; + // Creating 20 nodes in bucket + for (let i = 1; i <= 20; i++) { + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + 
bucketIndex, + i, + ); + await nodeManager.setNode(nodeId, { port: i } as NodeAddress); + } + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + ); + // Mocking ping + const nodeManagerPingMock = jest.spyOn(NodeManager.prototype, 'pingNode'); + nodeManagerPingMock.mockResolvedValue(true); + const oldestNodeId = await nodeGraph.getOldestNode(bucketIndex); + // Adding a new node with bucket full + await nodeManager.setNode(nodeId, { port: 55555 } as NodeAddress, true); + // Bucket still contains max nodes + const bucket = await nodeManager.getBucket(bucketIndex); + expect(bucket).toHaveLength(nodeGraph.nodeBucketLimit); + // New node was added + const node = await nodeGraph.getNode(nodeId); + expect(node).toBeDefined(); + // Oldest node was removed + const oldestNodeNew = await nodeGraph.getNode(oldestNodeId!); + expect(oldestNodeNew).toBeUndefined(); + nodeManagerPingMock.mockRestore(); + }); + test('should add node if bucket is full and old node is dead', async () => { + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: {} as NodeConnectionManager, + logger, + }); + const localNodeId = keyManager.getNodeId(); + const bucketIndex = 100; + // Creating 20 nodes in bucket + for (let i = 1; i <= 20; i++) { + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + i, + ); + await nodeManager.setNode(nodeId, { port: i } as NodeAddress); + } + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + ); + // Mocking ping + const nodeManagerPingMock = jest.spyOn(NodeManager.prototype, 'pingNode'); + nodeManagerPingMock.mockResolvedValue(false); + const oldestNodeId = await nodeGraph.getOldestNode(bucketIndex); + // Adding a new node with bucket full + await nodeManager.setNode(nodeId, { port: 55555 } as NodeAddress, true); + // Bucket still contains max nodes + const bucket = await 
nodeManager.getBucket(bucketIndex); + expect(bucket).toHaveLength(nodeGraph.nodeBucketLimit); + // New node was added + const node = await nodeGraph.getNode(nodeId); + expect(node).toBeDefined(); + // Oldest node was removed + const oldestNodeNew = await nodeGraph.getNode(oldestNodeId!); + expect(oldestNodeNew).toBeUndefined(); + nodeManagerPingMock.mockRestore(); + }); }); From 8e777682a9277b0cf60a71c963021ae87854517c Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Wed, 23 Mar 2022 18:43:14 +1100 Subject: [PATCH 05/39] test: fixed `nodeGraph` `get all buckets` test fix into 7cc74d059f94739591307d9543fa73f999d3345f --- tests/nodes/NodeGraph.test.ts | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tests/nodes/NodeGraph.test.ts b/tests/nodes/NodeGraph.test.ts index abf5534cd..66b958716 100644 --- a/tests/nodes/NodeGraph.test.ts +++ b/tests/nodes/NodeGraph.test.ts @@ -402,7 +402,6 @@ describe(`${NodeGraph.name} test`, () => { expect(bucketIndex > bucketIndex_).toBe(true); bucketIndex_ = bucketIndex; expect(bucket.length > 0).toBe(true); - expect(bucket.length <= nodeGraph.nodeBucketLimit).toBe(true); for (const [nodeId, nodeData] of bucket) { expect(nodeId.byteLength).toBe(32); expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe( @@ -432,7 +431,6 @@ describe(`${NodeGraph.name} test`, () => { expect(bucketIndex < bucketIndex_).toBe(true); bucketIndex_ = bucketIndex; expect(bucket.length > 0).toBe(true); - expect(bucket.length <= nodeGraph.nodeBucketLimit).toBe(true); for (const [nodeId, nodeData] of bucket) { expect(nodeId.byteLength).toBe(32); expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe( @@ -462,7 +460,6 @@ describe(`${NodeGraph.name} test`, () => { expect(bucketIndex > bucketIndex_).toBe(true); bucketIndex_ = bucketIndex; expect(bucket.length > 0).toBe(true); - expect(bucket.length <= nodeGraph.nodeBucketLimit).toBe(true); for (const [nodeId, nodeData] of bucket) { expect(nodeId.byteLength).toBe(32); 
expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe( @@ -494,7 +491,6 @@ describe(`${NodeGraph.name} test`, () => { expect(bucketIndex < bucketIndex_).toBe(true); bucketIndex_ = bucketIndex; expect(bucket.length > 0).toBe(true); - expect(bucket.length <= nodeGraph.nodeBucketLimit).toBe(true); for (const [nodeId, nodeData] of bucket) { expect(nodeId.byteLength).toBe(32); expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe( @@ -525,7 +521,6 @@ describe(`${NodeGraph.name} test`, () => { expect(bucketIndex > bucketIndex_).toBe(true); bucketIndex_ = bucketIndex; expect(bucket.length > 0).toBe(true); - expect(bucket.length <= nodeGraph.nodeBucketLimit).toBe(true); for (const [nodeId, nodeData] of bucket) { expect(nodeId.byteLength).toBe(32); expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe( @@ -556,7 +551,6 @@ describe(`${NodeGraph.name} test`, () => { expect(bucketIndex < bucketIndex_).toBe(true); bucketIndex_ = bucketIndex; expect(bucket.length > 0).toBe(true); - expect(bucket.length <= nodeGraph.nodeBucketLimit).toBe(true); for (const [nodeId, nodeData] of bucket) { expect(nodeId.byteLength).toBe(32); expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe( From 685538f4ccadaff11a8d0a86a8b93aa8f6a34431 Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Thu, 24 Mar 2022 13:21:22 +1100 Subject: [PATCH 06/39] feat: added `connectionEstablishedCallback` to `Proxy` Added a callback to the `Proxy` that is called when a `ForwardConnection` or `ReverseConnection` is established and authenticated. It is called with the following connection information; `remoteNodeId`, `remoteHost`, `remotePort` and `type`. They type signifies if it was a forward or reverse connection. Note that this is only triggered by composed connections. Added a test for if the callback was called when a `ReverseConnection` is established. 
Relates #332 Relates #344 --- src/network/Proxy.ts | 29 ++++++++- src/network/types.ts | 11 ++++ tests/network/Proxy.test.ts | 118 +++++++++++++++++++++++++++++++++++- 3 files changed, 156 insertions(+), 2 deletions(-) diff --git a/src/network/Proxy.ts b/src/network/Proxy.ts index 1a6ff46f1..973c7f525 100644 --- a/src/network/Proxy.ts +++ b/src/network/Proxy.ts @@ -1,5 +1,12 @@ import type { AddressInfo, Socket } from 'net'; -import type { Host, Port, Address, ConnectionInfo, TLSConfig } from './types'; +import type { + Host, + Port, + Address, + ConnectionInfo, + TLSConfig, + ConnectionEstablishedCallback, +} from './types'; import type { ConnectionsForward } from './ConnectionForward'; import type { NodeId } from '../nodes/types'; import type { Timer } from '../types'; @@ -48,6 +55,7 @@ class Proxy { proxy: new Map(), reverse: new Map(), }; + protected connectionEstablishedCallback: ConnectionEstablishedCallback; constructor({ authToken, @@ -56,6 +64,7 @@ class Proxy { connEndTime = 1000, connPunchIntervalTime = 1000, connKeepAliveIntervalTime = 1000, + connectionEstablishedCallback = () => {}, logger, }: { authToken: string; @@ -64,6 +73,7 @@ class Proxy { connEndTime?: number; connPunchIntervalTime?: number; connKeepAliveIntervalTime?: number; + connectionEstablishedCallback?: ConnectionEstablishedCallback; logger?: Logger; }) { this.logger = logger ?? 
new Logger(Proxy.name); @@ -77,6 +87,7 @@ class Proxy { this.server = http.createServer(); this.server.on('request', this.handleRequest); this.server.on('connect', this.handleConnectForward); + this.connectionEstablishedCallback = connectionEstablishedCallback; this.logger.info(`Created ${Proxy.name}`); } @@ -521,6 +532,14 @@ class Proxy { timer, ); conn.compose(clientSocket); + // With the connection composed without error we can assume that the + // connection was established and verified + await this.connectionEstablishedCallback({ + remoteNodeId: conn.getServerNodeIds()[0], + remoteHost: conn.host, + remotePort: conn.port, + type: 'forward', + }); } protected async establishConnectionForward( @@ -687,6 +706,14 @@ class Proxy { timer, ); await conn.compose(utpConn, timer); + // With the connection composed without error we can assume that the + // connection was established and verified + await this.connectionEstablishedCallback({ + remoteNodeId: conn.getClientNodeIds()[0], + remoteHost: conn.host, + remotePort: conn.port, + type: 'reverse', + }); } protected async establishConnectionReverse( diff --git a/src/network/types.ts b/src/network/types.ts index 40d672a85..a5a62b4c2 100644 --- a/src/network/types.ts +++ b/src/network/types.ts @@ -55,6 +55,15 @@ type ConnectionInfo = { remotePort: Port; }; +type ConnectionData = { + remoteNodeId: NodeId; + remoteHost: Host; + remotePort: Port; + type: 'forward' | 'reverse'; +}; + +type ConnectionEstablishedCallback = (data: ConnectionData) => any; + type PingMessage = { type: 'ping'; }; @@ -73,6 +82,8 @@ export type { TLSConfig, ProxyConfig, ConnectionInfo, + ConnectionData, + ConnectionEstablishedCallback, PingMessage, PongMessage, NetworkMessage, diff --git a/tests/network/Proxy.test.ts b/tests/network/Proxy.test.ts index fc8055ea7..7e8b12d46 100644 --- a/tests/network/Proxy.test.ts +++ b/tests/network/Proxy.test.ts @@ -1,6 +1,6 @@ import type { AddressInfo, Socket } from 'net'; import type { KeyPairPem } from 
'@/keys/types'; -import type { Host, Port } from '@/network/types'; +import type { ConnectionData, Host, Port } from '@/network/types'; import net from 'net'; import http from 'http'; import tls from 'tls'; @@ -2973,4 +2973,120 @@ describe(Proxy.name, () => { utpSocket.unref(); await serverClose(); }); + test('connectionEstablishedCallback is called when a ReverseConnection is established', async () => { + const clientKeyPair = await keysUtils.generateKeyPair(1024); + const clientKeyPairPem = keysUtils.keyPairToPem(clientKeyPair); + const clientCert = keysUtils.generateCertificate( + clientKeyPair.publicKey, + clientKeyPair.privateKey, + clientKeyPair.privateKey, + 86400, + ); + const clientCertPem = keysUtils.certToPem(clientCert); + const { + serverListen, + serverClose, + serverConnP, + serverConnEndP, + serverConnClosedP, + serverHost, + serverPort, + } = tcpServer(); + await serverListen(0, localHost); + const clientNodeId = keysUtils.certNodeId(clientCert)!; + let callbackData: ConnectionData | undefined; + const proxy = new Proxy({ + logger: logger, + authToken: '', + connectionEstablishedCallback: (data) => { + callbackData = data; + }, + }); + await proxy.start({ + serverHost: serverHost(), + serverPort: serverPort(), + proxyHost: localHost, + tlsConfig: { + keyPrivatePem: keyPairPem.privateKey, + certChainPem: certPem, + }, + }); + + const proxyHost = proxy.getProxyHost(); + const proxyPort = proxy.getProxyPort(); + const { p: clientReadyP, resolveP: resolveClientReadyP } = promise(); + const { p: clientSecureConnectP, resolveP: resolveClientSecureConnectP } = + promise(); + const { p: clientCloseP, resolveP: resolveClientCloseP } = promise(); + const utpSocket = UTP({ allowHalfOpen: true }); + const utpSocketBind = promisify(utpSocket.bind).bind(utpSocket); + const handleMessage = async (data: Buffer) => { + const msg = networkUtils.unserializeNetworkMessage(data); + if (msg.type === 'ping') { + resolveClientReadyP(); + await 
send(networkUtils.pongBuffer); + } + }; + utpSocket.on('message', handleMessage); + const send = async (data: Buffer) => { + const utpSocketSend = promisify(utpSocket.send).bind(utpSocket); + await utpSocketSend(data, 0, data.byteLength, proxyPort, proxyHost); + }; + await utpSocketBind(0, localHost); + const utpSocketPort = utpSocket.address().port; + await proxy.openConnectionReverse( + localHost, + utpSocketPort as Port, + ); + const utpConn = utpSocket.connect(proxyPort, proxyHost); + const tlsSocket = tls.connect( + { + key: Buffer.from(clientKeyPairPem.privateKey, 'ascii'), + cert: Buffer.from(clientCertPem, 'ascii'), + socket: utpConn, + rejectUnauthorized: false, + }, + () => { + resolveClientSecureConnectP(); + }, + ); + let tlsSocketEnded = false; + tlsSocket.on('end', () => { + tlsSocketEnded = true; + if (utpConn.destroyed) { + tlsSocket.destroy(); + } else { + tlsSocket.end(); + tlsSocket.destroy(); + } + }); + tlsSocket.on('close', () => { + resolveClientCloseP(); + }); + await send(networkUtils.pingBuffer); + expect(proxy.getConnectionReverseCount()).toBe(1); + await clientReadyP; + await clientSecureConnectP; + await serverConnP; + await proxy.closeConnectionReverse( + localHost, + utpSocketPort as Port, + ); + expect(proxy.getConnectionReverseCount()).toBe(0); + await clientCloseP; + await serverConnEndP; + await serverConnClosedP; + expect(tlsSocketEnded).toBe(true); + utpSocket.off('message', handleMessage); + utpSocket.close(); + utpSocket.unref(); + await proxy.stop(); + await serverClose(); + + // Checking callback data + expect(callbackData?.remoteNodeId.equals(clientNodeId)).toBe(true); + expect(callbackData?.remoteHost).toEqual(localHost); + expect(callbackData?.remotePort).toEqual(utpSocketPort); + expect(callbackData?.type).toEqual('reverse'); + }); }); From e18a748091da269a8b639689781a393d0fd2144f Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Thu, 24 Mar 2022 18:10:28 +1100 Subject: [PATCH 07/39] feat: `Proxy` trigger adding nodes to 
`Nodegraph` Added an event to the `EventBus` that is triggered by the `Proxy`'s `connectionEstablishedCallback`. this adds the node to the `NodeGraph`. Related #344 --- src/PolykeyAgent.ts | 28 +++++++++++++++++++++- tests/nodes/NodeManager.test.ts | 42 +++++++++++++++++++++++++++++++++ 2 files changed, 69 insertions(+), 1 deletion(-) diff --git a/src/PolykeyAgent.ts b/src/PolykeyAgent.ts index 68e3b5571..a3f5241f3 100644 --- a/src/PolykeyAgent.ts +++ b/src/PolykeyAgent.ts @@ -1,6 +1,6 @@ import type { FileSystem } from './types'; import type { PolykeyWorkerManagerInterface } from './workers/types'; -import type { Host, Port } from './network/types'; +import type { ConnectionData, Host, Port } from './network/types'; import type { SeedNodes } from './nodes/types'; import type { KeyManagerChangeData } from './keys/types'; import path from 'path'; @@ -8,6 +8,7 @@ import process from 'process'; import Logger from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { CreateDestroyStartStop } from '@matrixai/async-init/dist/CreateDestroyStartStop'; +import * as networkUtils from '@/network/utils'; import KeyManager from './keys/KeyManager'; import Status from './status/Status'; import Schema from './schema/Schema'; @@ -59,8 +60,10 @@ class PolykeyAgent { */ public static readonly eventSymbols = { [KeyManager.name]: Symbol(KeyManager.name), + [Proxy.name]: Symbol(Proxy.name), } as { readonly KeyManager: unique symbol; + readonly Proxy: unique symbol; }; public static async createPolykeyAgent({ @@ -266,6 +269,8 @@ class PolykeyAgent { proxy ?? 
new Proxy({ ...proxyConfig_, + connectionEstablishedCallback: (data) => + events.emitAsync(PolykeyAgent.eventSymbols.Proxy, data), logger: logger.getChild(Proxy.name), }); nodeGraph = @@ -542,6 +547,27 @@ class PolykeyAgent { this.logger.info(`${KeyManager.name} change propagated`); }, ); + this.events.on( + PolykeyAgent.eventSymbols.Proxy, + async (data: ConnectionData) => { + if (data.type === 'reverse') { + const address = networkUtils.buildAddress( + data.remoteHost, + data.remotePort, + ); + const nodeIdEncoded = nodesUtils.encodeNodeId(data.remoteNodeId); + this.logger.info( + `Reverse connection adding ${nodeIdEncoded}:${address} to ${NodeGraph.name}`, + ); + // Reverse connection was established and authenticated, + // add it to the node graph + await this.nodeManager.setNode(data.remoteNodeId, { + host: data.remoteHost, + port: data.remotePort, + }); + } + }, + ); const networkConfig_ = { ...config.defaults.networkConfig, ...utils.filterEmptyObject(networkConfig), diff --git a/tests/nodes/NodeManager.test.ts b/tests/nodes/NodeManager.test.ts index d74df5c6a..8d2c249b8 100644 --- a/tests/nodes/NodeManager.test.ts +++ b/tests/nodes/NodeManager.test.ts @@ -18,6 +18,7 @@ import Sigchain from '@/sigchain/Sigchain'; import * as claimsUtils from '@/claims/utils'; import { promisify, sleep } from '@/utils'; import * as nodesUtils from '@/nodes/utils'; +import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as nodesTestUtils from './utils'; describe(`${NodeManager.name} test`, () => { @@ -604,4 +605,45 @@ describe(`${NodeManager.name} test`, () => { expect(oldestNodeNew).toBeUndefined(); nodeManagerPingMock.mockRestore(); }); + test('should add node when an incoming connection is established', async () => { + let server: PolykeyAgent | undefined; + try { + server = await PolykeyAgent.createPolykeyAgent({ + password: 'password', + nodePath: path.join(dataDir, 'server'), + keysConfig: { + rootKeyPairBits: 2048, + }, + logger: logger, + }); + const 
serverNodeId = server.keyManager.getNodeId(); + const serverNodeAddress: NodeAddress = { + host: server.proxy.getProxyHost(), + port: server.proxy.getProxyPort(), + }; + await nodeGraph.setNode(serverNodeId, serverNodeAddress); + + const expectedHost = proxy.getProxyHost(); + const expectedPort = proxy.getProxyPort(); + const expectedNodeId = keyManager.getNodeId(); + + const nodeData = await server.nodeGraph.getNode(expectedNodeId); + expect(nodeData).toBeUndefined(); + + // Now we want to connect to the server by making an echo request. + await nodeConnectionManager.withConnF(serverNodeId, async (conn) => { + const client = conn.getClient(); + await client.echo(new utilsPB.EchoMessage().setChallenge('hello')); + }); + + const nodeData2 = await server.nodeGraph.getNode(expectedNodeId); + expect(nodeData2).toBeDefined(); + expect(nodeData2?.address.host).toEqual(expectedHost); + expect(nodeData2?.address.port).toEqual(expectedPort); + } finally { + // Clean up + await server?.stop(); + await server?.destroy(); + } + }); }); From 22c4974dc3bc7f5daf6f1236c545f5d787355222 Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Fri, 25 Mar 2022 18:02:09 +1100 Subject: [PATCH 08/39] feat: added optional timeout timers to `NodeConnectionManager` methods In some cases we want to specify how long we attempt to connect to a node on a per-connection basis. 
Related #363 --- src/PolykeyClient.ts | 8 +- src/agent/GRPCClientAgent.ts | 7 +- src/client/GRPCClientClient.ts | 7 +- src/grpc/GRPCClient.ts | 17 ++- src/nodes/NodeConnection.ts | 7 +- src/nodes/NodeConnectionManager.ts | 119 ++++++++++++------ src/nodes/NodeManager.ts | 1 + tests/agent/GRPCClientAgent.test.ts | 5 +- tests/agent/utils.ts | 3 +- tests/client/GRPCClientClient.test.ts | 3 +- tests/client/utils.ts | 4 +- tests/grpc/GRPCClient.test.ts | 19 +-- tests/grpc/utils/GRPCClientTest.ts | 7 +- tests/nodes/NodeConnection.test.ts | 15 +-- .../NodeConnectionManager.lifecycle.test.ts | 1 + 15 files changed, 143 insertions(+), 80 deletions(-) diff --git a/src/PolykeyClient.ts b/src/PolykeyClient.ts index b124feefa..bea2b830b 100644 --- a/src/PolykeyClient.ts +++ b/src/PolykeyClient.ts @@ -1,4 +1,4 @@ -import type { FileSystem } from './types'; +import type { FileSystem, Timer } from './types'; import type { NodeId } from './nodes/types'; import type { Host, Port } from './network/types'; @@ -29,7 +29,7 @@ class PolykeyClient { nodePath = config.defaults.nodePath, session, grpcClient, - timeout, + timer, fs = require('fs'), logger = new Logger(this.name), fresh = false, @@ -38,7 +38,7 @@ class PolykeyClient { host: Host; port: Port; nodePath?: string; - timeout?: number; + timer?: Timer; session?: Session; grpcClient?: GRPCClientClient; fs?: FileSystem; @@ -66,7 +66,7 @@ class PolykeyClient { port, tlsConfig: { keyPrivatePem: undefined, certChainPem: undefined }, session, - timeout, + timer, logger: logger.getChild(GRPCClientClient.name), })); const pkClient = new PolykeyClient({ diff --git a/src/agent/GRPCClientAgent.ts b/src/agent/GRPCClientAgent.ts index bb4593785..db94979db 100644 --- a/src/agent/GRPCClientAgent.ts +++ b/src/agent/GRPCClientAgent.ts @@ -10,6 +10,7 @@ import type * as utilsPB from '../proto/js/polykey/v1/utils/utils_pb'; import type * as vaultsPB from '../proto/js/polykey/v1/vaults/vaults_pb'; import type * as nodesPB from 
'../proto/js/polykey/v1/nodes/nodes_pb'; import type * as notificationsPB from '../proto/js/polykey/v1/notifications/notifications_pb'; +import type { Timer } from '../types'; import Logger from '@matrixai/logger'; import { CreateDestroy, ready } from '@matrixai/async-init/dist/CreateDestroy'; import * as agentErrors from './errors'; @@ -32,7 +33,7 @@ class GRPCClientAgent extends GRPCClient { port, tlsConfig, proxyConfig, - timeout = Infinity, + timer, destroyCallback = async () => {}, logger = new Logger(this.name), }: { @@ -41,7 +42,7 @@ class GRPCClientAgent extends GRPCClient { port: Port; tlsConfig?: Partial; proxyConfig?: ProxyConfig; - timeout?: number; + timer?: Timer; destroyCallback?: () => Promise; logger?: Logger; }): Promise { @@ -53,7 +54,7 @@ class GRPCClientAgent extends GRPCClient { port, tlsConfig, proxyConfig, - timeout, + timer, logger, }); const grpcClientAgent = new GRPCClientAgent({ diff --git a/src/client/GRPCClientClient.ts b/src/client/GRPCClientClient.ts index 2b1b905db..8b9816536 100644 --- a/src/client/GRPCClientClient.ts +++ b/src/client/GRPCClientClient.ts @@ -14,6 +14,7 @@ import type * as identitiesPB from '../proto/js/polykey/v1/identities/identities import type * as keysPB from '../proto/js/polykey/v1/keys/keys_pb'; import type * as permissionsPB from '../proto/js/polykey/v1/permissions/permissions_pb'; import type * as secretsPB from '../proto/js/polykey/v1/secrets/secrets_pb'; +import type { Timer } from '../types'; import { CreateDestroy, ready } from '@matrixai/async-init/dist/CreateDestroy'; import Logger from '@matrixai/logger'; import * as clientErrors from './errors'; @@ -38,7 +39,7 @@ class GRPCClientClient extends GRPCClient { tlsConfig, proxyConfig, session, - timeout = Infinity, + timer, destroyCallback = async () => {}, logger = new Logger(this.name), }: { @@ -48,7 +49,7 @@ class GRPCClientClient extends GRPCClient { tlsConfig?: Partial; proxyConfig?: ProxyConfig; session?: Session; - timeout?: number; + timer?: 
Timer; destroyCallback?: () => Promise; logger?: Logger; }): Promise { @@ -64,7 +65,7 @@ class GRPCClientClient extends GRPCClient { port, tlsConfig, proxyConfig, - timeout, + timer, interceptors, logger, }); diff --git a/src/grpc/GRPCClient.ts b/src/grpc/GRPCClient.ts index 4e88291a1..0434a1753 100644 --- a/src/grpc/GRPCClient.ts +++ b/src/grpc/GRPCClient.ts @@ -9,6 +9,7 @@ import type { import type { NodeId } from '../nodes/types'; import type { Certificate } from '../keys/types'; import type { Host, Port, TLSConfig, ProxyConfig } from '../network/types'; +import type { Timer } from '../types'; import http2 from 'http2'; import Logger from '@matrixai/logger'; import * as grpc from '@grpc/grpc-js'; @@ -44,7 +45,7 @@ abstract class GRPCClient { port, tlsConfig, proxyConfig, - timeout = Infinity, + timer, interceptors = [], logger = new Logger(this.name), }: { @@ -58,7 +59,7 @@ abstract class GRPCClient { port: Port; tlsConfig?: Partial; proxyConfig?: ProxyConfig; - timeout?: number; + timer?: Timer; interceptors?: Array; logger?: Logger; }): Promise<{ @@ -123,9 +124,17 @@ abstract class GRPCClient { } const waitForReady = promisify(client.waitForReady).bind(client); // Add the current unix time because grpc expects the milliseconds since unix epoch - timeout += Date.now(); try { - await waitForReady(timeout); + if (timer != null) { + await Promise.race([timer.timerP, waitForReady(Infinity)]); + // If the timer resolves first we throw a timeout error + if (timer?.timedOut === true) { + throw new grpcErrors.ErrorGRPCClientTimeout(); + } + } else { + // No timer given so we wait forever + await waitForReady(Infinity); + } } catch (e) { // If we fail here then we leak the client object... 
client.close(); diff --git a/src/nodes/NodeConnection.ts b/src/nodes/NodeConnection.ts index f79272413..f4f4fbfb3 100644 --- a/src/nodes/NodeConnection.ts +++ b/src/nodes/NodeConnection.ts @@ -5,6 +5,7 @@ import type { Certificate, PublicKey, PublicKeyPem } from '../keys/types'; import type Proxy from '../network/Proxy'; import type GRPCClient from '../grpc/GRPCClient'; import type NodeConnectionManager from './NodeConnectionManager'; +import type { Timer } from '../types'; import Logger from '@matrixai/logger'; import { CreateDestroy, ready } from '@matrixai/async-init/dist/CreateDestroy'; import * as asyncInit from '@matrixai/async-init'; @@ -38,7 +39,7 @@ class NodeConnection { targetHost, targetPort, targetHostname, - connConnectTime = 20000, + timer, proxy, keyManager, clientFactory, @@ -50,7 +51,7 @@ class NodeConnection { targetHost: Host; targetPort: Port; targetHostname?: Hostname; - connConnectTime?: number; + timer?: Timer; proxy: Proxy; keyManager: KeyManager; clientFactory: (...args) => Promise; @@ -125,7 +126,7 @@ class NodeConnection { await nodeConnection.destroy(); } }, - timeout: connConnectTime, + timer: timer, }), holePunchPromises, ]); diff --git a/src/nodes/NodeConnectionManager.ts b/src/nodes/NodeConnectionManager.ts index c20f093f3..5b5810492 100644 --- a/src/nodes/NodeConnectionManager.ts +++ b/src/nodes/NodeConnectionManager.ts @@ -26,6 +26,7 @@ import * as networkUtils from '../network/utils'; import * as agentErrors from '../agent/errors'; import * as grpcErrors from '../grpc/errors'; import * as nodesPB from '../proto/js/polykey/v1/nodes/nodes_pb'; +import { timerStart } from '../utils'; type ConnectionAndTimer = { connection: NodeConnection; @@ -123,14 +124,22 @@ class NodeConnectionManager { * itself is such that we can pass targetNodeId as a parameter (as opposed to * an acquire function with no parameters). 
* @param targetNodeId Id of target node to communicate with + * @param timer Connection timeout timer + * @param address Optional address to connect to * @returns ResourceAcquire Resource API for use in with contexts */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) public async acquireConnection( targetNodeId: NodeId, + timer?: Timer, + address?: NodeAddress, ): Promise>> { return async () => { - const { connection, timer } = await this.getConnection(targetNodeId); + const { connection, timer } = await this.getConnection( + targetNodeId, + address, + timer, + ); // Acquire the read lock and the release function const [release] = await this.connectionLocks.lock([ targetNodeId.toString(), @@ -164,14 +173,16 @@ class NodeConnectionManager { * for use with normal arrow function * @param targetNodeId Id of target node to communicate with * @param f Function to handle communication + * @param timer Connection timeout timer */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) public async withConnF( targetNodeId: NodeId, f: (conn: NodeConnection) => Promise, + timer?: Timer, ): Promise { return await withF( - [await this.acquireConnection(targetNodeId)], + [await this.acquireConnection(targetNodeId, timer, undefined)], async ([conn]) => { this.logger.info( `withConnF calling function with connection to ${nodesUtils.encodeNodeId( @@ -190,6 +201,7 @@ class NodeConnectionManager { * for use with a generator function * @param targetNodeId Id of target node to communicate with * @param g Generator function to handle communication + * @param timer Connection timeout timer */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) public async *withConnG( @@ -197,8 +209,13 @@ class NodeConnectionManager { g: ( conn: NodeConnection, ) => AsyncGenerator, + timer?: Timer, ): AsyncGenerator { - const acquire = await this.acquireConnection(targetNodeId); + const acquire = await this.acquireConnection( + targetNodeId, + timer, + undefined, + ); 
const [release, conn] = await acquire(); let caughtError; try { @@ -216,10 +233,14 @@ class NodeConnectionManager { * Create a connection to another node (without performing any function). * This is a NOOP if a connection already exists. * @param targetNodeId Id of node we are creating connection to - * @returns ConnectionAndLock that was created or exists in the connection map. + * @param address Optional address to connect to + * @param timer Connection timeout timer + * @returns ConnectionAndLock that was created or exists in the connection map */ protected async getConnection( targetNodeId: NodeId, + address?: NodeAddress, + timer?: Timer, ): Promise { this.logger.info( `Getting connection to ${nodesUtils.encodeNodeId(targetNodeId)}`, @@ -273,7 +294,7 @@ class NodeConnectionManager { keyManager: this.keyManager, nodeConnectionManager: this, destroyCallback, - connConnectTime: this.connConnectTime, + timer: timer ?? timerStart(this.connConnectTime), logger: this.logger.getChild( `${NodeConnection.name} ${targetHost}:${targetAddress.port}`, ), @@ -281,13 +302,13 @@ class NodeConnectionManager { GRPCClientAgent.createGRPCClientAgent(args), }); // Creating TTL timeout - const timer = setTimeout(async () => { + const timeToLiveTimer = setTimeout(async () => { await this.destroyConnection(targetNodeId); }, this.connTimeoutTime); const newConnAndTimer: ConnectionAndTimer = { connection: newConnection, - timer: timer, + timer: timeToLiveTimer, }; this.connections.set(targetNodeIdString, newConnAndTimer); return newConnAndTimer; @@ -396,11 +417,13 @@ class NodeConnectionManager { * port). * @param targetNodeId ID of the node attempting to be found (i.e. 
attempting * to find its IP address and port) + * @param timer Connection timeout timer * @returns whether the target node was located in the process */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) public async getClosestGlobalNodes( targetNodeId: NodeId, + timer?: Timer, ): Promise { // Let foundTarget: boolean = false; let foundAddress: NodeAddress | undefined = undefined; @@ -441,7 +464,7 @@ class NodeConnectionManager { // call to getConnectionToNode // FIXME: no tran await this.nodeGraph.setNode(nextNodeId, nextNodeAddress.address); - await this.getConnection(nextNodeId); + await this.getConnection(nextNodeId, undefined, timer); } catch (e) { // If we can't connect to the node, then skip it continue; @@ -451,11 +474,12 @@ class NodeConnectionManager { const foundClosest = await this.getRemoteNodeClosestNodes( nextNodeId, targetNodeId, + timer, ); // Check to see if any of these are the target node. At the same time, add // them to the shortlist for (const [nodeId, nodeData] of foundClosest) { - // Ignore any nodes that have been contacted + // Ignore a`ny nodes that have been contacted if (contacted[nodeId]) { continue; } @@ -494,40 +518,46 @@ class NodeConnectionManager { * target node ID. 
* @param nodeId the node ID to search on * @param targetNodeId the node ID to find other nodes closest to it + * @param timer Connection timeout timer * @returns list of nodes and their IP/port that are closest to the target */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) public async getRemoteNodeClosestNodes( nodeId: NodeId, targetNodeId: NodeId, + timer?: Timer, ): Promise> { // Construct the message const nodeIdMessage = new nodesPB.Node(); nodeIdMessage.setNodeId(nodesUtils.encodeNodeId(targetNodeId)); // Send through client - return this.withConnF(nodeId, async (connection) => { - const client = await connection.getClient(); - const response = await client.nodesClosestLocalNodesGet(nodeIdMessage); - const nodes: Array<[NodeId, NodeData]> = []; - // Loop over each map element (from the returned response) and populate nodes - response.getNodeTableMap().forEach((address, nodeIdString: string) => { - const nodeId = nodesUtils.decodeNodeId(nodeIdString); - // If the nodeId is not valid we don't add it to the list of nodes - if (nodeId != null) { - nodes.push([ - nodeId, - { - address: { - host: address.getHost() as Host | Hostname, - port: address.getPort() as Port, + return this.withConnF( + nodeId, + async (connection) => { + const client = await connection.getClient(); + const response = await client.nodesClosestLocalNodesGet(nodeIdMessage); + const nodes: Array<[NodeId, NodeData]> = []; + // Loop over each map element (from the returned response) and populate nodes + response.getNodeTableMap().forEach((address, nodeIdString: string) => { + const nodeId = nodesUtils.decodeNodeId(nodeIdString); + // If the nodeId is not valid we don't add it to the list of nodes + if (nodeId != null) { + nodes.push([ + nodeId, + { + address: { + host: address.getHost() as Host | Hostname, + port: address.getPort() as Port, + }, + lastUpdated: 0, // FIXME? }, - lastUpdated: 0, // FIXME? 
- }, - ]); - } - }); - return nodes; - }); + ]); + } + }); + return nodes; + }, + timer, + ); } /** @@ -541,13 +571,14 @@ class NodeConnectionManager { * This has been removed from start() as there's a chicken-egg scenario * where we require the NodeGraph instance to be created in order to get * connections. + * @param timer Connection timeout timer */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - public async syncNodeGraph() { + public async syncNodeGraph(timer?: Timer) { for (const seedNodeId of this.getSeedNodes()) { // Check if the connection is viable try { - await this.getConnection(seedNodeId); + await this.getConnection(seedNodeId, undefined, timer); } catch (e) { if (e instanceof nodesErrors.ErrorNodeConnectionTimeout) continue; throw e; @@ -556,6 +587,7 @@ class NodeConnectionManager { const nodes = await this.getRemoteNodeClosestNodes( seedNodeId, this.keyManager.getNodeId(), + timer, ); for (const [nodeId, nodeData] of nodes) { // FIXME: this should be the `nodeManager.setNode` @@ -574,6 +606,7 @@ class NodeConnectionManager { * @param targetNodeId node ID of the target node to hole punch * @param proxyAddress string of address in the form `proxyHost:proxyPort` * @param signature signature to verify source node is sender (signature based + * @param timer Connection timeout timer * on proxyAddress as message) */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) @@ -583,16 +616,21 @@ class NodeConnectionManager { targetNodeId: NodeId, proxyAddress: string, signature: Buffer, + timer?: Timer, ): Promise { const relayMsg = new nodesPB.Relay(); relayMsg.setSrcId(nodesUtils.encodeNodeId(sourceNodeId)); relayMsg.setTargetId(nodesUtils.encodeNodeId(targetNodeId)); relayMsg.setProxyAddress(proxyAddress); relayMsg.setSignature(signature.toString()); - await this.withConnF(relayNodeId, async (connection) => { - const client = connection.getClient(); - await client.nodesHolePunchMessageSend(relayMsg); - }); + await this.withConnF( + 
relayNodeId, + async (connection) => { + const client = connection.getClient(); + await client.nodesHolePunchMessageSend(relayMsg); + }, + timer, + ); } /** @@ -602,15 +640,20 @@ class NodeConnectionManager { * node). * @param message the original relay message (assumed to be created in * nodeConnection.start()) + * @param timer Connection timeout timer */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - public async relayHolePunchMessage(message: nodesPB.Relay): Promise { + public async relayHolePunchMessage( + message: nodesPB.Relay, + timer?: Timer, + ): Promise { await this.sendHolePunchMessage( validationUtils.parseNodeId(message.getTargetId()), validationUtils.parseNodeId(message.getSrcId()), validationUtils.parseNodeId(message.getTargetId()), message.getProxyAddress(), Buffer.from(message.getSignature()), + timer, ); } diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index d5b905cbc..9ef964b3f 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -7,6 +7,7 @@ import type Sigchain from '../sigchain/Sigchain'; import type { ChainData, ChainDataEncoded } from '../sigchain/types'; import type { NodeId, NodeAddress, NodeBucket } from '../nodes/types'; import type { ClaimEncoded } from '../claims/types'; +import type { Timer } from '../types'; import Logger from '@matrixai/logger'; import * as nodesErrors from './errors'; import * as nodesUtils from './utils'; diff --git a/tests/agent/GRPCClientAgent.test.ts b/tests/agent/GRPCClientAgent.test.ts index 60a84410c..89c9ec9d7 100644 --- a/tests/agent/GRPCClientAgent.test.ts +++ b/tests/agent/GRPCClientAgent.test.ts @@ -21,6 +21,7 @@ import NotificationsManager from '@/notifications/NotificationsManager'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as agentErrors from '@/agent/errors'; import * as keysUtils from '@/keys/utils'; +import { timerStart } from '@/utils'; import * as testAgentUtils from './utils'; describe(GRPCClientAgent.name, 
() => { @@ -257,7 +258,7 @@ describe(GRPCClientAgent.name, () => { port: clientProxy1.getForwardPort(), authToken: clientProxy1.authToken, }, - timeout: 5000, + timer: timerStart(5000), logger, }); @@ -291,7 +292,7 @@ describe(GRPCClientAgent.name, () => { port: clientProxy2.getForwardPort(), authToken: clientProxy2.authToken, }, - timeout: 5000, + timer: timerStart(5000), }); }); afterEach(async () => { diff --git a/tests/agent/utils.ts b/tests/agent/utils.ts index 7712d0fa8..afa61c0c0 100644 --- a/tests/agent/utils.ts +++ b/tests/agent/utils.ts @@ -19,6 +19,7 @@ import { createAgentService, GRPCClientAgent, } from '@/agent'; +import { timerStart } from '@/utils'; import * as testNodesUtils from '../nodes/utils'; async function openTestAgentServer({ @@ -94,7 +95,7 @@ async function openTestAgentClient( logger: logger, destroyCallback: async () => {}, proxyConfig, - timeout: 30000, + timer: timerStart(30000), }); } diff --git a/tests/client/GRPCClientClient.test.ts b/tests/client/GRPCClientClient.test.ts index bb083f816..b90406a80 100644 --- a/tests/client/GRPCClientClient.test.ts +++ b/tests/client/GRPCClientClient.test.ts @@ -11,6 +11,7 @@ import Session from '@/sessions/Session'; import * as keysUtils from '@/keys/utils'; import * as clientErrors from '@/client/errors'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; +import { timerStart } from '@/utils'; import * as testClientUtils from './utils'; import * as testUtils from '../utils'; @@ -76,7 +77,7 @@ describe(GRPCClientClient.name, () => { port: port as Port, tlsConfig: { keyPrivatePem: undefined, certChainPem: undefined }, logger: logger, - timeout: 10000, + timer: timerStart(10000), session: session, }); await client.destroy(); diff --git a/tests/client/utils.ts b/tests/client/utils.ts index 036257328..9af325dba 100644 --- a/tests/client/utils.ts +++ b/tests/client/utils.ts @@ -11,7 +11,7 @@ import { } from '@/proto/js/polykey/v1/client_service_grpc_pb'; import createClientService from 
'@/client/service'; import PolykeyClient from '@/PolykeyClient'; -import { promisify } from '@/utils'; +import { promisify, timerStart } from '@/utils'; import * as grpcUtils from '@/grpc/utils'; async function openTestClientServer({ @@ -82,7 +82,7 @@ async function openTestClientClient( port: port, fs, logger, - timeout: 30000, + timer: timerStart(30000), }); return pkc; diff --git a/tests/grpc/GRPCClient.test.ts b/tests/grpc/GRPCClient.test.ts index e18f301e6..bf252bc6d 100644 --- a/tests/grpc/GRPCClient.test.ts +++ b/tests/grpc/GRPCClient.test.ts @@ -16,6 +16,7 @@ import * as keysUtils from '@/keys/utils'; import * as grpcErrors from '@/grpc/errors'; import * as clientUtils from '@/client/utils'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; +import { timerStart } from '@/utils'; import * as utils from './utils'; import * as testNodesUtils from '../nodes/utils'; import { expectRemoteError } from '../utils'; @@ -110,7 +111,7 @@ describe('GRPCClient', () => { keyPrivatePem: keysUtils.privateKeyToPem(clientKeyPair.privateKey), certChainPem: keysUtils.certToPem(clientCert), }, - timeout: 1000, + timer: timerStart(1000), logger, }); await client.destroy(); @@ -124,7 +125,7 @@ describe('GRPCClient', () => { keyPrivatePem: keysUtils.privateKeyToPem(clientKeyPair.privateKey), certChainPem: keysUtils.certToPem(clientCert), }, - timeout: 1000, + timer: timerStart(1000), logger, }); const m = new utilsPB.EchoMessage(); @@ -157,7 +158,7 @@ describe('GRPCClient', () => { certChainPem: keysUtils.certToPem(clientCert), }, session, - timeout: 1000, + timer: timerStart(1000), logger, }); let pCall: PromiseUnaryCall; @@ -193,7 +194,7 @@ describe('GRPCClient', () => { keyPrivatePem: keysUtils.privateKeyToPem(clientKeyPair.privateKey), certChainPem: keysUtils.certToPem(clientCert), }, - timeout: 1000, + timer: timerStart(1000), logger, }); const challenge = 'f9s8d7f4'; @@ -236,7 +237,7 @@ describe('GRPCClient', () => { certChainPem: 
keysUtils.certToPem(clientCert), }, session, - timeout: 1000, + timer: timerStart(1000), logger, }); const challenge = 'f9s8d7f4'; @@ -261,7 +262,7 @@ describe('GRPCClient', () => { keyPrivatePem: keysUtils.privateKeyToPem(clientKeyPair.privateKey), certChainPem: keysUtils.certToPem(clientCert), }, - timeout: 1000, + timer: timerStart(1000), logger, }); const [stream, response] = client.clientStream(); @@ -299,7 +300,7 @@ describe('GRPCClient', () => { certChainPem: keysUtils.certToPem(clientCert), }, session, - timeout: 1000, + timer: timerStart(1000), logger, }); const [stream] = client.clientStream(); @@ -322,7 +323,7 @@ describe('GRPCClient', () => { keyPrivatePem: keysUtils.privateKeyToPem(clientKeyPair.privateKey), certChainPem: keysUtils.certToPem(clientCert), }, - timeout: 1000, + timer: timerStart(1000), logger, }); const stream = client.duplexStream(); @@ -357,7 +358,7 @@ describe('GRPCClient', () => { certChainPem: keysUtils.certToPem(clientCert), }, session, - timeout: 1000, + timer: timerStart(1000), logger, }); const stream = client.duplexStream(); diff --git a/tests/grpc/utils/GRPCClientTest.ts b/tests/grpc/utils/GRPCClientTest.ts index e3c5f9489..3b2af291d 100644 --- a/tests/grpc/utils/GRPCClientTest.ts +++ b/tests/grpc/utils/GRPCClientTest.ts @@ -5,6 +5,7 @@ import type { Host, Port, TLSConfig, ProxyConfig } from '@/network/types'; import type * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import type { ClientReadableStream } from '@grpc/grpc-js/build/src/call'; import type { AsyncGeneratorReadableStreamClient } from '@/grpc/types'; +import type { Timer } from '@/types'; import Logger from '@matrixai/logger'; import { CreateDestroy, ready } from '@matrixai/async-init/dist/CreateDestroy'; import GRPCClient from '@/grpc/GRPCClient'; @@ -22,7 +23,7 @@ class GRPCClientTest extends GRPCClient { tlsConfig, proxyConfig, session, - timeout = Infinity, + timer, destroyCallback, logger = new Logger(this.name), }: { @@ -32,7 +33,7 @@ class 
GRPCClientTest extends GRPCClient { tlsConfig?: TLSConfig; proxyConfig?: ProxyConfig; session?: Session; - timeout?: number; + timer?: Timer; destroyCallback?: () => Promise; logger?: Logger; }): Promise { @@ -48,7 +49,7 @@ class GRPCClientTest extends GRPCClient { port, tlsConfig, proxyConfig, - timeout, + timer, interceptors, logger, }); diff --git a/tests/nodes/NodeConnection.test.ts b/tests/nodes/NodeConnection.test.ts index 3ce2e7183..5ef4ed470 100644 --- a/tests/nodes/NodeConnection.test.ts +++ b/tests/nodes/NodeConnection.test.ts @@ -33,6 +33,7 @@ import * as GRPCErrors from '@/grpc/errors'; import * as nodesUtils from '@/nodes/utils'; import * as agentErrors from '@/agent/errors'; import * as grpcUtils from '@/grpc/utils'; +import { timerStart } from '@/utils'; import * as testNodesUtils from './utils'; import * as testUtils from '../utils'; import * as grpcTestUtils from '../grpc/utils'; @@ -483,7 +484,7 @@ describe('${NodeConnection.name} test', () => { // Have a nodeConnection try to connect to it const killSelf = jest.fn(); nodeConnection = await NodeConnection.createNodeConnection({ - connConnectTime: 500, + timer: timerStart(500), proxy: clientProxy, keyManager: clientKeyManager, logger: logger, @@ -518,7 +519,7 @@ describe('${NodeConnection.name} test', () => { targetNodeId: targetNodeId, targetHost: '128.0.0.1' as Host, targetPort: 12345 as Port, - connConnectTime: 300, + timer: timerStart(300), proxy: clientProxy, keyManager: clientKeyManager, nodeConnectionManager: dummyNodeConnectionManager, @@ -593,7 +594,7 @@ describe('${NodeConnection.name} test', () => { // Have a nodeConnection try to connect to it const killSelf = jest.fn(); const nodeConnectionP = NodeConnection.createNodeConnection({ - connConnectTime: 500, + timer: timerStart(500), proxy: clientProxy, keyManager: clientKeyManager, logger: logger, @@ -636,7 +637,7 @@ describe('${NodeConnection.name} test', () => { // Have a nodeConnection try to connect to it const killSelf = jest.fn(); 
const nodeConnectionP = NodeConnection.createNodeConnection({ - connConnectTime: 500, + timer: timerStart(500), proxy: clientProxy, keyManager: clientKeyManager, logger: logger, @@ -674,7 +675,7 @@ describe('${NodeConnection.name} test', () => { // Have a nodeConnection try to connect to it const killSelf = jest.fn(); nodeConnection = await NodeConnection.createNodeConnection({ - connConnectTime: 500, + timer: timerStart(500), proxy: clientProxy, keyManager: clientKeyManager, logger: logger, @@ -736,7 +737,7 @@ describe('${NodeConnection.name} test', () => { const killSelfCheck = jest.fn(); const killSelfP = promise(); nodeConnection = await NodeConnection.createNodeConnection({ - connConnectTime: 2000, + timer: timerStart(2000), proxy: clientProxy, keyManager: clientKeyManager, logger: logger, @@ -806,7 +807,7 @@ describe('${NodeConnection.name} test', () => { const killSelfCheck = jest.fn(); const killSelfP = promise(); nodeConnection = await NodeConnection.createNodeConnection({ - connConnectTime: 2000, + timer: timerStart(2000), proxy: clientProxy, keyManager: clientKeyManager, logger: logger, diff --git a/tests/nodes/NodeConnectionManager.lifecycle.test.ts b/tests/nodes/NodeConnectionManager.lifecycle.test.ts index 6117ddc41..49239bb72 100644 --- a/tests/nodes/NodeConnectionManager.lifecycle.test.ts +++ b/tests/nodes/NodeConnectionManager.lifecycle.test.ts @@ -17,6 +17,7 @@ import * as nodesUtils from '@/nodes/utils'; import * as nodesErrors from '@/nodes/errors'; import * as keysUtils from '@/keys/utils'; import * as grpcUtils from '@/grpc/utils'; +import { timerStart } from '@/utils'; describe(`${NodeConnectionManager.name} lifecycle test`, () => { const logger = new Logger( From 44434ebddbc5d25b07937bc6812b3c1be268fcde Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Fri, 25 Mar 2022 18:20:05 +1100 Subject: [PATCH 09/39] refactor: updated implementation of `nodePing` Added `nodePing` command to `NodeConnectionManager` and `NodeManager.nodePing` calls that 
now. The new `nodePing` command essentially attempts to create a connection to the target node. In this process we authenticate the connection and check that the nodeIds match. If no address is provided it will default to trying to find the node through kademlia. Related #322 --- src/nodes/NodeConnectionManager.ts | 44 +++++ src/nodes/NodeManager.ts | 33 +--- .../NodeConnectionManager.lifecycle.test.ts | 166 +++++++++++++++++- 3 files changed, 216 insertions(+), 27 deletions(-) diff --git a/src/nodes/NodeConnectionManager.ts b/src/nodes/NodeConnectionManager.ts index 5b5810492..c190c20d3 100644 --- a/src/nodes/NodeConnectionManager.ts +++ b/src/nodes/NodeConnectionManager.ts @@ -666,6 +666,50 @@ class NodeConnectionManager { (nodeIdEncoded) => nodesUtils.decodeNodeId(nodeIdEncoded)!, ); } + + /** + * Checks if a connection can be made to the target. Returns true if the + * connection can be authenticated, it's certificate matches the nodeId and + * the addresses match if provided. Otherwise returns false. + * @param nodeId - NodeId of the target + * @param address - Optional address of the target + * @param connConnectTime - Optional timeout for making the connection.
+ */ + @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) + public async pingNode( + nodeId: NodeId, + address?: NodeAddress, + connConnectTime?: number, + ): Promise { + // If we can create a connection then we have punched though the NAT, + // authenticated and confimed the nodeId matches + let connAndLock: ConnectionAndLock; + try { + connAndLock = await this.createConnection( + nodeId, + address, + connConnectTime, + ); + } catch (e) { + if ( + e instanceof nodesErrors.ErrorNodeConnectionDestroyed || + e instanceof nodesErrors.ErrorNodeConnectionTimeout || + e instanceof grpcErrors.ErrorGRPC || + e instanceof agentErrors.ErrorAgentClientDestroyed + ) { + // Failed to connect, returning false + return false; + } + throw e; + } + const remoteHost = connAndLock.connection?.host; + const remotePort = connAndLock.connection?.port; + // If address wasn't set then nothing to check + if (address == null) return true; + // Check if the address information match in case there was an + // existing connection + return address.host === remoteHost && address.port === remotePort; + } } export default NodeConnectionManager; diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index 9ef964b3f..96db367a8 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -53,31 +53,16 @@ class NodeManager { /** * Determines whether a node in the Polykey network is online. * @return true if online, false if offline + * @param nodeId - NodeId of the node we're pinging + * @param address - Optional Host and Port we want to ping + * @param timeout - Optional timeout */ - // FIXME: We shouldn't be trying to find the node just to ping it - // since we are usually pinging it during the find procedure anyway. - // I think we should be providing the address of what we're trying to ping, - // possibly make it an optional parameter? 
- public async pingNode(targetNodeId: NodeId): Promise { - const targetAddress: NodeAddress = - await this.nodeConnectionManager.findNode(targetNodeId); - try { - // Attempt to open a connection via the forward proxy - // i.e. no NodeConnection object created (no need for GRPCClient) - await this.nodeConnectionManager.holePunchForward( - targetNodeId, - await networkUtils.resolveHost(targetAddress.host), - targetAddress.port, - ); - } catch (e) { - // If the connection request times out, then return false - if (e instanceof networkErrors.ErrorConnectionStart) { - return false; - } - // Throw any other error back up the callstack - throw e; - } - return true; + public async pingNode( + nodeId: NodeId, + address?: NodeAddress, + timeout?: number, + ): Promise { + return this.nodeConnectionManager.pingNode(nodeId, address, timeout); } /** diff --git a/tests/nodes/NodeConnectionManager.lifecycle.test.ts b/tests/nodes/NodeConnectionManager.lifecycle.test.ts index 49239bb72..5871fa4d3 100644 --- a/tests/nodes/NodeConnectionManager.lifecycle.test.ts +++ b/tests/nodes/NodeConnectionManager.lifecycle.test.ts @@ -1,4 +1,9 @@ -import type { NodeId, NodeIdString, SeedNodes } from '@/nodes/types'; +import type { + NodeAddress, + NodeId, + NodeIdString, + SeedNodes, +} from '@/nodes/types'; import type { Host, Port } from '@/network/types'; import fs from 'fs'; import path from 'path'; @@ -100,7 +105,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { password, nodePath: path.join(dataDir2, 'remoteNode1'), networkConfig: { - proxyHost: '127.0.0.1' as Host, + proxyHost: serverHost, }, logger: logger.getChild('remoteNode1'), }); @@ -110,7 +115,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { password, nodePath: path.join(dataDir2, 'remoteNode2'), networkConfig: { - proxyHost: '127.0.0.1' as Host, + proxyHost: serverHost, }, logger: logger.getChild('remoteNode2'), }); @@ -499,4 +504,159 @@ describe(`${NodeConnectionManager.name} lifecycle 
test`, () => { await nodeConnectionManager?.stop(); } }); + + // New ping tests + test('should ping node', async () => { + // NodeConnectionManager under test + let nodeConnectionManager: NodeConnectionManager | undefined; + try { + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + proxy, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + // @ts-ignore: kidnap connections + const connections = nodeConnectionManager.connections; + await expect(nodeConnectionManager.pingNode(remoteNodeId1)).resolves.toBe( + true, + ); + const finalConnLock = connections.get( + remoteNodeId1.toString() as NodeIdString, + ); + // Check entry is in map and lock is released + expect(finalConnLock).toBeDefined(); + expect(finalConnLock?.lock.isLocked()).toBeFalsy(); + } finally { + await nodeConnectionManager?.stop(); + } + }); + test('should ping node with address', async () => { + // NodeConnectionManager under test + let nodeConnectionManager: NodeConnectionManager | undefined; + try { + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + proxy, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + const remoteNodeAddress1: NodeAddress = { + host: remoteNode1.proxy.getProxyHost(), + port: remoteNode1.proxy.getProxyPort(), + }; + await nodeConnectionManager.pingNode(remoteNodeId1, remoteNodeAddress1); + } finally { + await nodeConnectionManager?.stop(); + } + }); + test('should ping node with address when connection exists', async () => { + // NodeConnectionManager under test + let nodeConnectionManager: NodeConnectionManager | undefined; + try { + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + proxy, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + const remoteNodeAddress1: NodeAddress = { + host: remoteNode1.proxy.getProxyHost(), + port: remoteNode1.proxy.getProxyPort(), + }; + await 
nodeConnectionManager.withConnF(remoteNodeId1, nop); + await nodeConnectionManager.pingNode(remoteNodeId1, remoteNodeAddress1); + } finally { + await nodeConnectionManager?.stop(); + } + }); + test('should fail to ping non existent node', async () => { + // NodeConnectionManager under test + let nodeConnectionManager: NodeConnectionManager | undefined; + try { + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + proxy, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + + // Pinging node + expect( + await nodeConnectionManager.pingNode( + remoteNodeId1, + { host: '127.1.2.3' as Host, port: 55555 as Port }, + timerStart(1000), + ), + ).toEqual(false); + } finally { + await nodeConnectionManager?.stop(); + } + }); + test('should fail to ping node with wrong address when connection exists', async () => { + // NodeConnectionManager under test + let nodeConnectionManager: NodeConnectionManager | undefined; + try { + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + proxy, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + await nodeConnectionManager.withConnF(remoteNodeId1, nop); + expect( + await nodeConnectionManager.pingNode( + remoteNodeId1, + { host: '127.1.2.3' as Host, port: 55555 as Port }, + timerStart(1000), + ), + ).toEqual(false); + } finally { + await nodeConnectionManager?.stop(); + } + }); + test('should fail to ping node if NodeId does not match', async () => { + // NodeConnectionManager under test + let nodeConnectionManager: NodeConnectionManager | undefined; + try { + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + proxy, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start(); + const remoteNodeAddress1: NodeAddress = { + host: remoteNode1.proxy.getProxyHost(), + port: remoteNode1.proxy.getProxyPort(), + }; + const remoteNodeAddress2: NodeAddress = { + host: 
remoteNode2.proxy.getProxyHost(), + port: remoteNode2.proxy.getProxyPort(), + }; + + expect( + await nodeConnectionManager.pingNode( + remoteNodeId1, + remoteNodeAddress2, + timerStart(1000), + ), + ).toEqual(false); + + expect( + await nodeConnectionManager.pingNode( + remoteNodeId2, + remoteNodeAddress1, + timerStart(1000), + ), + ).toEqual(false); + } finally { + await nodeConnectionManager?.stop(); + } + }); }); From b62c664f8b8b70425ce686527c5aa0ab21d29632 Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Mon, 28 Mar 2022 18:19:09 +1100 Subject: [PATCH 10/39] feat: `NodeManager.setNode` authenticates the added node `setNode` now authenticates the node you are trying to add. Added a flag for skipping this authentication as well as a timeout timer for the authentication. This is shared between authenticating the new node and the old node if the bucket is full. Related #322 --- src/nodes/NodeConnectionManager.ts | 16 ++++------------ src/nodes/NodeManager.ts | 26 ++++++++++++++++++++++++++ 2 files changed, 20 insertions(+), 22 deletions(-) diff --git a/src/nodes/NodeConnectionManager.ts b/src/nodes/NodeConnectionManager.ts index c190c20d3..9d1f7302c 100644 --- a/src/nodes/NodeConnectionManager.ts +++ b/src/nodes/NodeConnectionManager.ts @@ -673,23 +673,15 @@ class NodeConnectionManager { * the addresses match if provided. Otherwise returns false. * @param nodeId - NodeId of the target * @param address - Optional address of the target - * @param connConnectTime - Optional timeout for making the connection.
+ * @param timer Connection timeout timer */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - public async pingNode( - nodeId: NodeId, - address?: NodeAddress, - connConnectTime?: number, - ): Promise { + public async pingNode(nodeId: NodeId, address?: NodeAddress, timer?: Timer): Promise { // If we can create a connection then we have punched though the NAT, - // authenticated and confimed the nodeId matches + // authenticated and confirmed the nodeId matches let connAndLock: ConnectionAndLock; try { - connAndLock = await this.createConnection( - nodeId, - address, - connConnectTime, - ); + connAndLock = await this.createConnection(nodeId, address, timer); } catch (e) { if ( e instanceof nodesErrors.ErrorNodeConnectionDestroyed || diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index 96db367a8..9ac3eac2c 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -55,14 +55,10 @@ class NodeManager { * @return true if online, false if offline * @param nodeId - NodeId of the node we're pinging * @param address - Optional Host and Port we want to ping - * @param timeout - Optional timeout + * @param timer Connection timeout timer */ - public async pingNode( - nodeId: NodeId, - address?: NodeAddress, - timeout?: number, - ): Promise { - return this.nodeConnectionManager.pingNode(nodeId, address, timeout); + public async pingNode(nodeId: NodeId, address?: NodeAddress, timer?: Timer): Promise { + return this.nodeConnectionManager.pingNode(nodeId, address, timer); } /** @@ -337,14 +333,24 @@ class NodeManager { /** * Adds a node to the node graph. * Updates the node if the node already exists. - * + * @param nodeId - Id of the node we wish to add + * @param nodeAddress - Expected address of the node we want to add + * @param authenticate - Flag for if we want to authenticate the node we're adding + * @param force - Flag for if we want to add the node without authenticating or if the bucket is full. 
+ * This will drop the oldest node in favor of the new. + * @param timer Connection timeout timer */ public async setNode( nodeId: NodeId, nodeAddress: NodeAddress, - force = false, + authenticate: boolean = true, + force: boolean = false, + timer?: Timer, tran: DBTransaction, ): Promise { + // if we fail to ping and authenticate the new node we return + // skip if force is true or authenticate is false + if (!force && authenticate && !(await this.pingNode(nodeId, nodeAddress, timer))) return // When adding a node we need to handle 3 cases // 1. The node already exists. We need to update it's last updated field // 2. The node doesn't exist and bucket has room. @@ -372,7 +378,7 @@ class NodeManager { bucketIndex, tran, ))!; - if ((await this.pingNode(oldestNodeId)) && !force) { + if ((await this.pingNode(oldestNodeId, undefined, timer)) && !force) { // The node responded, we need to update it's info and drop the new node const oldestNode = (await this.nodeGraph.getNode(oldestNodeId, tran))!; await this.nodeGraph.setNode(oldestNodeId, oldestNode.address, tran); From 00eefec742495445c9f63f2ac96578da3e57f934 Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Tue, 29 Mar 2022 12:12:37 +1100 Subject: [PATCH 11/39] syntax: general linting and fixes from api changes --- src/PolykeyAgent.ts | 2 +- src/bin/nodes/CommandGetAll.ts | 12 ++++-------- src/client/service/nodesGetAll.ts | 5 ++--- src/nodes/NodeConnectionManager.ts | 9 ++++++++- src/nodes/NodeManager.ts | 18 +++++++++++++----- src/nodes/types.ts | 4 ++-- tests/client/service/keysKeyPairRenew.test.ts | 4 ++-- tests/client/service/keysKeyPairReset.test.ts | 4 ++-- tests/client/service/nodesAdd.test.ts | 3 +-- .../NodeConnectionManager.general.test.ts | 18 ++++++++++-------- tests/utils.ts | 19 ++++++++++--------- tests/vaults/VaultInternal.test.ts | 4 ++-- tests/vaults/VaultManager.test.ts | 8 ++++---- 13 files changed, 61 insertions(+), 49 deletions(-) diff --git a/src/PolykeyAgent.ts b/src/PolykeyAgent.ts index 
a3f5241f3..792a8778b 100644 --- a/src/PolykeyAgent.ts +++ b/src/PolykeyAgent.ts @@ -8,7 +8,7 @@ import process from 'process'; import Logger from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { CreateDestroyStartStop } from '@matrixai/async-init/dist/CreateDestroyStartStop'; -import * as networkUtils from '@/network/utils'; +import * as networkUtils from './network/utils'; import KeyManager from './keys/KeyManager'; import Status from './status/Status'; import Schema from './schema/Schema'; diff --git a/src/bin/nodes/CommandGetAll.ts b/src/bin/nodes/CommandGetAll.ts index 5d1b5a8fc..243991fc9 100644 --- a/src/bin/nodes/CommandGetAll.ts +++ b/src/bin/nodes/CommandGetAll.ts @@ -43,14 +43,10 @@ class CommandGetAll extends CommandPolykey { logger: this.logger.getChild(PolykeyClient.name), }); const emptyMessage = new utilsPB.EmptyMessage(); - try { - result = await binUtils.retryAuthentication( - (auth) => pkClient.grpcClient.nodesGetAll(emptyMessage, auth), - meta, - ); - } catch (err) { - throw err; - } + result = await binUtils.retryAuthentication( + (auth) => pkClient.grpcClient.nodesGetAll(emptyMessage, auth), + meta, + ); let output: any = {}; for (const [bucketIndex, bucket] of result.getBucketsMap().entries()) { output[bucketIndex] = {}; diff --git a/src/client/service/nodesGetAll.ts b/src/client/service/nodesGetAll.ts index 6a658fedd..bc01e84e0 100644 --- a/src/client/service/nodesGetAll.ts +++ b/src/client/service/nodesGetAll.ts @@ -1,6 +1,5 @@ import type * as grpc from '@grpc/grpc-js'; import type { Authenticate } from '../types'; -import type { NodeGraph } from '../../nodes'; import type { KeyManager } from '../../keys'; import type { NodeId } from '../../nodes/types'; import type * as utilsPB from '../../proto/js/polykey/v1/utils/utils_pb'; @@ -13,11 +12,11 @@ import * as nodesPB from '../../proto/js/polykey/v1/nodes/nodes_pb'; * Retrieves all nodes from all buckets in the NodeGraph. 
*/ function nodesGetAll({ - nodeGraph, + // NodeGraph, keyManager, authenticate, }: { - nodeGraph: NodeGraph; + // NodeGraph: NodeGraph; keyManager: KeyManager; authenticate: Authenticate; }) { diff --git a/src/nodes/NodeConnectionManager.ts b/src/nodes/NodeConnectionManager.ts index 9d1f7302c..31540aaa8 100644 --- a/src/nodes/NodeConnectionManager.ts +++ b/src/nodes/NodeConnectionManager.ts @@ -10,6 +10,9 @@ import type { NodeId, NodeIdString, SeedNodes, + NodeEntry, + NodeBucket, + NodeIdString, } from './types'; import { withF } from '@matrixai/resources'; import Logger from '@matrixai/logger'; @@ -676,7 +679,11 @@ class NodeConnectionManager { * @param timer Connection timeout timer */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - public async pingNode(nodeId: NodeId, address?: NodeAddress, timer?: Timer): Promise { + public async pingNode( + nodeId: NodeId, + address?: NodeAddress, + timer?: Timer, + ): Promise { // If we can create a connection then we have punched though the NAT, // authenticated and confirmed the nodeId matches let connAndLock: ConnectionAndLock; diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index 9ac3eac2c..4bb1d05ab 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -14,8 +14,6 @@ import * as nodesUtils from './utils'; import * as validationUtils from '../validation/utils'; import * as utilsPB from '../proto/js/polykey/v1/utils/utils_pb'; import * as claimsErrors from '../claims/errors'; -import * as networkErrors from '../network/errors'; -import * as networkUtils from '../network/utils'; import * as sigchainUtils from '../sigchain/utils'; import * as claimsUtils from '../claims/utils'; @@ -57,7 +55,11 @@ class NodeManager { * @param address - Optional Host and Port we want to ping * @param timer Connection timeout timer */ - public async pingNode(nodeId: NodeId, address?: NodeAddress, timer?: Timer): Promise { + public async pingNode( + nodeId: NodeId, + address?: NodeAddress, + 
timer?: Timer, + ): Promise { return this.nodeConnectionManager.pingNode(nodeId, address, timer); } @@ -348,9 +350,15 @@ class NodeManager { timer?: Timer, tran: DBTransaction, ): Promise { - // if we fail to ping and authenticate the new node we return + // If we fail to ping and authenticate the new node we return // skip if force is true or authenticate is false - if (!force && authenticate && !(await this.pingNode(nodeId, nodeAddress, timer))) return + if ( + !force && + authenticate && + !(await this.pingNode(nodeId, nodeAddress, timer)) + ) { + return; + } // When adding a node we need to handle 3 cases // 1. The node already exists. We need to update it's last updated field // 2. The node doesn't exist and bucket has room. diff --git a/src/nodes/types.ts b/src/nodes/types.ts index 683143e83..8e173b4f2 100644 --- a/src/nodes/types.ts +++ b/src/nodes/types.ts @@ -1,5 +1,5 @@ import type { Id } from '@matrixai/id'; -import type { Opaque, NonFunctionProperties } from '../types'; +import type { Opaque } from '../types'; import type { Host, Hostname, Port } from '../network/types'; import type { Claim, ClaimId } from '../claims/types'; import type { ChainData } from '../sigchain/types'; @@ -33,7 +33,7 @@ type NodeBucketMeta = { count: number; }; -type NodeBucketMetaProps = NonFunctionProperties; +// Type NodeBucketMetaProps = NonFunctionProperties; // Just make the bucket entries also // bucketIndex anot as a key diff --git a/tests/client/service/keysKeyPairRenew.test.ts b/tests/client/service/keysKeyPairRenew.test.ts index 8a792254b..a36c621c1 100644 --- a/tests/client/service/keysKeyPairRenew.test.ts +++ b/tests/client/service/keysKeyPairRenew.test.ts @@ -7,7 +7,6 @@ import path from 'path'; import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { Metadata } from '@grpc/grpc-js'; -import NodeGraph from '@/nodes/NodeGraph'; import PolykeyAgent from '@/PolykeyAgent'; import GRPCServer from '@/grpc/GRPCServer'; import 
GRPCClientClient from '@/client/GRPCClientClient'; @@ -17,6 +16,7 @@ import * as keysPB from '@/proto/js/polykey/v1/keys/keys_pb'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as clientUtils from '@/client/utils/utils'; import * as keysUtils from '@/keys/utils'; +import { NodeManager } from '@/nodes'; import * as testUtils from '../../utils'; describe('keysKeyPairRenew', () => { @@ -32,7 +32,7 @@ describe('keysKeyPairRenew', () => { beforeAll(async () => { const globalKeyPair = await testUtils.setupGlobalKeypair(); const newKeyPair = await keysUtils.generateKeyPair(1024); - mockedRefreshBuckets = jest.spyOn(NodeGraph.prototype, 'refreshBuckets'); + mockedRefreshBuckets = jest.spyOn(NodeManager.prototype, 'refreshBuckets'); mockedGenerateKeyPair = jest .spyOn(keysUtils, 'generateKeyPair') .mockResolvedValueOnce(globalKeyPair) diff --git a/tests/client/service/keysKeyPairReset.test.ts b/tests/client/service/keysKeyPairReset.test.ts index 8c41064b1..335d5c5fd 100644 --- a/tests/client/service/keysKeyPairReset.test.ts +++ b/tests/client/service/keysKeyPairReset.test.ts @@ -7,7 +7,6 @@ import path from 'path'; import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { Metadata } from '@grpc/grpc-js'; -import NodeGraph from '@/nodes/NodeGraph'; import PolykeyAgent from '@/PolykeyAgent'; import GRPCServer from '@/grpc/GRPCServer'; import GRPCClientClient from '@/client/GRPCClientClient'; @@ -17,6 +16,7 @@ import * as keysPB from '@/proto/js/polykey/v1/keys/keys_pb'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as clientUtils from '@/client/utils/utils'; import * as keysUtils from '@/keys/utils'; +import { NodeManager } from '@/nodes'; import * as testUtils from '../../utils'; describe('keysKeyPairReset', () => { @@ -32,7 +32,7 @@ describe('keysKeyPairReset', () => { beforeAll(async () => { const globalKeyPair = await testUtils.setupGlobalKeypair(); const newKeyPair = await 
keysUtils.generateKeyPair(1024); - mockedRefreshBuckets = jest.spyOn(NodeGraph.prototype, 'refreshBuckets'); + mockedRefreshBuckets = jest.spyOn(NodeManager.prototype, 'refreshBuckets'); mockedGenerateKeyPair = jest .spyOn(keysUtils, 'generateKeyPair') .mockResolvedValueOnce(globalKeyPair) diff --git a/tests/client/service/nodesAdd.test.ts b/tests/client/service/nodesAdd.test.ts index 86923b961..e0a07e171 100644 --- a/tests/client/service/nodesAdd.test.ts +++ b/tests/client/service/nodesAdd.test.ts @@ -166,8 +166,7 @@ describe('nodesAdd', () => { )!, ); expect(result).toBeDefined(); - expect(result!.host).toBe('127.0.0.1'); - expect(result!.port).toBe(11111); + expect(result!.address).toBe('127.0.0.1:11111'); }); test('cannot add invalid node', async () => { // Invalid host diff --git a/tests/nodes/NodeConnectionManager.general.test.ts b/tests/nodes/NodeConnectionManager.general.test.ts index 24986923b..d13c838de 100644 --- a/tests/nodes/NodeConnectionManager.general.test.ts +++ b/tests/nodes/NodeConnectionManager.general.test.ts @@ -1,4 +1,4 @@ -import type { NodeAddress, NodeData, NodeId, SeedNodes } from '@/nodes/types'; +import type { NodeAddress, NodeBucket, NodeId, SeedNodes } from '@/nodes/types'; import type { Host, Port } from '@/network/types'; import fs from 'fs'; import path from 'path'; @@ -362,7 +362,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { }); // Now generate and add 20 nodes that will be close to this node ID - const addedClosestNodes: NodeData[] = []; + const addedClosestNodes: NodeBucket = []; for (let i = 1; i < 101; i += 5) { const closeNodeId = testNodesUtils.generateNodeIdForBucket( targetNodeId, @@ -373,11 +373,13 @@ describe(`${NodeConnectionManager.name} general test`, () => { port: i as Port, }; await serverPKAgent.nodeGraph.setNode(closeNodeId, nodeAddress); - addedClosestNodes.push({ - id: closeNodeId, - address: nodeAddress, - distance: nodesUtils.calculateDistance(targetNodeId, closeNodeId), - }); + 
addedClosestNodes.push([ + closeNodeId, + { + address: nodeAddress, + lastUpdated: 0, + }, + ]); } // Now create and add 10 more nodes that are far away from this node for (let i = 1; i <= 10; i++) { @@ -396,7 +398,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { ); // Sort the received nodes on distance such that we can check its equality // with addedClosestNodes - closest.sort(nodesUtils.sortByDistance); + nodesUtils.bucketSortByDistance(closest, targetNodeId); expect(closest.length).toBe(20); expect(closest).toEqual(addedClosestNodes); } finally { diff --git a/tests/utils.ts b/tests/utils.ts index ea2d11ff9..c7636c4a5 100644 --- a/tests/utils.ts +++ b/tests/utils.ts @@ -1,20 +1,21 @@ -import type { StatusLive } from '@/status/types'; +// Import type { StatusLive } from '@/status/types'; import type { NodeId } from '@/nodes/types'; -import type { Host } from '@/network/types'; +// import type { Host } from '@/network/types'; import path from 'path'; import fs from 'fs'; import lock from 'fd-lock'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { IdInternal } from '@matrixai/id'; -import PolykeyAgent from '@/PolykeyAgent'; -import Status from '@/status/Status'; -import GRPCClientClient from '@/client/GRPCClientClient'; -import * as clientUtils from '@/client/utils'; +// Import PolykeyAgent from '@/PolykeyAgent'; +// import Status from '@/status/Status'; +// import GRPCClientClient from '@/client/GRPCClientClient'; +// import * as clientUtils from '@/client/utils'; import * as keysUtils from '@/keys/utils'; -import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; +// Import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as grpcErrors from '@/grpc/errors'; import { sleep } from '@/utils'; -import config from '@/config'; +import * as errors from '@/errors'; +// import config from '@/config'; /** * Setup the global keypair @@ -86,7 +87,7 @@ async function setupGlobalKeypair() { // * * Ensure 
server-side side-effects are removed at the end of each test // */ async function setupGlobalAgent( - logger: Logger = new Logger(setupGlobalAgent.name, LogLevel.WARN, [ + _logger: Logger = new Logger(setupGlobalAgent.name, LogLevel.WARN, [ new StreamHandler(), ]), ): Promise { diff --git a/tests/vaults/VaultInternal.test.ts b/tests/vaults/VaultInternal.test.ts index 86c283baf..d91978c5a 100644 --- a/tests/vaults/VaultInternal.test.ts +++ b/tests/vaults/VaultInternal.test.ts @@ -15,7 +15,7 @@ import * as vaultsErrors from '@/vaults/errors'; import { sleep } from '@/utils'; import * as keysUtils from '@/keys/utils'; import * as vaultsUtils from '@/vaults/utils'; -import * as testsUtils from '../utils'; +import * as nodeTestUtils from '../nodes/utils'; jest.mock('@/keys/utils', () => ({ ...jest.requireActual('@/keys/utils'), @@ -39,7 +39,7 @@ describe('VaultInternal', () => { const fakeKeyManager = { getNodeId: () => { - return testsUtils.generateRandomNodeId(); + return nodeTestUtils.generateRandomNodeId(); }, } as KeyManager; const secret1 = { name: 'secret-1', content: 'secret-content-1' }; diff --git a/tests/vaults/VaultManager.test.ts b/tests/vaults/VaultManager.test.ts index e4ed618aa..f37dfba38 100644 --- a/tests/vaults/VaultManager.test.ts +++ b/tests/vaults/VaultManager.test.ts @@ -30,7 +30,7 @@ import * as vaultsUtils from '@/vaults/utils'; import * as keysUtils from '@/keys/utils'; import { sleep } from '@/utils'; import VaultInternal from '@/vaults/VaultInternal'; -import * as testsUtils from '../utils'; +import * as nodeTestUtils from '../nodes/utils'; import { expectRemoteError } from '../utils'; const mockedGenerateDeterministicKeyPair = jest @@ -65,7 +65,7 @@ describe('VaultManager', () => { let db: DB; // We only ever use this to get NodeId, No need to create a whole one - const nodeId = testsUtils.generateRandomNodeId(); + const nodeId = nodeTestUtils.generateRandomNodeId(); const dummyKeyManager = { getNodeId: () => nodeId, } as KeyManager; @@ 
-1394,8 +1394,8 @@ describe('VaultManager', () => { }); try { // Setting up state - const nodeId1 = testsUtils.generateRandomNodeId(); - const nodeId2 = testsUtils.generateRandomNodeId(); + const nodeId1 = nodeTestUtils.generateRandomNodeId(); + const nodeId2 = nodeTestUtils.generateRandomNodeId(); await gestaltGraph.setNode({ id: nodesUtils.encodeNodeId(nodeId1), chain: {}, From 062f4e329a8ae2827c3cc30b6d27161274da995f Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Thu, 31 Mar 2022 14:04:03 +1100 Subject: [PATCH 12/39] fix: squash into ping node changes. Updated `NodeConnectionManager.pingNode` to just use the proxy connection. #322 --- src/nodes/NodeConnectionManager.ts | 72 +++++++++++++++--------------- src/nodes/NodeManager.ts | 13 +++++- 2 files changed, 47 insertions(+), 38 deletions(-) diff --git a/src/nodes/NodeConnectionManager.ts b/src/nodes/NodeConnectionManager.ts index 31540aaa8..43f703988 100644 --- a/src/nodes/NodeConnectionManager.ts +++ b/src/nodes/NodeConnectionManager.ts @@ -135,14 +135,9 @@ class NodeConnectionManager { public async acquireConnection( targetNodeId: NodeId, timer?: Timer, - address?: NodeAddress, ): Promise>> { return async () => { - const { connection, timer } = await this.getConnection( - targetNodeId, - address, - timer, - ); + const { connection, timer } = await this.getConnection(targetNodeId, timer); // Acquire the read lock and the release function const [release] = await this.connectionLocks.lock([ targetNodeId.toString(), @@ -185,7 +180,7 @@ class NodeConnectionManager { timer?: Timer, ): Promise { return await withF( - [await this.acquireConnection(targetNodeId, timer, undefined)], + [await this.acquireConnection(targetNodeId, timer)], async ([conn]) => { this.logger.info( `withConnF calling function with connection to ${nodesUtils.encodeNodeId( @@ -214,11 +209,7 @@ class NodeConnectionManager { ) => AsyncGenerator, timer?: Timer, ): AsyncGenerator { - const acquire = await this.acquireConnection( - targetNodeId, - 
timer, - undefined, - ); + const acquire = await this.acquireConnection(targetNodeId, timer); const [release, conn] = await acquire(); let caughtError; try { @@ -236,13 +227,11 @@ class NodeConnectionManager { * Create a connection to another node (without performing any function). * This is a NOOP if a connection already exists. * @param targetNodeId Id of node we are creating connection to - * @param address Optional address to connect to * @param timer Connection timeout timer * @returns ConnectionAndLock that was created or exists in the connection map */ protected async getConnection( targetNodeId: NodeId, - address?: NodeAddress, timer?: Timer, ): Promise { this.logger.info( @@ -467,7 +456,7 @@ class NodeConnectionManager { // call to getConnectionToNode // FIXME: no tran await this.nodeGraph.setNode(nextNodeId, nextNodeAddress.address); - await this.getConnection(nextNodeId, undefined, timer); + await this.getConnection(nextNodeId, timer); } catch (e) { // If we can't connect to the node, then skip it continue; @@ -581,7 +570,7 @@ class NodeConnectionManager { for (const seedNodeId of this.getSeedNodes()) { // Check if the connection is viable try { - await this.getConnection(seedNodeId, undefined, timer); + await this.getConnection(seedNodeId, timer); } catch (e) { if (e instanceof nodesErrors.ErrorNodeConnectionTimeout) continue; throw e; @@ -675,39 +664,48 @@ class NodeConnectionManager { * connection can be authenticated, it's certificate matches the nodeId and * the addresses match if provided. Otherwise returns false. 
* @param nodeId - NodeId of the target - * @param address - Optional address of the target + * @param host - Host of the target node + * @param port - Port of the target node * @param timer Connection timeout timer */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) public async pingNode( nodeId: NodeId, - address?: NodeAddress, + host: Host, + port: Port, timer?: Timer, ): Promise { // If we can create a connection then we have punched though the NAT, // authenticated and confirmed the nodeId matches - let connAndLock: ConnectionAndLock; + const proxyAddress = networkUtils.buildAddress( + this.proxy.getProxyHost(), + this.proxy.getProxyPort(), + ); + const signature = await this.keyManager.signWithRootKeyPair( + Buffer.from(proxyAddress), + ); + const holePunchPromises = Array.from(this.getSeedNodes(), (seedNodeId) => { + return this.sendHolePunchMessage( + seedNodeId, + this.keyManager.getNodeId(), + nodeId, + proxyAddress, + signature, + ); + }); + const forwardPunchPromise = this.holePunchForward( + nodeId, + host, + port, + timer, + ); + try { - connAndLock = await this.createConnection(nodeId, address, timer); + await Promise.all([forwardPunchPromise, ...holePunchPromises]); } catch (e) { - if ( - e instanceof nodesErrors.ErrorNodeConnectionDestroyed || - e instanceof nodesErrors.ErrorNodeConnectionTimeout || - e instanceof grpcErrors.ErrorGRPC || - e instanceof agentErrors.ErrorAgentClientDestroyed - ) { - // Failed to connect, returning false - return false; - } - throw e; + return false; } - const remoteHost = connAndLock.connection?.host; - const remotePort = connAndLock.connection?.port; - // If address wasn't set then nothing to check - if (address == null) return true; - // Check if the address information match in case there was an - // existing connection - return address.host === remoteHost && address.port === remotePort; + return true; } } diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index 4bb1d05ab..d18b406de 
100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -11,6 +11,7 @@ import type { Timer } from '../types'; import Logger from '@matrixai/logger'; import * as nodesErrors from './errors'; import * as nodesUtils from './utils'; +import * as networkUtils from '../network/utils'; import * as validationUtils from '../validation/utils'; import * as utilsPB from '../proto/js/polykey/v1/utils/utils_pb'; import * as claimsErrors from '../claims/errors'; @@ -60,7 +61,17 @@ class NodeManager { address?: NodeAddress, timer?: Timer, ): Promise { - return this.nodeConnectionManager.pingNode(nodeId, address, timer); + // We need to attempt a connection using the proxies + // For now we will just do a forward connect + relay message + const targetAddress = + address ?? (await this.nodeConnectionManager.findNode(nodeId)); + const targetHost = await networkUtils.resolveHost(targetAddress.host); + return await this.nodeConnectionManager.pingNode( + nodeId, + targetHost, + targetAddress.port, + timer, + ); } /** From 45731ed91f8c161adcdba8d390aa74fc4de4991e Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Thu, 31 Mar 2022 14:23:11 +1100 Subject: [PATCH 13/39] fix: setNode now does not ping the new node #322 --- src/nodes/NodeManager.ts | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index d18b406de..4e1e12914 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -344,11 +344,10 @@ class NodeManager { } /** - * Adds a node to the node graph. + * Adds a node to the node graph. This assumes that you have already authenticated the node. * Updates the node if the node already exists. 
* @param nodeId - Id of the node we wish to add * @param nodeAddress - Expected address of the node we want to add - * @param authenticate - Flag for if we want to authenticate the node we're adding * @param force - Flag for if we want to add the node without authenticating or if the bucket is full. * This will drop the oldest node in favor of the new. * @param timer Connection timeout timer @@ -356,20 +355,10 @@ class NodeManager { public async setNode( nodeId: NodeId, nodeAddress: NodeAddress, - authenticate: boolean = true, force: boolean = false, timer?: Timer, tran: DBTransaction, ): Promise { - // If we fail to ping and authenticate the new node we return - // skip if force is true or authenticate is false - if ( - !force && - authenticate && - !(await this.pingNode(nodeId, nodeAddress, timer)) - ) { - return; - } // When adding a node we need to handle 3 cases // 1. The node already exists. We need to update it's last updated field // 2. The node doesn't exist and bucket has room. From f8b33200d859ead8d8cfb83eed8de1dec5a6dc2c Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Thu, 31 Mar 2022 15:16:37 +1100 Subject: [PATCH 14/39] feat: setNode concurrently pings multiple nodes `setNode` now pings 3 nodes concurrently, updating ones that respond and removing ones that don't. If there is room in the bucket afterwards then we add the new node. 
#322 --- src/nodes/NodeGraph.ts | 20 ++++------ src/nodes/NodeManager.ts | 69 ++++++++++++++++++++++----------- tests/nodes/NodeManager.test.ts | 6 +-- 3 files changed, 57 insertions(+), 38 deletions(-) diff --git a/src/nodes/NodeGraph.ts b/src/nodes/NodeGraph.ts index 0bf30f3ae..d6c6be1c6 100644 --- a/src/nodes/NodeGraph.ts +++ b/src/nodes/NodeGraph.ts @@ -260,26 +260,22 @@ class NodeGraph { @ready(new nodesErrors.ErrorNodeGraphNotRunning()) public async getOldestNode( bucketIndex: number, + limit: number = 1, tran?: DBTransaction, - ): Promise { + ): Promise> { if (tran == null) { return this.db.withTransactionF(async (tran) => - this.getOldestNode(bucketIndex, tran), + this.getOldestNode(bucketIndex, limit, tran), ); } - const bucketKey = nodesUtils.bucketKey(bucketIndex); // Remove the oldest entry in the bucket - let oldestNodeId: NodeId | undefined; - for await (const [key] of tran.iterator({ limit: 1 }, [ - ...this.nodeGraphLastUpdatedDbPath, - bucketKey, - ])) { - ({ nodeId: oldestNodeId } = nodesUtils.parseLastUpdatedBucketDbKey( - key as unknown as Buffer, - )); + const oldestNodeIds: Array = []; + for await (const [key] of tran.iterator({ limit }, [...this.nodeGraphLastUpdatedDbPath, bucketKey])) { + const { nodeId } = nodesUtils.parseLastUpdatedBucketDbKey(key as unknown as Buffer); + oldestNodeIds.push(nodeId); } - return oldestNodeId; + return oldestNodeIds; } @ready(new nodesErrors.ErrorNodeGraphNotRunning()) diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index 4e1e12914..8aa576e3f 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -68,8 +68,10 @@ class NodeManager { const targetHost = await networkUtils.resolveHost(targetAddress.host); return await this.nodeConnectionManager.pingNode( nodeId, - targetHost, - targetAddress.port, + { + host: targetHost, + port: targetAddress.port, + }, timer, ); } @@ -351,6 +353,7 @@ class NodeManager { * @param force - Flag for if we want to add the node without authenticating 
or if the bucket is full. * This will drop the oldest node in favor of the new. * @param timer Connection timeout timer + * @param tran */ public async setNode( nodeId: NodeId, @@ -382,35 +385,55 @@ class NodeManager { } else { // We want to add a node but the bucket is full // We need to ping the oldest node - const oldestNodeId = (await this.nodeGraph.getOldestNode( + const oldestNodeIds = (await this.nodeGraph.getOldestNode( bucketIndex, + 3, tran, ))!; - if ((await this.pingNode(oldestNodeId, undefined, timer)) && !force) { - // The node responded, we need to update it's info and drop the new node - const oldestNode = (await this.nodeGraph.getNode(oldestNodeId, tran))!; - await this.nodeGraph.setNode(oldestNodeId, oldestNode.address, tran); - } else { - // The node could not be contacted or force was set, - // we drop it in favor of the new node - await this.nodeGraph.unsetNode(oldestNodeId, tran); + if (force) { + // We just add the new node anyway without checking the old one + const oldNodeId = oldestNodeIds[0]; + await this.nodeGraph.unsetNode(oldNodeId, tran); + await this.nodeGraph.setNode(nodeId, nodeAddress, tran); + return; + } + // We want to concurrently ping the nodes + const pingPromises = oldestNodeIds.map((nodeId) => { + const doPing = async (): Promise<{ + nodeId: NodeId; + success: boolean; + }> => { + // This needs to return nodeId and ping result + const data = await this.nodeGraph.getNode(nodeId, tran); + if (data == null) return { nodeId, success: false }; + const result = await this.pingNode(nodeId, undefined, timer); + return { nodeId, success: result }; + }; + return doPing(); + }); + const pingResults = await Promise.all(pingPromises); + for (const { nodeId, success } of pingResults) { + if (success) { + // Ping succeeded, update the node + const node = (await this.nodeGraph.getNode(nodeId, tran))!; + await this.nodeGraph.setNode(nodeId, node.address, tran); + } else { + // Otherwise we remove the node + await 
this.nodeGraph.unsetNode(nodeId, tran); + } + } + // Check if we now have room and add the new node + const count = await this.nodeGraph.getBucketMetaProp( + bucketIndex, + 'count', + tran, + ); + if (count < this.nodeGraph.nodeBucketLimit) { await this.nodeGraph.setNode(nodeId, nodeAddress, tran); } } } - // FIXME - // /** - // * Updates the node in the NodeGraph - // */ - // public async updateNode( - // nodeId: NodeId, - // nodeAddress?: NodeAddress, - // tran?: DBTransaction, - // ): Promise { - // return await this.nodeGraph.updateNode(nodeId, nodeAddress, tran); - // } - /** * Removes a node from the NodeGraph */ diff --git a/tests/nodes/NodeManager.test.ts b/tests/nodes/NodeManager.test.ts index 8d2c249b8..da7451d99 100644 --- a/tests/nodes/NodeManager.test.ts +++ b/tests/nodes/NodeManager.test.ts @@ -506,7 +506,7 @@ describe(`${NodeManager.name} test`, () => { // Mocking ping const nodeManagerPingMock = jest.spyOn(NodeManager.prototype, 'pingNode'); nodeManagerPingMock.mockResolvedValue(true); - const oldestNodeId = await nodeGraph.getOldestNode(bucketIndex); + const oldestNodeId = (await nodeGraph.getOldestNode(bucketIndex)).pop(); const oldestNode = await nodeGraph.getNode(oldestNodeId!); // Waiting for a second to tick over await sleep(1100); @@ -550,7 +550,7 @@ describe(`${NodeManager.name} test`, () => { // Mocking ping const nodeManagerPingMock = jest.spyOn(NodeManager.prototype, 'pingNode'); nodeManagerPingMock.mockResolvedValue(true); - const oldestNodeId = await nodeGraph.getOldestNode(bucketIndex); + const oldestNodeId = (await nodeGraph.getOldestNode(bucketIndex)).pop(); // Adding a new node with bucket full await nodeManager.setNode(nodeId, { port: 55555 } as NodeAddress, true); // Bucket still contains max nodes @@ -591,7 +591,7 @@ describe(`${NodeManager.name} test`, () => { // Mocking ping const nodeManagerPingMock = jest.spyOn(NodeManager.prototype, 'pingNode'); nodeManagerPingMock.mockResolvedValue(false); - const oldestNodeId = await 
nodeGraph.getOldestNode(bucketIndex); + const oldestNodeId = (await nodeGraph.getOldestNode(bucketIndex)).pop(); // Adding a new node with bucket full await nodeManager.setNode(nodeId, { port: 55555 } as NodeAddress, true); // Bucket still contains max nodes From faf712dc27235521b343f26fca220c9f1318ea39 Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Thu, 31 Mar 2022 19:17:27 +1100 Subject: [PATCH 15/39] feat: nodeManager is now startStop #322 --- src/nodes/NodeManager.ts | 15 +++++++++++++++ src/nodes/errors.ts | 6 ++++++ 2 files changed, 21 insertions(+) diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index 8aa576e3f..ea800fcfb 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -9,6 +9,7 @@ import type { NodeId, NodeAddress, NodeBucket } from '../nodes/types'; import type { ClaimEncoded } from '../claims/types'; import type { Timer } from '../types'; import Logger from '@matrixai/logger'; +import { StartStop } from '@matrixai/async-init/dist/StartStop'; import * as nodesErrors from './errors'; import * as nodesUtils from './utils'; import * as networkUtils from '../network/utils'; @@ -18,6 +19,8 @@ import * as claimsErrors from '../claims/errors'; import * as sigchainUtils from '../sigchain/utils'; import * as claimsUtils from '../claims/utils'; +interface NodeManager extends StartStop {} +@StartStop() class NodeManager { protected db: DB; protected logger: Logger; @@ -49,6 +52,18 @@ class NodeManager { this.nodeGraph = nodeGraph; } + public async start() { + this.logger.info(`Starting ${this.constructor.name}`); + this.setNodeQueueRunner = this.startSetNodeQueue(); + this.logger.info(`Started ${this.constructor.name}`); + } + + public async stop() { + this.logger.info(`Stopping ${this.constructor.name}`); + await this.stopSetNodeQueue(); + this.logger.info(`Stopped ${this.constructor.name}`); + } + /** * Determines whether a node in the Polykey network is online. 
* @return true if online, false if offline diff --git a/src/nodes/errors.ts b/src/nodes/errors.ts index 83a5597d4..863e19a37 100644 --- a/src/nodes/errors.ts +++ b/src/nodes/errors.ts @@ -2,6 +2,11 @@ import { ErrorPolykey, sysexits } from '../errors'; class ErrorNodes extends ErrorPolykey {} +class ErrorNodeManagerNotRunning extends ErrorNodes { + static description = 'NodeManager is not running'; + exitCode = sysexits.USAGE; +} + class ErrorNodeGraphRunning extends ErrorNodes { static description = 'NodeGraph is running'; exitCode = sysexits.USAGE; @@ -74,6 +79,7 @@ class ErrorNodeConnectionHostWildcard extends ErrorNodes { export { ErrorNodes, + ErrorNodeManagerNotRunning, ErrorNodeGraphRunning, ErrorNodeGraphNotRunning, ErrorNodeGraphDestroyed, From 55e7dd71776ebada0f31f2e56f638017ed5b98b1 Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Thu, 31 Mar 2022 19:23:22 +1100 Subject: [PATCH 16/39] feat: async queueing for setting nodes `setNode` now has a `blocking` flag that defaults to false. If it encounters a full bucket when adding a node then it will add the operation to the queue and asynchronously trys a blocking `setNode` in the background. `setNode`s will only be added to the queue if the bucket was full. 
#322 --- src/nodes/NodeGraph.ts | 15 +++ src/nodes/NodeManager.ts | 218 ++++++++++++++++++++++++++------ tests/nodes/NodeManager.test.ts | 193 +++++++++++++++++++++++++++- 3 files changed, 383 insertions(+), 43 deletions(-) diff --git a/src/nodes/NodeGraph.ts b/src/nodes/NodeGraph.ts index d6c6be1c6..d7437b389 100644 --- a/src/nodes/NodeGraph.ts +++ b/src/nodes/NodeGraph.ts @@ -230,6 +230,11 @@ class NodeGraph { nodesUtils.bucketDbKey(nodeId), ]); if (nodeData != null) { + this.logger.debug( + `Updating node ${nodesUtils.encodeNodeId( + nodeId, + )} in bucket ${bucketIndex}`, + ); // If the node already exists we want to remove the old `lastUpdated` const lastUpdatedKey = nodesUtils.lastUpdatedBucketDbKey( nodeData.lastUpdated, @@ -237,6 +242,11 @@ class NodeGraph { ); await tran.del([...lastUpdatedPath, lastUpdatedKey]); } else { + this.logger.debug( + `Adding node ${nodesUtils.encodeNodeId( + nodeId, + )} to bucket ${bucketIndex}`, + ); // It didn't exist so we want to increment the bucket count const count = await this.getBucketMetaProp(bucketIndex, 'count', tran); await this.setBucketMetaProp(bucketIndex, 'count', count + 1, tran); @@ -294,6 +304,11 @@ class NodeGraph { nodesUtils.bucketDbKey(nodeId), ]); if (nodeData != null) { + this.logger.debug( + `Removing node ${nodesUtils.encodeNodeId( + nodeId, + )} from bucket ${bucketIndex}`, + ); const count = await this.getBucketMetaProp(bucketIndex, 'count', tran); await this.setBucketMetaProp(bucketIndex, 'count', count - 1, tran); await tran.del([...bucketPath, nodesUtils.bucketDbKey(nodeId)]); diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index ea800fcfb..f94c5315d 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -9,7 +9,7 @@ import type { NodeId, NodeAddress, NodeBucket } from '../nodes/types'; import type { ClaimEncoded } from '../claims/types'; import type { Timer } from '../types'; import Logger from '@matrixai/logger'; -import { StartStop } from 
'@matrixai/async-init/dist/StartStop'; +import { StartStop, ready } from '@matrixai/async-init/dist/StartStop'; import * as nodesErrors from './errors'; import * as nodesUtils from './utils'; import * as networkUtils from '../network/utils'; @@ -18,6 +18,7 @@ import * as utilsPB from '../proto/js/polykey/v1/utils/utils_pb'; import * as claimsErrors from '../claims/errors'; import * as sigchainUtils from '../sigchain/utils'; import * as claimsUtils from '../claims/utils'; +import { timerStart } from '../utils/utils'; interface NodeManager extends StartStop {} @StartStop() @@ -28,6 +29,18 @@ class NodeManager { protected keyManager: KeyManager; protected nodeConnectionManager: NodeConnectionManager; protected nodeGraph: NodeGraph; + // SetNodeQueue + protected endQueue: boolean = false; + protected setNodeQueue: Array<{ + nodeId: NodeId; + nodeAddress: NodeAddress; + timeout?: number; + }> = []; + protected setNodeQueuePlug: Promise; + protected setNodeQueueUnplug: (() => void) | undefined; + protected setNodeQueueRunner: Promise; + protected setNodeQueueEmpty: Promise; + protected setNodeQueueDrained: () => void; constructor({ db, @@ -365,16 +378,19 @@ class NodeManager { * Updates the node if the node already exists. * @param nodeId - Id of the node we wish to add * @param nodeAddress - Expected address of the node we want to add + * @param blocking - Flag for if the operation should block or utilize the async queue * @param force - Flag for if we want to add the node without authenticating or if the bucket is full. * This will drop the oldest node in favor of the new. 
- * @param timer Connection timeout timer + * @param timeout Connection timeout timeout * @param tran */ + @ready(new nodesErrors.ErrorNodeManagerNotRunning()) public async setNode( nodeId: NodeId, nodeAddress: NodeAddress, + blocking: boolean = false, force: boolean = false, - timer?: Timer, + timeout?: number, tran: DBTransaction, ): Promise { // When adding a node we need to handle 3 cases @@ -400,53 +416,87 @@ class NodeManager { } else { // We want to add a node but the bucket is full // We need to ping the oldest node - const oldestNodeIds = (await this.nodeGraph.getOldestNode( - bucketIndex, - 3, - tran, - ))!; if (force) { // We just add the new node anyway without checking the old one - const oldNodeId = oldestNodeIds[0]; + const oldNodeId = ( + await this.nodeGraph.getOldestNode(bucketIndex, 1, tran) + ).pop()!; + this.logger.debug( + `Force was set, removing ${nodesUtils.encodeNodeId( + oldNodeId, + )} and adding ${nodesUtils.encodeNodeId(nodeId)}`, + ); await this.nodeGraph.unsetNode(oldNodeId, tran); await this.nodeGraph.setNode(nodeId, nodeAddress, tran); return; } - // We want to concurrently ping the nodes - const pingPromises = oldestNodeIds.map((nodeId) => { - const doPing = async (): Promise<{ - nodeId: NodeId; - success: boolean; - }> => { - // This needs to return nodeId and ping result - const data = await this.nodeGraph.getNode(nodeId, tran); - if (data == null) return { nodeId, success: false }; - const result = await this.pingNode(nodeId, undefined, timer); - return { nodeId, success: result }; - }; - return doPing(); - }); - const pingResults = await Promise.all(pingPromises); - for (const { nodeId, success } of pingResults) { - if (success) { - // Ping succeeded, update the node - const node = (await this.nodeGraph.getNode(nodeId, tran))!; - await this.nodeGraph.setNode(nodeId, node.address, tran); - } else { - // Otherwise we remove the node - await this.nodeGraph.unsetNode(nodeId, tran); - } + if (blocking) { + this.logger.debug( + 
`Bucket was full and blocking was true, garbage collecting old nodes to add ${nodesUtils.encodeNodeId( + nodeId, + )}`, + ); + await this.garbageCollectOldNode( + bucketIndex, + nodeId, + nodeAddress, + timeout, + ); + } else { + this.logger.debug( + `Bucket was full and blocking was false, adding ${nodesUtils.encodeNodeId( + nodeId, + )} to queue`, + ); + // Re-attempt this later asynchronously by adding the the queue + this.queueSetNode(nodeId, nodeAddress, timeout); } - // Check if we now have room and add the new node - const count = await this.nodeGraph.getBucketMetaProp( - bucketIndex, - 'count', - tran, - ); - if (count < this.nodeGraph.nodeBucketLimit) { - await this.nodeGraph.setNode(nodeId, nodeAddress, tran); + } + } + + private async garbageCollectOldNode( + bucketIndex: number, + nodeId: NodeId, + nodeAddress: NodeAddress, + timeout?: number, + ) { + const oldestNodeIds = await this.nodeGraph.getOldestNode(bucketIndex, 3, tran); + // We want to concurrently ping the nodes + const pingPromises = oldestNodeIds.map((nodeId) => { + const doPing = async (): Promise<{ + nodeId: NodeId; + success: boolean; + }> => { + // This needs to return nodeId and ping result + const data = await this.nodeGraph.getNode(nodeId); + if (data == null) return { nodeId, success: false }; + const timer = timeout != null ? 
timerStart(timeout) : undefined; + const result = await this.pingNode(nodeId, nodeAddress, timer); + return { nodeId, success: result }; + }; + return doPing(); + }); + const pingResults = await Promise.all(pingPromises); + for (const { nodeId, success } of pingResults) { + if (success) { + // Ping succeeded, update the node + this.logger.debug( + `Ping succeeded for ${nodesUtils.encodeNodeId(nodeId)}`, + ); + const node = (await this.nodeGraph.getNode(nodeId))!; + await this.nodeGraph.setNode(nodeId, node.address); + } else { + this.logger.debug(`Ping failed for ${nodesUtils.encodeNodeId(nodeId)}`); + // Otherwise we remove the node + await this.nodeGraph.unsetNode(nodeId); } } + // Check if we now have room and add the new node + const count = await this.nodeGraph.getBucketMetaProp(bucketIndex, 'count'); + if (count < this.nodeGraph.nodeBucketLimit) { + this.logger.debug(`Bucket ${bucketIndex} now has room, adding new node`); + await this.nodeGraph.setNode(nodeId, nodeAddress); + } } /** @@ -473,6 +523,92 @@ class NodeManager { throw Error('fixme'); // Return await this.nodeGraph.refreshBuckets(tran); } + + // SetNode queue + + /** + * This adds a setNode operation to the queue + */ + private queueSetNode( + nodeId: NodeId, + nodeAddress: NodeAddress, + timeout?: number, + ): void { + this.logger.debug(`Adding ${nodesUtils.encodeNodeId(nodeId)} to queue`); + this.setNodeQueue.push({ + nodeId, + nodeAddress, + timeout, + }); + this.unplugQueue(); + } + + /** + * This starts the process of digesting the queue + */ + private async startSetNodeQueue(): Promise { + this.logger.debug('Starting setNodeQueue'); + this.plugQueue(); + // While queue hasn't ended + while (true) { + // Wait for queue to be unplugged + await this.setNodeQueuePlug; + if (this.endQueue) break; + const job = this.setNodeQueue.shift(); + if (job == null) { + // If the queue is empty then we pause the queue + this.plugQueue(); + continue; + } + // Process the job + this.logger.debug( + 
`SetNodeQueue processing job for: ${nodesUtils.encodeNodeId( + job.nodeId, + )}`, + ); + await this.setNode(job.nodeId, job.nodeAddress, true, false, job.timeout); + } + this.logger.debug('SetNodeQueue has ended'); + } + + private async stopSetNodeQueue(): Promise { + this.logger.debug('Stopping setNodeQueue'); + // Tell the queue runner to end + this.endQueue = true; + this.unplugQueue(); + // Wait for runner to finish it's current job + await this.setNodeQueueRunner; + } + + private plugQueue(): void { + if (this.setNodeQueueUnplug == null) { + this.logger.debug('Plugging setNodeQueue'); + // Pausing queue + this.setNodeQueuePlug = new Promise((resolve) => { + this.setNodeQueueUnplug = resolve; + }); + // Signaling queue is empty + if (this.setNodeQueueDrained != null) this.setNodeQueueDrained(); + } + } + + private unplugQueue(): void { + if (this.setNodeQueueUnplug != null) { + this.logger.debug('Unplugging setNodeQueue'); + // Starting queue + this.setNodeQueueUnplug(); + this.setNodeQueueUnplug = undefined; + // Signalling queue is running + this.setNodeQueueEmpty = new Promise((resolve) => { + this.setNodeQueueDrained = resolve; + }); + } + } + + @ready(new nodesErrors.ErrorNodeManagerNotRunning()) + public async queueDrained(): Promise { + await this.setNodeQueueEmpty; + } } export default NodeManager; diff --git a/tests/nodes/NodeManager.test.ts b/tests/nodes/NodeManager.test.ts index da7451d99..4e4c2f62b 100644 --- a/tests/nodes/NodeManager.test.ts +++ b/tests/nodes/NodeManager.test.ts @@ -16,14 +16,15 @@ import NodeManager from '@/nodes/NodeManager'; import Proxy from '@/network/Proxy'; import Sigchain from '@/sigchain/Sigchain'; import * as claimsUtils from '@/claims/utils'; -import { promisify, sleep } from '@/utils'; +import { promise, promisify, sleep } from '@/utils'; import * as nodesUtils from '@/nodes/utils'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as nodesTestUtils from './utils'; +import { 
generateNodeIdForBucket } from './utils'; describe(`${NodeManager.name} test`, () => { const password = 'password'; - const logger = new Logger(`${NodeManager.name} test`, LogLevel.WARN, [ + const logger = new Logger(`${NodeManager.name} test`, LogLevel.DEBUG, [ new StreamHandler(), ]); let dataDir: string; @@ -39,14 +40,22 @@ describe(`${NodeManager.name} test`, () => { const serverHost = '::1' as Host; const externalHost = '127.0.0.1' as Host; + const localhost = '127.0.0.1' as Host; + const port = 55556 as Port; const serverPort = 0 as Port; const externalPort = 0 as Port; const mockedGenerateDeterministicKeyPair = jest.spyOn( keysUtils, 'generateDeterministicKeyPair', ); + const mockedPingNode = jest.fn(); // Jest.spyOn(NodeManager.prototype, 'pingNode'); + const dummyNodeConnectionManager = { + pingNode: mockedPingNode, + } as unknown as NodeConnectionManager; beforeEach(async () => { + mockedPingNode.mockClear(); + mockedPingNode.mockImplementation(async (_) => true); mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { return keysUtils.generateKeyPair(bits); }); @@ -110,6 +119,8 @@ describe(`${NodeManager.name} test`, () => { await nodeConnectionManager.start(); }); afterEach(async () => { + mockedPingNode.mockClear(); + mockedPingNode.mockImplementation(async (_) => true); await nodeConnectionManager.stop(); await nodeGraph.stop(); await nodeGraph.destroy(); @@ -646,4 +657,182 @@ describe(`${NodeManager.name} test`, () => { await server?.destroy(); } }); + test('should not add nodes to full bucket if pings succeeds', async () => { + const tempNodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + mockedPingNode.mockImplementation(async (_) => true); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph: tempNodeGraph, + nodeConnectionManager: dummyNodeConnectionManager, + logger, + }); + await nodeManager.start(); + const nodeId = keyManager.getNodeId(); + const 
address = { host: localhost, port }; + // Let's fill a bucket + for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { + const newNode = generateNodeIdForBucket(nodeId, 100, i); + await nodeManager.setNode(newNode, address); + } + + // Helpers + const listBucket = async (bucketIndex: number) => { + const bucket = await nodeManager.getBucket(bucketIndex); + return bucket?.map(([nodeId]) => nodesUtils.encodeNodeId(nodeId)); + }; + + // Pings succeed, node not added + mockedPingNode.mockImplementation(async (_) => true); + const newNode = generateNodeIdForBucket(nodeId, 100, 21); + await nodeManager.setNode(newNode, address); + expect(await listBucket(100)).not.toContain( + nodesUtils.encodeNodeId(newNode), + ); + + // Clean up + await nodeManager.queueDrained(); + await nodeManager.stop(); + await tempNodeGraph.stop(); + await tempNodeGraph.destroy(); + }); + test('should add nodes to full bucket if pings fail', async () => { + const tempNodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + mockedPingNode.mockImplementation(async (_) => true); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph: tempNodeGraph, + nodeConnectionManager: dummyNodeConnectionManager, + logger, + }); + await nodeManager.start(); + const nodeId = keyManager.getNodeId(); + const address = { host: localhost, port }; + // Let's fill a bucket + for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { + const newNode = generateNodeIdForBucket(nodeId, 100, i); + await nodeManager.setNode(newNode, address); + } + + // Helpers + const listBucket = async (bucketIndex: number) => { + const bucket = await nodeManager.getBucket(bucketIndex); + return bucket?.map(([nodeId]) => nodesUtils.encodeNodeId(nodeId)); + }; + + // Pings fail, new nodes get added + mockedPingNode.mockImplementation(async (_) => false); + const newNode1 = generateNodeIdForBucket(nodeId, 100, 22); + const newNode2 = generateNodeIdForBucket(nodeId, 100, 23); + 
const newNode3 = generateNodeIdForBucket(nodeId, 100, 24); + await nodeManager.setNode(newNode1, address); + await nodeManager.setNode(newNode2, address); + await nodeManager.setNode(newNode3, address); + await nodeManager.queueDrained(); + const list = await listBucket(100); + expect(list).toContain(nodesUtils.encodeNodeId(newNode1)); + expect(list).toContain(nodesUtils.encodeNodeId(newNode2)); + expect(list).toContain(nodesUtils.encodeNodeId(newNode3)); + + // Clean up + await nodeManager.queueDrained(); + await nodeManager.stop(); + await tempNodeGraph.stop(); + await tempNodeGraph.destroy(); + }); + test('should not block when bucket is full', async () => { + const tempNodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + mockedPingNode.mockImplementation(async (_) => true); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph: tempNodeGraph, + nodeConnectionManager: dummyNodeConnectionManager, + logger, + }); + await nodeManager.start(); + const nodeId = keyManager.getNodeId(); + const address = { host: localhost, port }; + // Let's fill a bucket + for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { + const newNode = generateNodeIdForBucket(nodeId, 100, i); + await nodeManager.setNode(newNode, address); + } + + // Set node does not block + const delayPing = promise(); + mockedPingNode.mockImplementation(async (_) => { + await delayPing.p; + return true; + }); + const newNode4 = generateNodeIdForBucket(nodeId, 100, 25); + await expect( + nodeManager.setNode(newNode4, address), + ).resolves.toBeUndefined(); + delayPing.resolveP(null); + await nodeManager.queueDrained(); + + // Clean up + await nodeManager.queueDrained(); + await nodeManager.stop(); + await tempNodeGraph.stop(); + await tempNodeGraph.destroy(); + }); + test('should block when blocking is set to true', async () => { + const tempNodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + 
mockedPingNode.mockImplementation(async (_) => true); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph: tempNodeGraph, + nodeConnectionManager: dummyNodeConnectionManager, + logger, + }); + await nodeManager.start(); + const nodeId = keyManager.getNodeId(); + const address = { host: localhost, port }; + // Let's fill a bucket + for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { + const newNode = generateNodeIdForBucket(nodeId, 100, i); + await nodeManager.setNode(newNode, address); + } + + // Set node can block + mockedPingNode.mockClear(); + mockedPingNode.mockImplementation(async (_) => { + return true; + }); + const newNode5 = generateNodeIdForBucket(nodeId, 100, 25); + await expect( + nodeManager.setNode(newNode5, address, true), + ).resolves.toBeUndefined(); + expect(mockedPingNode).toBeCalled(); + + // CLean up + await nodeManager.queueDrained(); + await nodeManager.stop(); + await tempNodeGraph.stop(); + await tempNodeGraph.destroy(); + }); }); From 9ed6e2b4c20665e8fb67db83a08985f705cd5d64 Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Thu, 7 Apr 2022 17:04:08 +1000 Subject: [PATCH 17/39] feat: establishing a `NodeConnection` adds the node to the nodeGraph `NodeConnectionManager` now takes `NodeGraph` in the `nodeConnectionManager.start` method. It has to be part of the start method since they are co-dependent. `NodeConnectionManager` cals `NodeManager.setNode()` when a connection is established. This fulfills the condition of adding a node to the graph during a forward connection. Fixed up tests that were failing in relation to the `NodeManager` `StartStop` conversion. 
#322 --- src/PolykeyAgent.ts | 5 +- src/nodes/NodeConnectionManager.ts | 10 +- tests/agent/GRPCClientAgent.test.ts | 4 +- tests/agent/service/notificationsSend.test.ts | 3 +- .../gestaltsDiscoveryByIdentity.test.ts | 4 +- .../service/gestaltsDiscoveryByNode.test.ts | 4 +- .../gestaltsGestaltTrustByIdentity.test.ts | 4 +- .../gestaltsGestaltTrustByNode.test.ts | 4 +- tests/client/service/identitiesClaim.test.ts | 4 +- tests/client/service/nodesAdd.test.ts | 3 +- tests/client/service/nodesClaim.test.ts | 3 +- tests/client/service/nodesFind.test.ts | 3 +- tests/client/service/nodesPing.test.ts | 2 +- .../client/service/notificationsClear.test.ts | 3 +- .../client/service/notificationsRead.test.ts | 3 +- .../client/service/notificationsSend.test.ts | 3 +- tests/discovery/Discovery.test.ts | 4 +- tests/nodes/NodeConnection.test.ts | 5 +- .../NodeConnectionManager.general.test.ts | 14 +- .../NodeConnectionManager.lifecycle.test.ts | 130 +---- .../NodeConnectionManager.seednodes.test.ts | 10 +- .../NodeConnectionManager.termination.test.ts | 20 +- .../NodeConnectionManager.timeout.test.ts | 8 +- tests/nodes/NodeManager.test.ts | 501 +++++++++--------- .../NotificationsManager.test.ts | 3 +- tests/vaults/VaultManager.test.ts | 15 +- 26 files changed, 385 insertions(+), 387 deletions(-) diff --git a/src/PolykeyAgent.ts b/src/PolykeyAgent.ts index 792a8778b..ddc7ca2cd 100644 --- a/src/PolykeyAgent.ts +++ b/src/PolykeyAgent.ts @@ -301,6 +301,7 @@ class PolykeyAgent { nodeConnectionManager, logger: logger.getChild(NodeManager.name), }); + await nodeManager.start(); discovery = discovery ?? 
(await Discovery.createDiscovery({ @@ -646,7 +647,8 @@ class PolykeyAgent { proxyPort: networkConfig_.proxyPort, tlsConfig, }); - await this.nodeConnectionManager.start(); + await this.nodeManager.start(); + await this.nodeConnectionManager.start({ nodeManager: this.nodeManager }); await this.nodeGraph.start({ fresh }); await this.nodeConnectionManager.syncNodeGraph(); await this.discovery.start({ fresh }); @@ -701,6 +703,7 @@ class PolykeyAgent { await this.discovery.stop(); await this.nodeConnectionManager.stop(); await this.nodeGraph.stop(); + await this.nodeManager.stop(); await this.proxy.stop(); await this.grpcServerAgent.stop(); await this.grpcServerClient.stop(); diff --git a/src/nodes/NodeConnectionManager.ts b/src/nodes/NodeConnectionManager.ts index 43f703988..544fd5687 100644 --- a/src/nodes/NodeConnectionManager.ts +++ b/src/nodes/NodeConnectionManager.ts @@ -15,6 +15,7 @@ import type { NodeIdString, } from './types'; import { withF } from '@matrixai/resources'; +import type NodeManager from './NodeManager'; import Logger from '@matrixai/logger'; import { ready, StartStop } from '@matrixai/async-init/dist/StartStop'; import { IdInternal } from '@matrixai/id'; @@ -58,6 +59,8 @@ class NodeConnectionManager { protected nodeGraph: NodeGraph; protected keyManager: KeyManager; protected proxy: Proxy; + // NodeManager has to be passed in during start to allow co-dependency + protected nodeManager: NodeManager | undefined; protected seedNodes: SeedNodes; /** * Data structure to store all NodeConnections. 
If a connection to a node n does @@ -101,8 +104,9 @@ class NodeConnectionManager { this.connTimeoutTime = connTimeoutTime; } - public async start() { + public async start({ nodeManager }: { nodeManager: NodeManager }) { this.logger.info(`Starting ${this.constructor.name}`); + this.nodeManager = nodeManager; for (const nodeIdEncoded in this.seedNodes) { const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded)!; await this.nodeGraph.setNode(nodeId, this.seedNodes[nodeIdEncoded]); // FIXME: also fine implicit transactions @@ -112,6 +116,7 @@ class NodeConnectionManager { public async stop() { this.logger.info(`Stopping ${this.constructor.name}`); + this.nodeManager = undefined; for (const [nodeId, connAndLock] of this.connections) { if (connAndLock == null) continue; if (connAndLock.connection == null) continue; @@ -293,6 +298,9 @@ class NodeConnectionManager { clientFactory: async (args) => GRPCClientAgent.createGRPCClientAgent(args), }); + // We can assume connection was established and destination was valid, + // we can add the target to the nodeGraph + await this.nodeManager?.setNode(targetNodeId, targetAddress); // Creating TTL timeout const timeToLiveTimer = setTimeout(async () => { await this.destroyConnection(targetNodeId); diff --git a/tests/agent/GRPCClientAgent.test.ts b/tests/agent/GRPCClientAgent.test.ts index 89c9ec9d7..5cc38708a 100644 --- a/tests/agent/GRPCClientAgent.test.ts +++ b/tests/agent/GRPCClientAgent.test.ts @@ -116,7 +116,6 @@ describe(GRPCClientAgent.name, () => { proxy, logger, }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db: db, sigchain: sigchain, @@ -125,6 +124,8 @@ describe(GRPCClientAgent.name, () => { nodeConnectionManager: nodeConnectionManager, logger: logger, }); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); notificationsManager = await NotificationsManager.createNotificationsManager({ acl: acl, @@ -176,6 +177,7 @@ describe(GRPCClientAgent.name, () => { await 
notificationsManager.stop(); await sigchain.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); await nodeGraph.stop(); await gestaltGraph.stop(); await acl.stop(); diff --git a/tests/agent/service/notificationsSend.test.ts b/tests/agent/service/notificationsSend.test.ts index c0b79e91c..46a9aa07e 100644 --- a/tests/agent/service/notificationsSend.test.ts +++ b/tests/agent/service/notificationsSend.test.ts @@ -117,7 +117,6 @@ describe('notificationsSend', () => { connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, @@ -126,6 +125,8 @@ describe('notificationsSend', () => { sigchain, logger, }); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, diff --git a/tests/client/service/gestaltsDiscoveryByIdentity.test.ts b/tests/client/service/gestaltsDiscoveryByIdentity.test.ts index 2c314711b..a9a4d7a17 100644 --- a/tests/client/service/gestaltsDiscoveryByIdentity.test.ts +++ b/tests/client/service/gestaltsDiscoveryByIdentity.test.ts @@ -133,7 +133,6 @@ describe('gestaltsDiscoveryByIdentity', () => { connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, @@ -142,6 +141,8 @@ describe('gestaltsDiscoveryByIdentity', () => { sigchain, logger, }); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); discovery = await Discovery.createDiscovery({ db, keyManager, @@ -177,6 +178,7 @@ describe('gestaltsDiscoveryByIdentity', () => { await discovery.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); await sigchain.stop(); await proxy.stop(); await identitiesManager.stop(); diff --git a/tests/client/service/gestaltsDiscoveryByNode.test.ts 
b/tests/client/service/gestaltsDiscoveryByNode.test.ts index e553a0693..e34f5f8ed 100644 --- a/tests/client/service/gestaltsDiscoveryByNode.test.ts +++ b/tests/client/service/gestaltsDiscoveryByNode.test.ts @@ -134,7 +134,6 @@ describe('gestaltsDiscoveryByNode', () => { connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, @@ -143,6 +142,8 @@ describe('gestaltsDiscoveryByNode', () => { sigchain, logger, }); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); discovery = await Discovery.createDiscovery({ db, keyManager, @@ -178,6 +179,7 @@ describe('gestaltsDiscoveryByNode', () => { await discovery.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); await sigchain.stop(); await proxy.stop(); await identitiesManager.stop(); diff --git a/tests/client/service/gestaltsGestaltTrustByIdentity.test.ts b/tests/client/service/gestaltsGestaltTrustByIdentity.test.ts index ec38cc41d..949a5f5e4 100644 --- a/tests/client/service/gestaltsGestaltTrustByIdentity.test.ts +++ b/tests/client/service/gestaltsGestaltTrustByIdentity.test.ts @@ -199,7 +199,6 @@ describe('gestaltsGestaltTrustByIdentity', () => { connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, @@ -208,6 +207,8 @@ describe('gestaltsGestaltTrustByIdentity', () => { nodeConnectionManager, logger: logger.getChild('nodeManager'), }); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); await nodeManager.setNode(nodesUtils.decodeNodeId(nodeId)!, { host: node.proxy.getProxyHost(), port: node.proxy.getProxyPort(), @@ -248,6 +249,7 @@ describe('gestaltsGestaltTrustByIdentity', () => { await grpcServer.stop(); await discovery.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); await 
nodeGraph.stop(); await proxy.stop(); await sigchain.stop(); diff --git a/tests/client/service/gestaltsGestaltTrustByNode.test.ts b/tests/client/service/gestaltsGestaltTrustByNode.test.ts index 1c1ad87b0..d8ecae06e 100644 --- a/tests/client/service/gestaltsGestaltTrustByNode.test.ts +++ b/tests/client/service/gestaltsGestaltTrustByNode.test.ts @@ -198,7 +198,6 @@ describe('gestaltsGestaltTrustByNode', () => { connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, @@ -207,6 +206,8 @@ describe('gestaltsGestaltTrustByNode', () => { nodeConnectionManager, logger: logger.getChild('nodeManager'), }); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); await nodeManager.setNode(nodesUtils.decodeNodeId(nodeId)!, { host: node.proxy.getProxyHost(), port: node.proxy.getProxyPort(), @@ -247,6 +248,7 @@ describe('gestaltsGestaltTrustByNode', () => { await grpcServer.stop(); await discovery.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); await nodeGraph.stop(); await proxy.stop(); await sigchain.stop(); diff --git a/tests/client/service/identitiesClaim.test.ts b/tests/client/service/identitiesClaim.test.ts index 39535394a..f03e1be07 100644 --- a/tests/client/service/identitiesClaim.test.ts +++ b/tests/client/service/identitiesClaim.test.ts @@ -2,6 +2,7 @@ import type { ClaimLinkIdentity } from '@/claims/types'; import type { NodeIdEncoded } from '@/nodes/types'; import type { IdentityId, ProviderId } from '@/identities/types'; import type { Host, Port } from '@/network/types'; +import type NodeManager from '@/nodes/NodeManager'; import fs from 'fs'; import path from 'path'; import os from 'os'; @@ -55,6 +56,7 @@ describe('identitiesClaim', () => { let mockedGenerateKeyPair: jest.SpyInstance; let mockedGenerateDeterministicKeyPair: jest.SpyInstance; let mockedAddClaim: jest.SpyInstance; + const dummyNodeManager = { 
setNode: jest.fn() } as unknown as NodeManager; beforeAll(async () => { const globalKeyPair = await testUtils.setupGlobalKeypair(); const claim = await claimsUtils.createClaim({ @@ -142,7 +144,7 @@ describe('identitiesClaim', () => { nodeGraph, logger: logger.getChild('nodeConnectionManager'), }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); const clientService = { identitiesClaim: identitiesClaim({ authenticate, diff --git a/tests/client/service/nodesAdd.test.ts b/tests/client/service/nodesAdd.test.ts index e0a07e171..94d925acc 100644 --- a/tests/client/service/nodesAdd.test.ts +++ b/tests/client/service/nodesAdd.test.ts @@ -104,7 +104,6 @@ describe('nodesAdd', () => { connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, @@ -113,6 +112,8 @@ describe('nodesAdd', () => { sigchain, logger, }); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); const clientService = { nodesAdd: nodesAdd({ authenticate, diff --git a/tests/client/service/nodesClaim.test.ts b/tests/client/service/nodesClaim.test.ts index bc04e2ae6..47102fe1a 100644 --- a/tests/client/service/nodesClaim.test.ts +++ b/tests/client/service/nodesClaim.test.ts @@ -134,7 +134,6 @@ describe('nodesClaim', () => { connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, @@ -143,6 +142,8 @@ describe('nodesClaim', () => { nodeConnectionManager, logger, }); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, diff --git a/tests/client/service/nodesFind.test.ts b/tests/client/service/nodesFind.test.ts index b01e157a1..21372cb4c 100644 --- a/tests/client/service/nodesFind.test.ts +++ 
b/tests/client/service/nodesFind.test.ts @@ -1,4 +1,5 @@ import type { Host, Port } from '@/network/types'; +import type NodeManager from '@/nodes/NodeManager'; import fs from 'fs'; import path from 'path'; import os from 'os'; @@ -108,7 +109,7 @@ describe('nodesFind', () => { connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: {} as NodeManager }); const clientService = { nodesFind: nodesFind({ authenticate, diff --git a/tests/client/service/nodesPing.test.ts b/tests/client/service/nodesPing.test.ts index d4954bb4a..6fd489d36 100644 --- a/tests/client/service/nodesPing.test.ts +++ b/tests/client/service/nodesPing.test.ts @@ -109,7 +109,6 @@ describe('nodesPing', () => { connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, @@ -118,6 +117,7 @@ describe('nodesPing', () => { sigchain, logger, }); + await nodeConnectionManager.start({ nodeManager }); const clientService = { nodesPing: nodesPing({ authenticate, diff --git a/tests/client/service/notificationsClear.test.ts b/tests/client/service/notificationsClear.test.ts index d8572c584..c2a1c5cd3 100644 --- a/tests/client/service/notificationsClear.test.ts +++ b/tests/client/service/notificationsClear.test.ts @@ -113,7 +113,6 @@ describe('notificationsClear', () => { connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, @@ -122,6 +121,8 @@ describe('notificationsClear', () => { sigchain, logger, }); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, diff --git a/tests/client/service/notificationsRead.test.ts b/tests/client/service/notificationsRead.test.ts index 
d78bb5eaa..24b8b9542 100644 --- a/tests/client/service/notificationsRead.test.ts +++ b/tests/client/service/notificationsRead.test.ts @@ -188,7 +188,6 @@ describe('notificationsRead', () => { connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, @@ -197,6 +196,8 @@ describe('notificationsRead', () => { sigchain, logger, }); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, diff --git a/tests/client/service/notificationsSend.test.ts b/tests/client/service/notificationsSend.test.ts index 7709f7b47..220edfe83 100644 --- a/tests/client/service/notificationsSend.test.ts +++ b/tests/client/service/notificationsSend.test.ts @@ -122,7 +122,6 @@ describe('notificationsSend', () => { connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, @@ -131,6 +130,8 @@ describe('notificationsSend', () => { sigchain, logger, }); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, diff --git a/tests/discovery/Discovery.test.ts b/tests/discovery/Discovery.test.ts index 1b6e0e120..da9acd92b 100644 --- a/tests/discovery/Discovery.test.ts +++ b/tests/discovery/Discovery.test.ts @@ -138,7 +138,6 @@ describe('Discovery', () => { connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, @@ -147,6 +146,8 @@ describe('Discovery', () => { nodeConnectionManager, logger: logger.getChild('nodeManager'), }); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); // Set up other gestalt nodeA = await 
PolykeyAgent.createPolykeyAgent({ password: password, @@ -202,6 +203,7 @@ describe('Discovery', () => { await nodeA.stop(); await nodeB.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); await nodeGraph.stop(); await proxy.stop(); await sigchain.stop(); diff --git a/tests/nodes/NodeConnection.test.ts b/tests/nodes/NodeConnection.test.ts index 5ef4ed470..35c084fa9 100644 --- a/tests/nodes/NodeConnection.test.ts +++ b/tests/nodes/NodeConnection.test.ts @@ -237,8 +237,6 @@ describe('${NodeConnection.name} test', () => { proxy: serverProxy, logger, }); - await serverNodeConnectionManager.start(); - serverNodeManager = new NodeManager({ db: serverDb, sigchain: serverSigchain, @@ -247,6 +245,8 @@ describe('${NodeConnection.name} test', () => { nodeConnectionManager: serverNodeConnectionManager, logger: logger, }); + await serverNodeManager.start(); + await serverNodeConnectionManager.start({ nodeManager: serverNodeManager }); serverVaultManager = await VaultManager.createVaultManager({ keyManager: serverKeyManager, vaultsPath: serverVaultsPath, @@ -355,6 +355,7 @@ describe('${NodeConnection.name} test', () => { await serverNodeGraph.stop(); await serverNodeGraph.destroy(); await serverNodeConnectionManager.stop(); + await serverNodeManager.stop(); await serverNotificationsManager.stop(); await serverNotificationsManager.destroy(); await agentTestUtils.closeTestAgentServer(agentServer); diff --git a/tests/nodes/NodeConnectionManager.general.test.ts b/tests/nodes/NodeConnectionManager.general.test.ts index d13c838de..6231e5dcc 100644 --- a/tests/nodes/NodeConnectionManager.general.test.ts +++ b/tests/nodes/NodeConnectionManager.general.test.ts @@ -1,5 +1,6 @@ import type { NodeAddress, NodeBucket, NodeId, SeedNodes } from '@/nodes/types'; import type { Host, Port } from '@/network/types'; +import type NodeManager from '@/nodes/NodeManager'; import fs from 'fs'; import path from 'path'; import os from 'os'; @@ -124,6 +125,7 @@ 
describe(`${NodeConnectionManager.name} general test`, () => { keysUtils, 'generateDeterministicKeyPair', ); + const dummyNodeManager = { setNode: jest.fn() } as unknown as NodeManager; beforeAll(async () => { mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { @@ -232,7 +234,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { proxy, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); try { // Case 1: node already exists in the local node graph (no contact required) const nodeId = nodeId1; @@ -259,7 +261,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { proxy, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); try { // Case 2: node can be found on the remote node const nodeId = nodeId1; @@ -300,7 +302,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { proxy, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); try { // Case 3: node exhausts all contacts and cannot find node const nodeId = nodeId1; @@ -354,7 +356,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); const targetNodeId = serverPKAgent.keyManager.getNodeId(); await nodeGraph.setNode(targetNodeId, { host: serverPKAgent.proxy.getProxyHost(), @@ -424,7 +426,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { proxy, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // To test this we need to... // 2. call relayHolePunchMessage // 3. 
check that the relevant call was made. @@ -461,7 +463,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { proxy, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // To test this we need to... // 2. call relayHolePunchMessage // 3. check that the relevant call was made. diff --git a/tests/nodes/NodeConnectionManager.lifecycle.test.ts b/tests/nodes/NodeConnectionManager.lifecycle.test.ts index 5871fa4d3..979403ec3 100644 --- a/tests/nodes/NodeConnectionManager.lifecycle.test.ts +++ b/tests/nodes/NodeConnectionManager.lifecycle.test.ts @@ -1,10 +1,6 @@ -import type { - NodeAddress, - NodeId, - NodeIdString, - SeedNodes, -} from '@/nodes/types'; +import type { NodeId, NodeIdString, SeedNodes } from '@/nodes/types'; import type { Host, Port } from '@/network/types'; +import type NodeManager from 'nodes/NodeManager'; import fs from 'fs'; import path from 'path'; import os from 'os'; @@ -91,6 +87,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keysUtils, 'generateDeterministicKeyPair', ); + const dummyNodeManager = { setNode: jest.fn() } as unknown as NodeManager; beforeAll(async () => { mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { @@ -202,7 +199,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { proxy, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnap connections const connections = nodeConnectionManager.connections; // @ts-ignore: kidnap connectionLocks @@ -227,7 +224,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { proxy, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnap connections const connections = 
nodeConnectionManager.connections; // @ts-ignore: kidnap connectionLocks @@ -261,7 +258,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { proxy, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnap connections const connections = nodeConnectionManager.connections; // @ts-ignore: kidnap connectionLocks @@ -289,7 +286,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { proxy, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnap connections const connections = nodeConnectionManager.connections; @@ -342,7 +339,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { connConnectTime: 500, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // Add the dummy node await nodeGraph.setNode(dummyNodeId, { host: '125.0.0.1' as Host, @@ -382,7 +379,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { proxy, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore accessing protected NodeConnectionMap const connections = nodeConnectionManager.connections; expect(connections.size).toBe(0); @@ -408,7 +405,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { proxy, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore accessing protected NodeConnectionMap const connections = nodeConnectionManager.connections; // @ts-ignore: kidnap connectionLocks @@ -441,7 +438,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { proxy, logger: 
nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnap connections const connections = nodeConnectionManager.connections; // @ts-ignore: kidnap connectionLocks @@ -474,7 +471,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { proxy, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // Do testing // set up connections await nodeConnectionManager.withConnF(remoteNodeId1, nop); @@ -506,32 +503,6 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { }); // New ping tests - test('should ping node', async () => { - // NodeConnectionManager under test - let nodeConnectionManager: NodeConnectionManager | undefined; - try { - nodeConnectionManager = new NodeConnectionManager({ - keyManager, - nodeGraph, - proxy, - logger: nodeConnectionManagerLogger, - }); - await nodeConnectionManager.start(); - // @ts-ignore: kidnap connections - const connections = nodeConnectionManager.connections; - await expect(nodeConnectionManager.pingNode(remoteNodeId1)).resolves.toBe( - true, - ); - const finalConnLock = connections.get( - remoteNodeId1.toString() as NodeIdString, - ); - // Check entry is in map and lock is released - expect(finalConnLock).toBeDefined(); - expect(finalConnLock?.lock.isLocked()).toBeFalsy(); - } finally { - await nodeConnectionManager?.stop(); - } - }); test('should ping node with address', async () => { // NodeConnectionManager under test let nodeConnectionManager: NodeConnectionManager | undefined; @@ -542,33 +513,12 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { proxy, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); - const remoteNodeAddress1: NodeAddress = { - host: remoteNode1.proxy.getProxyHost(), - port: remoteNode1.proxy.getProxyPort(), - }; - await 
nodeConnectionManager.pingNode(remoteNodeId1, remoteNodeAddress1); - } finally { - await nodeConnectionManager?.stop(); - } - }); - test('should ping node with address when connection exists', async () => { - // NodeConnectionManager under test - let nodeConnectionManager: NodeConnectionManager | undefined; - try { - nodeConnectionManager = new NodeConnectionManager({ - keyManager, - nodeGraph, - proxy, - logger: nodeConnectionManagerLogger, - }); - await nodeConnectionManager.start(); - const remoteNodeAddress1: NodeAddress = { - host: remoteNode1.proxy.getProxyHost(), - port: remoteNode1.proxy.getProxyPort(), - }; - await nodeConnectionManager.withConnF(remoteNodeId1, nop); - await nodeConnectionManager.pingNode(remoteNodeId1, remoteNodeAddress1); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); + await nodeConnectionManager.pingNode( + remoteNodeId1, + remoteNode1.proxy.getProxyHost(), + remoteNode1.proxy.getProxyPort(), + ); } finally { await nodeConnectionManager?.stop(); } @@ -583,36 +533,14 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { proxy, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // Pinging node expect( await nodeConnectionManager.pingNode( remoteNodeId1, - { host: '127.1.2.3' as Host, port: 55555 as Port }, - timerStart(1000), - ), - ).toEqual(false); - } finally { - await nodeConnectionManager?.stop(); - } - }); - test('should fail to ping node with wrong address when connection exists', async () => { - // NodeConnectionManager under test - let nodeConnectionManager: NodeConnectionManager | undefined; - try { - nodeConnectionManager = new NodeConnectionManager({ - keyManager, - nodeGraph, - proxy, - logger: nodeConnectionManagerLogger, - }); - await nodeConnectionManager.start(); - await nodeConnectionManager.withConnF(remoteNodeId1, nop); - expect( - await nodeConnectionManager.pingNode( - 
remoteNodeId1, - { host: '127.1.2.3' as Host, port: 55555 as Port }, + '127.1.2.3' as Host, + 55555 as Port, timerStart(1000), ), ).toEqual(false); @@ -630,20 +558,13 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { proxy, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); - const remoteNodeAddress1: NodeAddress = { - host: remoteNode1.proxy.getProxyHost(), - port: remoteNode1.proxy.getProxyPort(), - }; - const remoteNodeAddress2: NodeAddress = { - host: remoteNode2.proxy.getProxyHost(), - port: remoteNode2.proxy.getProxyPort(), - }; + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); expect( await nodeConnectionManager.pingNode( remoteNodeId1, - remoteNodeAddress2, + remoteNode2.proxy.getProxyHost(), + remoteNode2.proxy.getProxyPort(), timerStart(1000), ), ).toEqual(false); @@ -651,7 +572,8 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { expect( await nodeConnectionManager.pingNode( remoteNodeId2, - remoteNodeAddress1, + remoteNode1.proxy.getProxyHost(), + remoteNode1.proxy.getProxyPort(), timerStart(1000), ), ).toEqual(false); diff --git a/tests/nodes/NodeConnectionManager.seednodes.test.ts b/tests/nodes/NodeConnectionManager.seednodes.test.ts index b5ecf3e3c..ec7d6ee44 100644 --- a/tests/nodes/NodeConnectionManager.seednodes.test.ts +++ b/tests/nodes/NodeConnectionManager.seednodes.test.ts @@ -1,5 +1,6 @@ import type { NodeId, SeedNodes } from '@/nodes/types'; import type { Host, Port } from '@/network/types'; +import type NodeManager from 'nodes/NodeManager'; import fs from 'fs'; import path from 'path'; import os from 'os'; @@ -77,6 +78,7 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { keysUtils, 'generateDeterministicKeyPair', ); + const dummyNodeManager = { setNode: jest.fn() } as unknown as NodeManager; beforeAll(async () => { mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { @@ -187,7 +189,7 @@ 
describe(`${NodeConnectionManager.name} seed nodes test`, () => { seedNodes: dummySeedNodes, logger: logger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); const seedNodes = nodeConnectionManager.getSeedNodes(); expect(seedNodes).toContainEqual(nodeId1); expect(seedNodes).toContainEqual(nodeId2); @@ -210,7 +212,7 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { seedNodes: dummySeedNodes, logger: logger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); try { const seedNodes = nodeConnectionManager.getSeedNodes(); expect(seedNodes).toHaveLength(3); @@ -248,7 +250,7 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { host: serverHost, port: serverPort, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); await nodeConnectionManager.syncNodeGraph(); expect(await nodeGraph.getNode(nodeId1)).toBeDefined(); expect(await nodeGraph.getNode(nodeId2)).toBeDefined(); @@ -290,7 +292,7 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { connConnectTime: 500, logger: logger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // This should complete without error await nodeConnectionManager.syncNodeGraph(); // Information on remotes are found diff --git a/tests/nodes/NodeConnectionManager.termination.test.ts b/tests/nodes/NodeConnectionManager.termination.test.ts index 3fa14f66c..0422cf223 100644 --- a/tests/nodes/NodeConnectionManager.termination.test.ts +++ b/tests/nodes/NodeConnectionManager.termination.test.ts @@ -1,6 +1,7 @@ import type { AddressInfo } from 'net'; import type { NodeId, NodeIdString, SeedNodes } from '@/nodes/types'; import type { Host, Port, TLSConfig } from '@/network/types'; +import type NodeManager from '@/nodes/NodeManager'; import net from 'net'; 
import fs from 'fs'; import path from 'path'; @@ -85,6 +86,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keysUtils, 'generateDeterministicKeyPair', ); + const dummyNodeManager = { setNode: jest.fn() } as unknown as NodeManager; beforeEach(async () => { mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { @@ -247,7 +249,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // Attempt a connection await expect( @@ -287,7 +289,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // Attempt a connection const resultP = nodeConnectionManager.withConnF(dummyNodeId, async () => { @@ -330,7 +332,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // Attempt a connection const connectionAttemptP = nodeConnectionManager.withConnF( @@ -373,7 +375,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnapping connection map const connections = nodeConnectionManager.connections; @@ -430,7 +432,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnapping connection map const connections = nodeConnectionManager.connections; @@ -509,7 +511,7 @@ 
describe(`${NodeConnectionManager.name} termination test`, () => { logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnapping connection map const connections = nodeConnectionManager.connections; @@ -581,7 +583,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnapping connection map const connections = nodeConnectionManager.connections; @@ -658,7 +660,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnapping connection map const connections = nodeConnectionManager.connections; @@ -735,7 +737,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnapping connection map const connections = nodeConnectionManager.connections; diff --git a/tests/nodes/NodeConnectionManager.timeout.test.ts b/tests/nodes/NodeConnectionManager.timeout.test.ts index 7b93b596d..494350f52 100644 --- a/tests/nodes/NodeConnectionManager.timeout.test.ts +++ b/tests/nodes/NodeConnectionManager.timeout.test.ts @@ -1,5 +1,6 @@ import type { NodeId, NodeIdString, SeedNodes } from '@/nodes/types'; import type { Host, Port } from '@/network/types'; +import type NodeManager from 'nodes/NodeManager'; import fs from 'fs'; import path from 'path'; import os from 'os'; @@ -78,6 +79,7 @@ describe(`${NodeConnectionManager.name} timeout test`, () => { keysUtils, 'generateDeterministicKeyPair', ); + const dummyNodeManager = { 
setNode: jest.fn() } as unknown as NodeManager; beforeAll(async () => { mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { @@ -189,7 +191,7 @@ describe(`${NodeConnectionManager.name} timeout test`, () => { connTimeoutTime: 500, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnap connections const connections = nodeConnectionManager.connections; // @ts-ignore: kidnap connections @@ -226,7 +228,7 @@ describe(`${NodeConnectionManager.name} timeout test`, () => { connTimeoutTime: 1000, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnap connections const connections = nodeConnectionManager.connections; // @ts-ignore: kidnap connections @@ -278,7 +280,7 @@ describe(`${NodeConnectionManager.name} timeout test`, () => { proxy, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnap connections const connections = nodeConnectionManager.connections; // @ts-ignore: kidnap connections diff --git a/tests/nodes/NodeManager.test.ts b/tests/nodes/NodeManager.test.ts index 4e4c2f62b..605490dbf 100644 --- a/tests/nodes/NodeManager.test.ts +++ b/tests/nodes/NodeManager.test.ts @@ -116,7 +116,6 @@ describe(`${NodeManager.name} test`, () => { proxy, logger, }); - await nodeConnectionManager.start(); }); afterEach(async () => { mockedPingNode.mockClear(); @@ -169,6 +168,8 @@ describe(`${NodeManager.name} test`, () => { nodeConnectionManager, logger, }); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); // Set server node offline await server.stop(); @@ -240,6 +241,8 @@ describe(`${NodeManager.name} test`, () => { nodeConnectionManager, logger, }); + await nodeManager.start(); + 
await nodeConnectionManager.start({ nodeManager }); // We want to get the public key of the server const key = await nodeManager.getPublicKey(serverNodeId); @@ -425,6 +428,8 @@ describe(`${NodeManager.name} test`, () => { nodeConnectionManager, logger, }); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); await nodeGraph.setNode(xNodeId, xNodeAddress); @@ -445,17 +450,23 @@ describe(`${NodeManager.name} test`, () => { nodeConnectionManager: {} as NodeConnectionManager, logger, }); - const localNodeId = keyManager.getNodeId(); - const bucketIndex = 100; - const nodeId = nodesTestUtils.generateNodeIdForBucket( - localNodeId, - bucketIndex, - ); - await nodeManager.setNode(nodeId, {} as NodeAddress); + try { + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + const localNodeId = keyManager.getNodeId(); + const bucketIndex = 100; + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + ); + await nodeManager.setNode(nodeId, {} as NodeAddress); - // Checking bucket - const bucket = await nodeManager.getBucket(bucketIndex); - expect(bucket).toHaveLength(1); + // Checking bucket + const bucket = await nodeManager.getBucket(bucketIndex); + expect(bucket).toHaveLength(1); + } finally { + await nodeManager.stop(); + } }); test('should update a node if node exists', async () => { const nodeManager = new NodeManager({ @@ -466,29 +477,35 @@ describe(`${NodeManager.name} test`, () => { nodeConnectionManager: {} as NodeConnectionManager, logger, }); - const localNodeId = keyManager.getNodeId(); - const bucketIndex = 100; - const nodeId = nodesTestUtils.generateNodeIdForBucket( - localNodeId, - bucketIndex, - ); - await nodeManager.setNode(nodeId, { - host: '' as Host, - port: 11111 as Port, - }); + try { + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + const localNodeId = keyManager.getNodeId(); + const bucketIndex = 100; + const nodeId = 
nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + ); + await nodeManager.setNode(nodeId, { + host: '' as Host, + port: 11111 as Port, + }); - const nodeData = (await nodeGraph.getNode(nodeId))!; - await sleep(1100); + const nodeData = (await nodeGraph.getNode(nodeId))!; + await sleep(1100); - // Should update the node - await nodeManager.setNode(nodeId, { - host: '' as Host, - port: 22222 as Port, - }); + // Should update the node + await nodeManager.setNode(nodeId, { + host: '' as Host, + port: 22222 as Port, + }); - const newNodeData = (await nodeGraph.getNode(nodeId))!; - expect(newNodeData.address.port).not.toEqual(nodeData.address.port); - expect(newNodeData.lastUpdated).not.toEqual(nodeData.lastUpdated); + const newNodeData = (await nodeGraph.getNode(nodeId))!; + expect(newNodeData.address.port).not.toEqual(nodeData.address.port); + expect(newNodeData.lastUpdated).not.toEqual(nodeData.lastUpdated); + } finally { + await nodeManager.stop(); + } }); test('should not add node if bucket is full and old node is alive', async () => { const nodeManager = new NodeManager({ @@ -499,40 +516,46 @@ describe(`${NodeManager.name} test`, () => { nodeConnectionManager: {} as NodeConnectionManager, logger, }); - const localNodeId = keyManager.getNodeId(); - const bucketIndex = 100; - // Creating 20 nodes in bucket - for (let i = 1; i <= 20; i++) { + try { + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + const localNodeId = keyManager.getNodeId(); + const bucketIndex = 100; + // Creating 20 nodes in bucket + for (let i = 1; i <= 20; i++) { + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + i, + ); + await nodeManager.setNode(nodeId, { port: i } as NodeAddress); + } const nodeId = nodesTestUtils.generateNodeIdForBucket( localNodeId, bucketIndex, - i, ); - await nodeManager.setNode(nodeId, { port: i } as NodeAddress); + // Mocking ping + const nodeManagerPingMock = 
jest.spyOn(NodeManager.prototype, 'pingNode'); + nodeManagerPingMock.mockResolvedValue(true); + const oldestNodeId = (await nodeGraph.getOldestNode(bucketIndex)).pop(); + const oldestNode = await nodeGraph.getNode(oldestNodeId!); + // Waiting for a second to tick over + await sleep(1500); + // Adding a new node with bucket full + await nodeManager.setNode(nodeId, { port: 55555 } as NodeAddress, true); + // Bucket still contains max nodes + const bucket = await nodeManager.getBucket(bucketIndex); + expect(bucket).toHaveLength(nodeGraph.nodeBucketLimit); + // New node was not added + const node = await nodeGraph.getNode(nodeId); + expect(node).toBeUndefined(); + // Oldest node was updated + const oldestNodeNew = await nodeGraph.getNode(oldestNodeId!); + expect(oldestNodeNew!.lastUpdated).not.toEqual(oldestNode!.lastUpdated); + nodeManagerPingMock.mockRestore(); + } finally { + await nodeManager.stop(); } - const nodeId = nodesTestUtils.generateNodeIdForBucket( - localNodeId, - bucketIndex, - ); - // Mocking ping - const nodeManagerPingMock = jest.spyOn(NodeManager.prototype, 'pingNode'); - nodeManagerPingMock.mockResolvedValue(true); - const oldestNodeId = (await nodeGraph.getOldestNode(bucketIndex)).pop(); - const oldestNode = await nodeGraph.getNode(oldestNodeId!); - // Waiting for a second to tick over - await sleep(1100); - // Adding a new node with bucket full - await nodeManager.setNode(nodeId, { port: 55555 } as NodeAddress); - // Bucket still contains max nodes - const bucket = await nodeManager.getBucket(bucketIndex); - expect(bucket).toHaveLength(nodeGraph.nodeBucketLimit); - // New node was not added - const node = await nodeGraph.getNode(nodeId); - expect(node).toBeUndefined(); - // Oldest node was updated - const oldestNodeNew = await nodeGraph.getNode(oldestNodeId!); - expect(oldestNodeNew!.lastUpdated).not.toEqual(oldestNode!.lastUpdated); - nodeManagerPingMock.mockRestore(); }); test('should add node if bucket is full, old node is alive and force is 
set', async () => { const nodeManager = new NodeManager({ @@ -543,37 +566,48 @@ describe(`${NodeManager.name} test`, () => { nodeConnectionManager: {} as NodeConnectionManager, logger, }); - const localNodeId = keyManager.getNodeId(); - const bucketIndex = 100; - // Creating 20 nodes in bucket - for (let i = 1; i <= 20; i++) { + try { + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + const localNodeId = keyManager.getNodeId(); + const bucketIndex = 100; + // Creating 20 nodes in bucket + for (let i = 1; i <= 20; i++) { + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + i, + ); + await nodeManager.setNode(nodeId, { port: i } as NodeAddress); + } const nodeId = nodesTestUtils.generateNodeIdForBucket( localNodeId, bucketIndex, - i, ); - await nodeManager.setNode(nodeId, { port: i } as NodeAddress); + // Mocking ping + const nodeManagerPingMock = jest.spyOn(NodeManager.prototype, 'pingNode'); + nodeManagerPingMock.mockResolvedValue(true); + const oldestNodeId = (await nodeGraph.getOldestNode(bucketIndex)).pop(); + // Adding a new node with bucket full + await nodeManager.setNode( + nodeId, + { port: 55555 } as NodeAddress, + false, + true, + ); + // Bucket still contains max nodes + const bucket = await nodeManager.getBucket(bucketIndex); + expect(bucket).toHaveLength(nodeGraph.nodeBucketLimit); + // New node was added + const node = await nodeGraph.getNode(nodeId); + expect(node).toBeDefined(); + // Oldest node was removed + const oldestNodeNew = await nodeGraph.getNode(oldestNodeId!); + expect(oldestNodeNew).toBeUndefined(); + nodeManagerPingMock.mockRestore(); + } finally { + await nodeManager.stop(); } - const nodeId = nodesTestUtils.generateNodeIdForBucket( - localNodeId, - bucketIndex, - ); - // Mocking ping - const nodeManagerPingMock = jest.spyOn(NodeManager.prototype, 'pingNode'); - nodeManagerPingMock.mockResolvedValue(true); - const oldestNodeId = (await 
nodeGraph.getOldestNode(bucketIndex)).pop(); - // Adding a new node with bucket full - await nodeManager.setNode(nodeId, { port: 55555 } as NodeAddress, true); - // Bucket still contains max nodes - const bucket = await nodeManager.getBucket(bucketIndex); - expect(bucket).toHaveLength(nodeGraph.nodeBucketLimit); - // New node was added - const node = await nodeGraph.getNode(nodeId); - expect(node).toBeDefined(); - // Oldest node was removed - const oldestNodeNew = await nodeGraph.getNode(oldestNodeId!); - expect(oldestNodeNew).toBeUndefined(); - nodeManagerPingMock.mockRestore(); }); test('should add node if bucket is full and old node is dead', async () => { const nodeManager = new NodeManager({ @@ -584,41 +618,54 @@ describe(`${NodeManager.name} test`, () => { nodeConnectionManager: {} as NodeConnectionManager, logger, }); - const localNodeId = keyManager.getNodeId(); - const bucketIndex = 100; - // Creating 20 nodes in bucket - for (let i = 1; i <= 20; i++) { + try { + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + const localNodeId = keyManager.getNodeId(); + const bucketIndex = 100; + // Creating 20 nodes in bucket + for (let i = 1; i <= 20; i++) { + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + i, + ); + await nodeManager.setNode(nodeId, { port: i } as NodeAddress); + } const nodeId = nodesTestUtils.generateNodeIdForBucket( localNodeId, bucketIndex, - i, ); - await nodeManager.setNode(nodeId, { port: i } as NodeAddress); + // Mocking ping + const nodeManagerPingMock = jest.spyOn(NodeManager.prototype, 'pingNode'); + nodeManagerPingMock.mockResolvedValue(false); + const oldestNodeId = (await nodeGraph.getOldestNode(bucketIndex)).pop(); + // Adding a new node with bucket full + await nodeManager.setNode(nodeId, { port: 55555 } as NodeAddress, true); + // New node was added + const node = await nodeGraph.getNode(nodeId); + expect(node).toBeDefined(); + // Oldest node was removed + const 
oldestNodeNew = await nodeGraph.getNode(oldestNodeId!); + expect(oldestNodeNew).toBeUndefined(); + nodeManagerPingMock.mockRestore(); + } finally { + await nodeManager.stop(); } - const nodeId = nodesTestUtils.generateNodeIdForBucket( - localNodeId, - bucketIndex, - ); - // Mocking ping - const nodeManagerPingMock = jest.spyOn(NodeManager.prototype, 'pingNode'); - nodeManagerPingMock.mockResolvedValue(false); - const oldestNodeId = (await nodeGraph.getOldestNode(bucketIndex)).pop(); - // Adding a new node with bucket full - await nodeManager.setNode(nodeId, { port: 55555 } as NodeAddress, true); - // Bucket still contains max nodes - const bucket = await nodeManager.getBucket(bucketIndex); - expect(bucket).toHaveLength(nodeGraph.nodeBucketLimit); - // New node was added - const node = await nodeGraph.getNode(nodeId); - expect(node).toBeDefined(); - // Oldest node was removed - const oldestNodeNew = await nodeGraph.getNode(oldestNodeId!); - expect(oldestNodeNew).toBeUndefined(); - nodeManagerPingMock.mockRestore(); }); test('should add node when an incoming connection is established', async () => { let server: PolykeyAgent | undefined; + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: {} as NodeConnectionManager, + logger, + }); try { + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); server = await PolykeyAgent.createPolykeyAgent({ password: 'password', nodePath: path.join(dataDir, 'server'), @@ -655,101 +702,90 @@ describe(`${NodeManager.name} test`, () => { // Clean up await server?.stop(); await server?.destroy(); + await nodeManager.stop(); } }); test('should not add nodes to full bucket if pings succeeds', async () => { - const tempNodeGraph = await NodeGraph.createNodeGraph({ - db, - keyManager, - logger, - }); mockedPingNode.mockImplementation(async (_) => true); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, - 
nodeGraph: tempNodeGraph, + nodeGraph, nodeConnectionManager: dummyNodeConnectionManager, logger, }); - await nodeManager.start(); - const nodeId = keyManager.getNodeId(); - const address = { host: localhost, port }; - // Let's fill a bucket - for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { - const newNode = generateNodeIdForBucket(nodeId, 100, i); - await nodeManager.setNode(newNode, address); - } - - // Helpers - const listBucket = async (bucketIndex: number) => { - const bucket = await nodeManager.getBucket(bucketIndex); - return bucket?.map(([nodeId]) => nodesUtils.encodeNodeId(nodeId)); - }; + try { + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + const nodeId = keyManager.getNodeId(); + const address = { host: localhost, port }; + // Let's fill a bucket + for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { + const newNode = generateNodeIdForBucket(nodeId, 100, i); + await nodeManager.setNode(newNode, address); + } - // Pings succeed, node not added - mockedPingNode.mockImplementation(async (_) => true); - const newNode = generateNodeIdForBucket(nodeId, 100, 21); - await nodeManager.setNode(newNode, address); - expect(await listBucket(100)).not.toContain( - nodesUtils.encodeNodeId(newNode), - ); + // Helpers + const listBucket = async (bucketIndex: number) => { + const bucket = await nodeManager.getBucket(bucketIndex); + return bucket?.map(([nodeId]) => nodesUtils.encodeNodeId(nodeId)); + }; - // Clean up - await nodeManager.queueDrained(); - await nodeManager.stop(); - await tempNodeGraph.stop(); - await tempNodeGraph.destroy(); + // Pings succeed, node not added + mockedPingNode.mockImplementation(async (_) => true); + const newNode = generateNodeIdForBucket(nodeId, 100, 21); + await nodeManager.setNode(newNode, address); + expect(await listBucket(100)).not.toContain( + nodesUtils.encodeNodeId(newNode), + ); + } finally { + await nodeManager.stop(); + } }); test('should add nodes to full bucket if pings fail', 
async () => { - const tempNodeGraph = await NodeGraph.createNodeGraph({ - db, - keyManager, - logger, - }); mockedPingNode.mockImplementation(async (_) => true); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, - nodeGraph: tempNodeGraph, + nodeGraph, nodeConnectionManager: dummyNodeConnectionManager, logger, }); await nodeManager.start(); - const nodeId = keyManager.getNodeId(); - const address = { host: localhost, port }; - // Let's fill a bucket - for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { - const newNode = generateNodeIdForBucket(nodeId, 100, i); - await nodeManager.setNode(newNode, address); - } + try { + await nodeConnectionManager.start({ nodeManager }); + const nodeId = keyManager.getNodeId(); + const address = { host: localhost, port }; + // Let's fill a bucket + for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { + const newNode = generateNodeIdForBucket(nodeId, 100, i); + await nodeManager.setNode(newNode, address); + } - // Helpers - const listBucket = async (bucketIndex: number) => { - const bucket = await nodeManager.getBucket(bucketIndex); - return bucket?.map(([nodeId]) => nodesUtils.encodeNodeId(nodeId)); - }; - - // Pings fail, new nodes get added - mockedPingNode.mockImplementation(async (_) => false); - const newNode1 = generateNodeIdForBucket(nodeId, 100, 22); - const newNode2 = generateNodeIdForBucket(nodeId, 100, 23); - const newNode3 = generateNodeIdForBucket(nodeId, 100, 24); - await nodeManager.setNode(newNode1, address); - await nodeManager.setNode(newNode2, address); - await nodeManager.setNode(newNode3, address); - await nodeManager.queueDrained(); - const list = await listBucket(100); - expect(list).toContain(nodesUtils.encodeNodeId(newNode1)); - expect(list).toContain(nodesUtils.encodeNodeId(newNode2)); - expect(list).toContain(nodesUtils.encodeNodeId(newNode3)); - - // Clean up - await nodeManager.queueDrained(); - await nodeManager.stop(); - await tempNodeGraph.stop(); - await 
tempNodeGraph.destroy(); + // Helpers + const listBucket = async (bucketIndex: number) => { + const bucket = await nodeManager.getBucket(bucketIndex); + return bucket?.map(([nodeId]) => nodesUtils.encodeNodeId(nodeId)); + }; + + // Pings fail, new nodes get added + mockedPingNode.mockImplementation(async (_) => false); + const newNode1 = generateNodeIdForBucket(nodeId, 100, 22); + const newNode2 = generateNodeIdForBucket(nodeId, 100, 23); + const newNode3 = generateNodeIdForBucket(nodeId, 100, 24); + await nodeManager.setNode(newNode1, address); + await nodeManager.setNode(newNode2, address); + await nodeManager.setNode(newNode3, address); + await nodeManager.queueDrained(); + const list = await listBucket(100); + expect(list).toContain(nodesUtils.encodeNodeId(newNode1)); + expect(list).toContain(nodesUtils.encodeNodeId(newNode2)); + expect(list).toContain(nodesUtils.encodeNodeId(newNode3)); + } finally { + await nodeManager.stop(); + } }); test('should not block when bucket is full', async () => { const tempNodeGraph = await NodeGraph.createNodeGraph({ @@ -767,72 +803,65 @@ describe(`${NodeManager.name} test`, () => { logger, }); await nodeManager.start(); - const nodeId = keyManager.getNodeId(); - const address = { host: localhost, port }; - // Let's fill a bucket - for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { - const newNode = generateNodeIdForBucket(nodeId, 100, i); - await nodeManager.setNode(newNode, address); - } + try { + await nodeConnectionManager.start({ nodeManager }); + const nodeId = keyManager.getNodeId(); + const address = { host: localhost, port }; + // Let's fill a bucket + for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { + const newNode = generateNodeIdForBucket(nodeId, 100, i); + await nodeManager.setNode(newNode, address); + } - // Set node does not block - const delayPing = promise(); - mockedPingNode.mockImplementation(async (_) => { - await delayPing.p; - return true; - }); - const newNode4 = generateNodeIdForBucket(nodeId, 
100, 25); - await expect( - nodeManager.setNode(newNode4, address), - ).resolves.toBeUndefined(); - delayPing.resolveP(null); - await nodeManager.queueDrained(); - - // Clean up - await nodeManager.queueDrained(); - await nodeManager.stop(); - await tempNodeGraph.stop(); - await tempNodeGraph.destroy(); + // Set node does not block + const delayPing = promise(); + mockedPingNode.mockImplementation(async (_) => { + await delayPing.p; + return true; + }); + const newNode4 = generateNodeIdForBucket(nodeId, 100, 25); + await expect( + nodeManager.setNode(newNode4, address), + ).resolves.toBeUndefined(); + delayPing.resolveP(null); + await nodeManager.queueDrained(); + } finally { + await nodeManager.stop(); + await tempNodeGraph.stop(); + await tempNodeGraph.destroy(); + } }); test('should block when blocking is set to true', async () => { - const tempNodeGraph = await NodeGraph.createNodeGraph({ - db, - keyManager, - logger, - }); mockedPingNode.mockImplementation(async (_) => true); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, - nodeGraph: tempNodeGraph, + nodeGraph, nodeConnectionManager: dummyNodeConnectionManager, logger, }); await nodeManager.start(); - const nodeId = keyManager.getNodeId(); - const address = { host: localhost, port }; - // Let's fill a bucket - for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { - const newNode = generateNodeIdForBucket(nodeId, 100, i); - await nodeManager.setNode(newNode, address); - } + try { + await nodeConnectionManager.start({ nodeManager }); + const nodeId = keyManager.getNodeId(); + const address = { host: localhost, port }; + // Let's fill a bucket + for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { + const newNode = generateNodeIdForBucket(nodeId, 100, i); + await nodeManager.setNode(newNode, address); + } - // Set node can block - mockedPingNode.mockClear(); - mockedPingNode.mockImplementation(async (_) => { - return true; - }); - const newNode5 = generateNodeIdForBucket(nodeId, 
100, 25); - await expect( - nodeManager.setNode(newNode5, address, true), - ).resolves.toBeUndefined(); - expect(mockedPingNode).toBeCalled(); - - // CLean up - await nodeManager.queueDrained(); - await nodeManager.stop(); - await tempNodeGraph.stop(); - await tempNodeGraph.destroy(); + // Set node can block + mockedPingNode.mockClear(); + mockedPingNode.mockImplementation(async () => true); + const newNode5 = generateNodeIdForBucket(nodeId, 100, 25); + await expect( + nodeManager.setNode(newNode5, address, true), + ).resolves.toBeUndefined(); + expect(mockedPingNode).toBeCalled(); + } finally { + await nodeManager.stop(); + } }); }); diff --git a/tests/notifications/NotificationsManager.test.ts b/tests/notifications/NotificationsManager.test.ts index 37be01f56..b382ea49d 100644 --- a/tests/notifications/NotificationsManager.test.ts +++ b/tests/notifications/NotificationsManager.test.ts @@ -118,7 +118,6 @@ describe('NotificationsManager', () => { proxy, logger, }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, @@ -127,6 +126,8 @@ describe('NotificationsManager', () => { nodeGraph, logger, }); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); // Set up node for receiving notifications receiver = await PolykeyAgent.createPolykeyAgent({ password: password, diff --git a/tests/vaults/VaultManager.test.ts b/tests/vaults/VaultManager.test.ts index f37dfba38..5236215b0 100644 --- a/tests/vaults/VaultManager.test.ts +++ b/tests/vaults/VaultManager.test.ts @@ -7,6 +7,7 @@ import type { } from '@/vaults/types'; import type NotificationsManager from '@/notifications/NotificationsManager'; import type { Host, Port, TLSConfig } from '@/network/types'; +import type NodeManager from '@/nodes/NodeManager'; import fs from 'fs'; import os from 'os'; import path from 'path'; @@ -493,7 +494,7 @@ describe('VaultManager', () => { logger: logger.getChild('Remote Keynode 1'), nodePath: path.join(allDataDir, 
'remoteKeynode1'), networkConfig: { - proxyHost: '127.0.0.1' as Host, + proxyHost: localHost, }, }); remoteKeynode1Id = remoteKeynode1.keyManager.getNodeId(); @@ -503,7 +504,7 @@ describe('VaultManager', () => { logger: logger.getChild('Remote Keynode 2'), nodePath: path.join(allDataDir, 'remoteKeynode2'), networkConfig: { - proxyHost: '127.0.0.1' as Host, + proxyHost: localHost, }, }); remoteKeynode2Id = remoteKeynode2.keyManager.getNodeId(); @@ -581,7 +582,9 @@ describe('VaultManager', () => { proxy, logger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ + nodeManager: { setNode: jest.fn() } as unknown as NodeManager, + }); await nodeGraph.setNode(remoteKeynode1Id, { host: remoteKeynode1.proxy.getProxyHost(), @@ -1454,7 +1457,7 @@ describe('VaultManager', () => { password: 'password', nodePath: path.join(dataDir, 'remoteNode'), networkConfig: { - proxyHost: '127.0.0.1' as Host, + proxyHost: localHost, }, logger, }); @@ -1496,7 +1499,9 @@ describe('VaultManager', () => { proxy, connConnectTime: 1000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ + nodeManager: { setNode: jest.fn() } as unknown as NodeManager, + }); const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyManager, From b4fbc0b6ca862d04c605ccafa3cac11cce98453b Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Fri, 8 Apr 2022 18:28:22 +1000 Subject: [PATCH 18/39] feat: implemented `NodeManager.refreshBucket()` This method performs the kademlia `refreshBucket` operation. It selects a random node within the bucket and performs a search for that node. The process exchanges node information with any nodes it connects to.
#345 --- src/nodes/NodeManager.ts | 28 ++++++++++++++++++++++++-- src/nodes/utils.ts | 42 +++++++++++++++++++++++++++++++++++++++ tests/nodes/utils.test.ts | 18 +++++++++++++++++ 3 files changed, 86 insertions(+), 2 deletions(-) diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index f94c5315d..61fac9838 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -5,11 +5,17 @@ import type KeyManager from '../keys/KeyManager'; import type { PublicKeyPem } from '../keys/types'; import type Sigchain from '../sigchain/Sigchain'; import type { ChainData, ChainDataEncoded } from '../sigchain/types'; -import type { NodeId, NodeAddress, NodeBucket } from '../nodes/types'; +import type { + NodeId, + NodeAddress, + NodeBucket, + NodeBucketIndex, +} from '../nodes/types'; import type { ClaimEncoded } from '../claims/types'; import type { Timer } from '../types'; import Logger from '@matrixai/logger'; import { StartStop, ready } from '@matrixai/async-init/dist/StartStop'; +import { IdInternal } from '@matrixai/id'; import * as nodesErrors from './errors'; import * as nodesUtils from './utils'; import * as networkUtils from '../network/utils'; @@ -514,7 +520,7 @@ class NodeManager { // return await this.nodeGraph.getAllBuckets(tran); // } - // FIXME + // FIXME potentially confusing name, should we rename this to renewBuckets? /** * To be called on key renewal. Re-orders all nodes in all buckets with respect * to the new node ID. @@ -609,6 +615,24 @@ class NodeManager { public async queueDrained(): Promise { await this.setNodeQueueEmpty; } + + /** + * Kademlia refresh bucket operation. + * It picks a random node within a bucket and does a search for that node. + * Connections during the search will will share node information with other + * nodes. 
+ * @param bucketIndex + */ + private async refreshBucket(bucketIndex: NodeBucketIndex) { + // We need to generate a random nodeId for this bucket + const nodeId = this.keyManager.getNodeId(); + const bucketRandomNodeId = nodesUtils.generateRandomNodeIdForBucket( + nodeId, + bucketIndex, + ); + // We then need to start a findNode procedure + await this.nodeConnectionManager.findNode(bucketRandomNodeId); + } } export default NodeManager; diff --git a/src/nodes/utils.ts b/src/nodes/utils.ts index 76bb4058a..c61a6cd58 100644 --- a/src/nodes/utils.ts +++ b/src/nodes/utils.ts @@ -7,6 +7,7 @@ import type { import { IdInternal } from '@matrixai/id'; import lexi from 'lexicographic-integer'; import { bytes2BigInt, bufferSplit } from '../utils'; +import * as keysUtils from '../keys/utils'; // FIXME: const prefixBuffer = Buffer.from([33]); @@ -283,6 +284,44 @@ function bucketSortByDistance( } } +function generateRandomDistanceForBucket(bucketIndex: NodeBucketIndex): NodeId { + const buffer = keysUtils.getRandomBytesSync(32); + // Calculate the most significant byte for bucket + const base = bucketIndex / 8; + const mSigByte = Math.floor(base); + const mSigBit = (base - mSigByte) * 8 + 1; + const mSigByteIndex = buffer.length - mSigByte - 1; + // Creating masks + // AND mask should look like 0b00011111 + // OR mask should look like 0b00010000 + const shift = 8 - mSigBit; + const andMask = 0b11111111 >>> shift; + const orMask = 0b10000000 >>> shift; + let byte = buffer[mSigByteIndex]; + byte = byte & andMask; // Forces 0 for bits above bucket bit + byte = byte | orMask; // Forces 1 in the desired bucket bit + buffer[mSigByteIndex] = byte; + // Zero out byte 'above' mSigByte + for (let byteIndex = 0; byteIndex < mSigByteIndex; byteIndex++) { + buffer[byteIndex] = 0; + } + return IdInternal.fromBuffer(buffer); +} + +function xOrNodeId(node1: NodeId, node2: NodeId): NodeId { + const xOrNodeArray = node1.map((byte, i) => byte ^ node2[i]); + const xOrNodeBuffer = 
Buffer.from(xOrNodeArray); + return IdInternal.fromBuffer(xOrNodeBuffer); +} + +function generateRandomNodeIdForBucket( + nodeId: NodeId, + bucket: NodeBucketIndex, +): NodeId { + const randomDistanceForBucket = generateRandomDistanceForBucket(bucket); + return xOrNodeId(nodeId, randomDistanceForBucket); +} + export { prefixBuffer, encodeNodeId, @@ -299,4 +338,7 @@ export { parseLastUpdatedBucketDbKey, nodeDistance, bucketSortByDistance, + generateRandomDistanceForBucket, + xOrNodeId, + generateRandomNodeIdForBucket, }; diff --git a/tests/nodes/utils.test.ts b/tests/nodes/utils.test.ts index 59d565812..c87a82f26 100644 --- a/tests/nodes/utils.test.ts +++ b/tests/nodes/utils.test.ts @@ -171,4 +171,22 @@ describe('nodes/utils', () => { i++; } }); + test('should generate random distance for a bucket', async () => { + // Const baseNodeId = testNodesUtils.generateRandomNodeId(); + const zeroNodeId = IdInternal.fromBuffer(Buffer.alloc(32, 0)); + for (let i = 0; i < 255; i++) { + const randomDistance = nodesUtils.generateRandomDistanceForBucket(i); + expect(nodesUtils.bucketIndex(zeroNodeId, randomDistance)).toEqual(i); + } + }); + test('should generate random NodeId for a bucket', async () => { + const baseNodeId = testNodesUtils.generateRandomNodeId(); + for (let i = 0; i < 255; i++) { + const randomDistance = nodesUtils.generateRandomNodeIdForBucket( + baseNodeId, + i, + ); + expect(nodesUtils.bucketIndex(baseNodeId, randomDistance)).toEqual(i); + } + }); }); From 077195ff6aa17c9a28fc4270ca4951b7e8849dd8 Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Mon, 11 Apr 2022 16:52:13 +1000 Subject: [PATCH 19/39] feat: implemented no activity timers and queuing for `refreshBucket` Added queuing for `refreshBucket`. This means that buckets will be refreshed one at a time sequentially. This is to avoid doing a lot of costly refreshing all at once. Added no activity for buckets. 
If a bucket hasn't been touched for a while, 1 hour by default, it will add a refresh bucket operation to the queue. Timers are disabled for buckets already in the queue. Only 1 timer is used for all buckets since only one of them can have the shortest timer and that's all we really care about. #345 --- src/nodes/NodeManager.ts | 168 +++++++++++++++++++++++++++++++- src/utils/utils.ts | 12 ++- tests/nodes/NodeManager.test.ts | 107 +++++++++++++++++++- 3 files changed, 278 insertions(+), 9 deletions(-) diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index 61fac9838..233e1b316 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -13,9 +13,9 @@ import type { } from '../nodes/types'; import type { ClaimEncoded } from '../claims/types'; import type { Timer } from '../types'; +import type { PromiseType } from '../utils/utils'; import Logger from '@matrixai/logger'; import { StartStop, ready } from '@matrixai/async-init/dist/StartStop'; -import { IdInternal } from '@matrixai/id'; import * as nodesErrors from './errors'; import * as nodesUtils from './utils'; import * as networkUtils from '../network/utils'; @@ -24,7 +24,7 @@ import * as utilsPB from '../proto/js/polykey/v1/utils/utils_pb'; import * as claimsErrors from '../claims/errors'; import * as sigchainUtils from '../sigchain/utils'; import * as claimsUtils from '../claims/utils'; -import { timerStart } from '../utils/utils'; +import { promise, timerStart } from '../utils/utils'; interface NodeManager extends StartStop {} @StartStop() @@ -47,6 +47,16 @@ class NodeManager { protected setNodeQueueRunner: Promise; protected setNodeQueueEmpty: Promise; protected setNodeQueueDrained: () => void; + // Refresh bucket timer + protected refreshBucketDeadlineMap: Map = new Map(); + protected refreshBucketTimer: NodeJS.Timer; + protected refreshBucketNext: NodeBucketIndex; + public readonly refreshBucketTimerDefault; + protected refreshBucketQueue: Set = new Set(); + protected 
refreshBucketQueueRunning: boolean = false; + protected refreshBucketQueueRunner: Promise; + protected refreshBucketQueuePlug_: PromiseType; + protected refreshBucketQueueDrained_: PromiseType; constructor({ db, @@ -54,6 +64,7 @@ class NodeManager { sigchain, nodeConnectionManager, nodeGraph, + refreshBucketTimerDefault = 3600000, // 1 hour in milliseconds logger, }: { db: DB; @@ -61,6 +72,7 @@ class NodeManager { sigchain: Sigchain; nodeConnectionManager: NodeConnectionManager; nodeGraph: NodeGraph; + refreshBucketTimerDefault?: number; logger?: Logger; }) { this.logger = logger ?? new Logger(this.constructor.name); @@ -69,17 +81,22 @@ class NodeManager { this.sigchain = sigchain; this.nodeConnectionManager = nodeConnectionManager; this.nodeGraph = nodeGraph; + this.refreshBucketTimerDefault = refreshBucketTimerDefault; } public async start() { this.logger.info(`Starting ${this.constructor.name}`); this.setNodeQueueRunner = this.startSetNodeQueue(); + this.startRefreshBucketTimers(); + this.refreshBucketQueueRunner = this.startRefreshBucketQueue(); this.logger.info(`Started ${this.constructor.name}`); } public async stop() { this.logger.info(`Stopping ${this.constructor.name}`); await this.stopSetNodeQueue(); + await this.stopRefreshBucketTimers(); + await this.stopRefreshBucketQueue(); this.logger.info(`Stopped ${this.constructor.name}`); } @@ -419,6 +436,8 @@ class NodeManager { // Either already exists or has room in the bucket // We want to add or update the node await this.nodeGraph.setNode(nodeId, nodeAddress, tran); + // Updating the refreshBucket timer + this.refreshBucketUpdateDeadline(bucketIndex); } else { // We want to add a node but the bucket is full // We need to ping the oldest node @@ -434,6 +453,8 @@ class NodeManager { ); await this.nodeGraph.unsetNode(oldNodeId, tran); await this.nodeGraph.setNode(nodeId, nodeAddress, tran); + // Updating the refreshBucket timer + this.refreshBucketUpdateDeadline(bucketIndex); return; } if (blocking) { @@ 
-491,6 +512,8 @@ class NodeManager { ); const node = (await this.nodeGraph.getNode(nodeId))!; await this.nodeGraph.setNode(nodeId, node.address); + // Updating the refreshBucket timer + this.refreshBucketUpdateDeadline(bucketIndex); } else { this.logger.debug(`Ping failed for ${nodesUtils.encodeNodeId(nodeId)}`); // Otherwise we remove the node @@ -502,6 +525,8 @@ class NodeManager { if (count < this.nodeGraph.nodeBucketLimit) { this.logger.debug(`Bucket ${bucketIndex} now has room, adding new node`); await this.nodeGraph.setNode(nodeId, nodeAddress); + // Updating the refreshBucket timer + this.refreshBucketUpdateDeadline(bucketIndex); } } @@ -623,7 +648,7 @@ class NodeManager { * nodes. * @param bucketIndex */ - private async refreshBucket(bucketIndex: NodeBucketIndex) { + public async refreshBucket(bucketIndex: NodeBucketIndex) { // We need to generate a random nodeId for this bucket const nodeId = this.keyManager.getNodeId(); const bucketRandomNodeId = nodesUtils.generateRandomNodeIdForBucket( @@ -633,6 +658,143 @@ class NodeManager { // We then need to start a findNode procedure await this.nodeConnectionManager.findNode(bucketRandomNodeId); } + + // Refresh bucket activity timer methods + + private startRefreshBucketTimers() { + // Setting initial bucket to refresh + this.refreshBucketNext = 0; + // Setting initial deadline + this.refreshBucketTimerReset(this.refreshBucketTimerDefault); + + for ( + let bucketIndex = 0; + bucketIndex < this.nodeGraph.nodeIdBits; + bucketIndex++ + ) { + const deadline = Date.now() + this.refreshBucketTimerDefault; + this.refreshBucketDeadlineMap.set(bucketIndex, deadline); + } + } + + private async stopRefreshBucketTimers() { + clearTimeout(this.refreshBucketTimer); + } + + private refreshBucketTimerReset(timeout: number) { + clearTimeout(this.refreshBucketTimer); + this.refreshBucketTimer = setTimeout(() => { + this.refreshBucketRefreshTimer(); + }, timeout); + } + + public refreshBucketUpdateDeadline(bucketIndex: 
NodeBucketIndex) { + // Update the map deadline + this.refreshBucketDeadlineMap.set( + bucketIndex, + Date.now() + this.refreshBucketTimerDefault, + ); + // If the bucket was pending a refresh we remove it + this.refreshBucketQueueRemove(bucketIndex); + if (bucketIndex === this.refreshBucketNext) { + // Bucket is same as next bucket, this affects the timer + this.refreshBucketRefreshTimer(); + } + } + + private refreshBucketRefreshTimer() { + // Getting new closest deadline + let closestBucket = this.refreshBucketNext; + let closestDeadline = Date.now() + this.refreshBucketTimerDefault; + const now = Date.now(); + for (const [bucketIndex, deadline] of this.refreshBucketDeadlineMap) { + // Skip any queued buckets marked by 0 deadline + if (deadline === 0) continue; + if (deadline <= now) { + // Deadline for this has already passed, we add it to the queue + this.refreshBucketQueueAdd(bucketIndex); + continue; + } + if (deadline < closestDeadline) { + closestBucket = bucketIndex; + closestDeadline = deadline; + } + } + // Working out time left + const timeout = closestDeadline - Date.now(); + this.logger.debug( + `Refreshing refreshBucket timer with new timeout ${timeout}`, + ); + // Updating timer and next + this.refreshBucketNext = closestBucket; + this.refreshBucketTimerReset(timeout); + } + + // Refresh bucket async queue methods + + public refreshBucketQueueAdd(bucketIndex: NodeBucketIndex) { + this.logger.debug(`Adding bucket ${bucketIndex} to queue`); + this.refreshBucketDeadlineMap.set(bucketIndex, 0); + this.refreshBucketQueue.add(bucketIndex); + this.refreshBucketQueueUnplug(); + } + + public refreshBucketQueueRemove(bucketIndex: NodeBucketIndex) { + this.logger.debug(`Removing bucket ${bucketIndex} from queue`); + this.refreshBucketQueue.delete(bucketIndex); + } + + public async refreshBucketQueueDrained() { + await this.refreshBucketQueueDrained_.p; + } + + private async startRefreshBucketQueue(): Promise { + this.refreshBucketQueueRunning = true; + 
this.refreshBucketQueuePlug(); + let iterator: IterableIterator | undefined; + const pace = async () => { + // Wait for plug + await this.refreshBucketQueuePlug_.p; + if (iterator == null) { + iterator = this.refreshBucketQueue[Symbol.iterator](); + } + return this.refreshBucketQueueRunning; + }; + while (await pace()) { + const bucketIndex: NodeBucketIndex = iterator?.next().value; + if (bucketIndex == null) { + // Iterator is empty, plug and continue + iterator = undefined; + this.refreshBucketQueuePlug(); + continue; + } + // Do the job + this.logger.debug( + `processing refreshBucket for bucket ${bucketIndex}, ${this.refreshBucketQueue.size} left in queue`, + ); + await this.refreshBucket(bucketIndex); + // Remove from queue and update bucket deadline + this.refreshBucketQueue.delete(bucketIndex); + this.refreshBucketUpdateDeadline(bucketIndex); + } + this.logger.debug('startRefreshBucketQueue has ended'); + } + + private async stopRefreshBucketQueue(): Promise { + // Flag end and await queue finish + this.refreshBucketQueueRunning = false; + this.refreshBucketQueueUnplug(); + } + + private refreshBucketQueuePlug() { + this.refreshBucketQueuePlug_ = promise(); + this.refreshBucketQueueDrained_?.resolveP(); + } + + private refreshBucketQueueUnplug() { + this.refreshBucketQueueDrained_ = promise(); + this.refreshBucketQueuePlug_?.resolveP(); + } } export default NodeManager; diff --git a/src/utils/utils.ts b/src/utils/utils.ts index 7c623e8dd..0b99a8a43 100644 --- a/src/utils/utils.ts +++ b/src/utils/utils.ts @@ -170,14 +170,16 @@ function promisify< }; } -/** - * Deconstructed promise - */ -function promise(): { +export type PromiseType = { p: Promise; resolveP: (value: T | PromiseLike) => void; rejectP: (reason?: any) => void; -} { +}; + +/** + * Deconstructed promise + */ +function promise(): PromiseType { let resolveP, rejectP; const p = new Promise((resolve, reject) => { resolveP = resolve; diff --git a/tests/nodes/NodeManager.test.ts 
b/tests/nodes/NodeManager.test.ts index 605490dbf..04880a0ff 100644 --- a/tests/nodes/NodeManager.test.ts +++ b/tests/nodes/NodeManager.test.ts @@ -24,7 +24,7 @@ import { generateNodeIdForBucket } from './utils'; describe(`${NodeManager.name} test`, () => { const password = 'password'; - const logger = new Logger(`${NodeManager.name} test`, LogLevel.DEBUG, [ + const logger = new Logger(`${NodeManager.name} test`, LogLevel.WARN, [ new StreamHandler(), ]); let dataDir: string; @@ -864,4 +864,109 @@ describe(`${NodeManager.name} test`, () => { await nodeManager.stop(); } }); + test('should update deadline when updating a bucket', async () => { + const refreshBucketTimeout = 100000; + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: dummyNodeConnectionManager, + refreshBucketTimerDefault: refreshBucketTimeout, + logger, + }); + const mockRefreshBucket = jest.spyOn( + NodeManager.prototype, + 'refreshBucket', + ); + try { + mockRefreshBucket.mockImplementation(async () => {}); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + // @ts-ignore: kidnap map + const deadlineMap = nodeManager.refreshBucketDeadlineMap; + // Getting starting value + const bucket = 0; + const startingDeadline = deadlineMap.get(bucket); + const nodeId = nodesTestUtils.generateNodeIdForBucket( + keyManager.getNodeId(), + bucket, + ); + await sleep(1000); + await nodeManager.setNode(nodeId, {} as NodeAddress); + // Deadline should be updated + const newDeadline = deadlineMap.get(bucket); + expect(newDeadline).not.toEqual(startingDeadline); + } finally { + mockRefreshBucket.mockRestore(); + await nodeManager.stop(); + } + }); + test('should add buckets to the queue when exceeding deadline', async () => { + const refreshBucketTimeout = 100; + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: dummyNodeConnectionManager, + 
refreshBucketTimerDefault: refreshBucketTimeout, + logger, + }); + const mockRefreshBucket = jest.spyOn( + NodeManager.prototype, + 'refreshBucket', + ); + const mockRefreshBucketQueueAdd = jest.spyOn( + NodeManager.prototype, + 'refreshBucketQueueAdd', + ); + try { + mockRefreshBucket.mockImplementation(async () => {}); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + // Getting starting value + expect(mockRefreshBucketQueueAdd).toHaveBeenCalledTimes(0); + await sleep(200); + expect(mockRefreshBucketQueueAdd).toHaveBeenCalledTimes(256); + } finally { + mockRefreshBucketQueueAdd.mockRestore(); + mockRefreshBucket.mockRestore(); + await nodeManager.stop(); + } + }); + test('should digest queue to refresh buckets', async () => { + const refreshBucketTimeout = 1000000; + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: dummyNodeConnectionManager, + refreshBucketTimerDefault: refreshBucketTimeout, + logger, + }); + const mockRefreshBucket = jest.spyOn( + NodeManager.prototype, + 'refreshBucket', + ); + try { + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + mockRefreshBucket.mockImplementation(async () => {}); + nodeManager.refreshBucketQueueAdd(1); + nodeManager.refreshBucketQueueAdd(2); + nodeManager.refreshBucketQueueAdd(3); + nodeManager.refreshBucketQueueAdd(4); + nodeManager.refreshBucketQueueAdd(5); + await nodeManager.refreshBucketQueueDrained(); + expect(mockRefreshBucket).toHaveBeenCalledTimes(5); + + // Add buckets to queue + // check if refresh buckets was called + } finally { + mockRefreshBucket.mockRestore(); + await nodeManager.stop(); + } + }); }); From 2b8d971bed4afa5ae8182cb57143711e512b39fb Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Mon, 11 Apr 2022 18:04:26 +1000 Subject: [PATCH 20/39] feat: refreshing buckets when entering network `nodeConnectionManager.syncNodeGraph` now refreshes all buckets above 
the closest node as per the kademlia spec. This means adding a lot of buckets to the refresh bucket queue when an agent is started. #345 --- src/nodes/NodeConnectionManager.ts | 18 +++- src/nodes/NodeGraph.ts | 8 +- src/nodes/NodeManager.ts | 2 +- .../NodeConnectionManager.seednodes.test.ts | 98 ++++++++++++++++++- 4 files changed, 114 insertions(+), 12 deletions(-) diff --git a/src/nodes/NodeConnectionManager.ts b/src/nodes/NodeConnectionManager.ts index 544fd5687..fb4f04e79 100644 --- a/src/nodes/NodeConnectionManager.ts +++ b/src/nodes/NodeConnectionManager.ts @@ -590,9 +590,21 @@ class NodeConnectionManager { timer, ); for (const [nodeId, nodeData] of nodes) { - // FIXME: this should be the `nodeManager.setNode` - // FIXME: no tran needed - await this.nodeGraph.setNode(nodeId, nodeData.address); + // FIXME: needs to ping the node right? we want to be non-blocking + try { + // FIXME: no tran needed + await this.nodeManager?.setNode(nodeId, nodeData.address); + } catch (e) { + if (!(e instanceof nodesErrors.ErrorNodeGraphSameNodeId)) throw e; + } + } + // Refreshing every bucket above the closest node + const [closestNode] = ( + await this.nodeGraph.getClosestNodes(this.keyManager.getNodeId(), 1) + ).pop()!; + const [bucketIndex] = this.nodeGraph.bucketIndex(closestNode); + for (let i = bucketIndex; i < this.nodeGraph.nodeIdBits; i++) { + this.nodeManager?.refreshBucketQueueAdd(i); } } } diff --git a/src/nodes/NodeGraph.ts b/src/nodes/NodeGraph.ts index d7437b389..3baf60299 100644 --- a/src/nodes/NodeGraph.ts +++ b/src/nodes/NodeGraph.ts @@ -697,10 +697,10 @@ class NodeGraph { // 2. iterate over 0 ---> T-1 // 3. iterate over T+1 ---> K // Need to work out the relevant bucket to start from - const startingBucket = nodesUtils.bucketIndex( - this.keyManager.getNodeId(), - nodeId, - ); + const localNodeId = this.keyManager.getNodeId(); + const startingBucket = localNodeId.equals(nodeId) + ? 
0 + : nodesUtils.bucketIndex(this.keyManager.getNodeId(), nodeId); // Getting the whole target's bucket first const nodeIds: NodeBucket = await this.getBucket( startingBucket, diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index 233e1b316..37fdf3cf0 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -115,7 +115,7 @@ class NodeManager { // We need to attempt a connection using the proxies // For now we will just do a forward connect + relay message const targetAddress = - address ?? (await this.nodeConnectionManager.findNode(nodeId)); + address ?? (await this.nodeConnectionManager.findNode(nodeId))!; const targetHost = await networkUtils.resolveHost(targetAddress.host); return await this.nodeConnectionManager.pingNode( nodeId, diff --git a/tests/nodes/NodeConnectionManager.seednodes.test.ts b/tests/nodes/NodeConnectionManager.seednodes.test.ts index ec7d6ee44..4d47afb0c 100644 --- a/tests/nodes/NodeConnectionManager.seednodes.test.ts +++ b/tests/nodes/NodeConnectionManager.seednodes.test.ts @@ -1,12 +1,13 @@ import type { NodeId, SeedNodes } from '@/nodes/types'; import type { Host, Port } from '@/network/types'; -import type NodeManager from 'nodes/NodeManager'; +import type { Sigchain } from '@/sigchain'; import fs from 'fs'; import path from 'path'; import os from 'os'; import { DB } from '@matrixai/db'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { IdInternal } from '@matrixai/id'; +import NodeManager from '@/nodes/NodeManager'; import PolykeyAgent from '@/PolykeyAgent'; import KeyManager from '@/keys/KeyManager'; import NodeGraph from '@/nodes/NodeGraph'; @@ -78,7 +79,10 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { keysUtils, 'generateDeterministicKeyPair', ); - const dummyNodeManager = { setNode: jest.fn() } as unknown as NodeManager; + const dummyNodeManager = { + setNode: jest.fn(), + refreshBucketQueueAdd: jest.fn(), + } as unknown as NodeManager; beforeAll(async 
() => { mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { @@ -225,6 +229,12 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { }); test('should synchronise nodeGraph', async () => { let nodeConnectionManager: NodeConnectionManager | undefined; + let nodeManager: NodeManager | undefined; + const mockedRefreshBucket = jest.spyOn( + NodeManager.prototype, + 'refreshBucket', + ); + mockedRefreshBucket.mockImplementation(async () => {}); try { const seedNodes: SeedNodes = {}; seedNodes[nodesUtils.encodeNodeId(remoteNodeId1)] = { @@ -242,6 +252,15 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { seedNodes, logger: logger, }); + nodeManager = new NodeManager({ + db, + keyManager, + logger, + nodeConnectionManager, + nodeGraph, + sigchain: {} as Sigchain, + }); + await nodeManager.start(); await remoteNode1.nodeGraph.setNode(nodeId1, { host: serverHost, port: serverPort, @@ -250,17 +269,77 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { host: serverHost, port: serverPort, }); - await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); + await nodeConnectionManager.start({ nodeManager }); await nodeConnectionManager.syncNodeGraph(); expect(await nodeGraph.getNode(nodeId1)).toBeDefined(); expect(await nodeGraph.getNode(nodeId2)).toBeDefined(); expect(await nodeGraph.getNode(dummyNodeId)).toBeUndefined(); } finally { + mockedRefreshBucket.mockRestore(); + await nodeManager?.stop(); + await nodeConnectionManager?.stop(); + } + }); + test('should call refreshBucket when syncing nodeGraph', async () => { + let nodeConnectionManager: NodeConnectionManager | undefined; + let nodeManager: NodeManager | undefined; + const mockedRefreshBucket = jest.spyOn( + NodeManager.prototype, + 'refreshBucket', + ); + mockedRefreshBucket.mockImplementation(async () => {}); + try { + const seedNodes: SeedNodes = {}; + seedNodes[nodesUtils.encodeNodeId(remoteNodeId1)] = { + host: 
remoteNode1.proxy.getProxyHost(), + port: remoteNode1.proxy.getProxyPort(), + }; + seedNodes[nodesUtils.encodeNodeId(remoteNodeId2)] = { + host: remoteNode2.proxy.getProxyHost(), + port: remoteNode2.proxy.getProxyPort(), + }; + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + proxy, + seedNodes, + logger: logger, + }); + nodeManager = new NodeManager({ + db, + keyManager, + logger, + nodeConnectionManager, + nodeGraph, + sigchain: {} as Sigchain, + }); + await nodeManager.start(); + await remoteNode1.nodeGraph.setNode(nodeId1, { + host: serverHost, + port: serverPort, + }); + await remoteNode2.nodeGraph.setNode(nodeId2, { + host: serverHost, + port: serverPort, + }); + await nodeConnectionManager.start({ nodeManager }); + await nodeConnectionManager.syncNodeGraph(); + await nodeManager.refreshBucketQueueDrained(); + expect(mockedRefreshBucket).toHaveBeenCalled(); + } finally { + mockedRefreshBucket.mockRestore(); + await nodeManager?.stop(); await nodeConnectionManager?.stop(); } }); test('should handle an offline seed node when synchronising nodeGraph', async () => { let nodeConnectionManager: NodeConnectionManager | undefined; + let nodeManager: NodeManager | undefined; + const mockedRefreshBucket = jest.spyOn( + NodeManager.prototype, + 'refreshBucket', + ); + mockedRefreshBucket.mockImplementation(async () => {}); try { const seedNodes: SeedNodes = {}; seedNodes[nodesUtils.encodeNodeId(remoteNodeId1)] = { @@ -292,14 +371,25 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { connConnectTime: 500, logger: logger, }); - await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); + nodeManager = new NodeManager({ + db, + keyManager, + logger, + nodeConnectionManager, + nodeGraph, + sigchain: {} as Sigchain, + }); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); // This should complete without error await nodeConnectionManager.syncNodeGraph(); // Information on remotes are 
found expect(await nodeGraph.getNode(nodeId1)).toBeDefined(); expect(await nodeGraph.getNode(nodeId2)).toBeDefined(); } finally { + mockedRefreshBucket.mockRestore(); await nodeConnectionManager?.stop(); + await nodeManager?.stop(); } }); }); From d7f2419daf92550263a91710c96161fa063d8a45 Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Thu, 14 Apr 2022 12:08:25 +1000 Subject: [PATCH 21/39] feat: abort controller support for `NodeManager.refreshBucket` Added support to cancel out of a `refreshBucket` operation. This is to allow faster stopping of the `NodeManager` by aborting out of a slow `refreshBucket` operation. This has been implemented with the `AbortController`/`AbortSignal` API. This is not fully supported by Node14 so we're using the `node-abort-controller` to provide functionality for now. #345 --- package.json | 1 + src/nodes/NodeConnectionManager.ts | 19 +++++++++++--- src/nodes/NodeManager.ts | 23 ++++++++++++++--- src/nodes/errors.ts | 6 +++++ tests/nodes/NodeManager.test.ts | 40 ++++++++++++++++++++++++++++++ 5 files changed, 82 insertions(+), 7 deletions(-) diff --git a/package.json b/package.json index 292dff2e1..d6c39d77e 100644 --- a/package.json +++ b/package.json @@ -100,6 +100,7 @@ "jose": "^4.3.6", "lexicographic-integer": "^1.1.0", "multiformats": "^9.4.8", + "node-abort-controller": "^3.0.1", "node-forge": "^0.10.0", "pako": "^1.0.11", "prompts": "^2.4.1", diff --git a/src/nodes/NodeConnectionManager.ts b/src/nodes/NodeConnectionManager.ts index fb4f04e79..f7ea93732 100644 --- a/src/nodes/NodeConnectionManager.ts +++ b/src/nodes/NodeConnectionManager.ts @@ -16,6 +16,7 @@ import type { } from './types'; import { withF } from '@matrixai/resources'; import type NodeManager from './NodeManager'; +import type { AbortSignal } from 'node-abort-controller'; import Logger from '@matrixai/logger'; import { ready, StartStop } from '@matrixai/async-init/dist/StartStop'; import { IdInternal } from '@matrixai/id'; @@ -383,16 +384,21 @@ class 
NodeConnectionManager { * Retrieves the node address. If an entry doesn't exist in the db, then * proceeds to locate it using Kademlia. * @param targetNodeId Id of the node we are tying to find - * @param tran + * @param options */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - public async findNode(targetNodeId: NodeId): Promise { + public async findNode( + targetNodeId: NodeId, + options: { signal?: AbortSignal } = {}, + ): Promise { + const { signal } = { ...options }; // First check if we already have an existing ID -> address record - let address = (await this.nodeGraph.getNode(targetNodeId))?.address; // Otherwise, attempt to locate it by contacting network if (address == null) { - address = await this.getClosestGlobalNodes(targetNodeId); + address = await this.getClosestGlobalNodes(targetNodeId, undefined, { + signal, + }); // TODO: This currently just does one iteration // If not found in this single iteration, we throw an exception if (address == null) { @@ -418,13 +424,16 @@ class NodeConnectionManager { * @param targetNodeId ID of the node attempting to be found (i.e. 
attempting * to find its IP address and port) * @param timer Connection timeout timer + * @param options * @returns whether the target node was located in the process */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) public async getClosestGlobalNodes( targetNodeId: NodeId, timer?: Timer, + options: { signal?: AbortSignal } = {}, ): Promise { + const { signal } = { ...options }; // Let foundTarget: boolean = false; let foundAddress: NodeAddress | undefined = undefined; // Get the closest alpha nodes to the target node (set as shortlist) @@ -445,6 +454,7 @@ class NodeConnectionManager { const contacted: { [nodeId: string]: boolean } = {}; // Iterate until we've found and contacted k nodes while (Object.keys(contacted).length <= this.nodeGraph.nodeBucketLimit) { + if (signal?.aborted) throw new nodesErrors.ErrorNodeAborted(); // While (!foundTarget) { // Remove the node from the front of the array const nextNode = shortlist.shift(); @@ -479,6 +489,7 @@ class NodeConnectionManager { // Check to see if any of these are the target node. 
At the same time, add // them to the shortlist for (const [nodeId, nodeData] of foundClosest) { + if (signal?.aborted) throw new nodesErrors.ErrorNodeAborted(); // Ignore a`ny nodes that have been contacted if (contacted[nodeId]) { continue; diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index 37fdf3cf0..2d1fc8240 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -14,8 +14,10 @@ import type { import type { ClaimEncoded } from '../claims/types'; import type { Timer } from '../types'; import type { PromiseType } from '../utils/utils'; +import type { AbortSignal } from 'node-abort-controller'; import Logger from '@matrixai/logger'; import { StartStop, ready } from '@matrixai/async-init/dist/StartStop'; +import { AbortController } from 'node-abort-controller'; import * as nodesErrors from './errors'; import * as nodesUtils from './utils'; import * as networkUtils from '../network/utils'; @@ -57,6 +59,7 @@ class NodeManager { protected refreshBucketQueueRunner: Promise; protected refreshBucketQueuePlug_: PromiseType; protected refreshBucketQueueDrained_: PromiseType; + protected refreshBucketQueueAbortController: AbortController; constructor({ db, @@ -647,8 +650,13 @@ class NodeManager { * Connections during the search will will share node information with other * nodes. 
* @param bucketIndex + * @param options */ - public async refreshBucket(bucketIndex: NodeBucketIndex) { + public async refreshBucket( + bucketIndex: NodeBucketIndex, + options: { signal?: AbortSignal } = {}, + ) { + const { signal } = { ...options }; // We need to generate a random nodeId for this bucket const nodeId = this.keyManager.getNodeId(); const bucketRandomNodeId = nodesUtils.generateRandomNodeIdForBucket( @@ -656,7 +664,7 @@ class NodeManager { bucketIndex, ); // We then need to start a findNode procedure - await this.nodeConnectionManager.findNode(bucketRandomNodeId); + await this.nodeConnectionManager.findNode(bucketRandomNodeId, { signal }); } // Refresh bucket activity timer methods @@ -752,6 +760,7 @@ class NodeManager { this.refreshBucketQueueRunning = true; this.refreshBucketQueuePlug(); let iterator: IterableIterator | undefined; + this.refreshBucketQueueAbortController = new AbortController(); const pace = async () => { // Wait for plug await this.refreshBucketQueuePlug_.p; @@ -772,7 +781,14 @@ class NodeManager { this.logger.debug( `processing refreshBucket for bucket ${bucketIndex}, ${this.refreshBucketQueue.size} left in queue`, ); - await this.refreshBucket(bucketIndex); + try { + await this.refreshBucket(bucketIndex, { + signal: this.refreshBucketQueueAbortController.signal, + }); + } catch (e) { + if (e instanceof nodesErrors.ErrorNodeAborted) break; + throw e; + } // Remove from queue and update bucket deadline this.refreshBucketQueue.delete(bucketIndex); this.refreshBucketUpdateDeadline(bucketIndex); @@ -782,6 +798,7 @@ class NodeManager { private async stopRefreshBucketQueue(): Promise { // Flag end and await queue finish + this.refreshBucketQueueAbortController.abort(); this.refreshBucketQueueRunning = false; this.refreshBucketQueueUnplug(); } diff --git a/src/nodes/errors.ts b/src/nodes/errors.ts index 863e19a37..a98fbcaa6 100644 --- a/src/nodes/errors.ts +++ b/src/nodes/errors.ts @@ -2,6 +2,11 @@ import { ErrorPolykey, sysexits } from 
'../errors'; class ErrorNodes extends ErrorPolykey {} +class ErrorNodeAborted extends ErrorNodes { + description = 'Operation was aborted'; + exitCode = sysexits.USAGE; +} + class ErrorNodeManagerNotRunning extends ErrorNodes { static description = 'NodeManager is not running'; exitCode = sysexits.USAGE; @@ -79,6 +84,7 @@ class ErrorNodeConnectionHostWildcard extends ErrorNodes { export { ErrorNodes, + ErrorNodeAborted, ErrorNodeManagerNotRunning, ErrorNodeGraphRunning, ErrorNodeGraphNotRunning, diff --git a/tests/nodes/NodeManager.test.ts b/tests/nodes/NodeManager.test.ts index 04880a0ff..b83be35d8 100644 --- a/tests/nodes/NodeManager.test.ts +++ b/tests/nodes/NodeManager.test.ts @@ -19,6 +19,7 @@ import * as claimsUtils from '@/claims/utils'; import { promise, promisify, sleep } from '@/utils'; import * as nodesUtils from '@/nodes/utils'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; +import * as nodesErrors from '@/nodes/errors'; import * as nodesTestUtils from './utils'; import { generateNodeIdForBucket } from './utils'; @@ -969,4 +970,43 @@ describe(`${NodeManager.name} test`, () => { await nodeManager.stop(); } }); + test('should abort refreshBucket queue when stopping', async () => { + const refreshBucketTimeout = 1000000; + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: dummyNodeConnectionManager, + refreshBucketTimerDefault: refreshBucketTimeout, + logger, + }); + const mockRefreshBucket = jest.spyOn( + NodeManager.prototype, + 'refreshBucket', + ); + try { + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + mockRefreshBucket.mockImplementation( + async (bucket, options: { signal?: AbortSignal } = {}) => { + const { signal } = { ...options }; + const prom = promise(); + signal?.addEventListener('abort', () => + prom.rejectP(new nodesErrors.ErrorNodeAborted()), + ); + await prom.p; + }, + ); + nodeManager.refreshBucketQueueAdd(1); 
+ nodeManager.refreshBucketQueueAdd(2); + nodeManager.refreshBucketQueueAdd(3); + nodeManager.refreshBucketQueueAdd(4); + nodeManager.refreshBucketQueueAdd(5); + await nodeManager.stop(); + } finally { + mockRefreshBucket.mockRestore(); + await nodeManager.stop(); + } + }); }); From b9717e186babc2bfccdeda6292d33d59bad3fc2e Mon Sep 17 00:00:00 2001 From: Emma Casolin Date: Tue, 19 Apr 2022 13:14:35 +1000 Subject: [PATCH 22/39] fix: `setNode` no longer adds node twice when `force` is true #322 --- src/nodes/NodeManager.ts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index 2d1fc8240..747ffe4b3 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -459,8 +459,7 @@ class NodeManager { // Updating the refreshBucket timer this.refreshBucketUpdateDeadline(bucketIndex); return; - } - if (blocking) { + } else if (blocking) { this.logger.debug( `Bucket was full and blocking was true, garbage collecting old nodes to add ${nodesUtils.encodeNodeId( nodeId, From 93f53c83afb3e4cdcc7e0d48bc684ecbf5f30af6 Mon Sep 17 00:00:00 2001 From: Emma Casolin Date: Thu, 21 Apr 2022 14:52:22 +1000 Subject: [PATCH 23/39] feat: generic `SetNodeQueue` class for queuing `setNode` operations `NodeManager.setNode` and `NodeConnectionManager.syncNodeGraph` now utilise a single, shared queue to asynchronously add nodes to the node graph without blocking the main loop. These methods are both blocking by default but can be made non-blocking by setting the `block` parameter to false. 
#322 --- src/PolykeyAgent.ts | 29 ++++- src/bootstrap/utils.ts | 4 + src/nodes/NodeConnectionManager.ts | 62 +++++---- src/nodes/NodeManager.ts | 120 ++---------------- src/nodes/SetNodeQueue.ts | 107 ++++++++++++++++ src/nodes/errors.ts | 6 + tests/agent/GRPCClientAgent.test.ts | 7 + tests/agent/service/notificationsSend.test.ts | 8 ++ .../gestaltsDiscoveryByIdentity.test.ts | 9 ++ .../service/gestaltsDiscoveryByNode.test.ts | 9 ++ .../gestaltsGestaltTrustByIdentity.test.ts | 15 ++- .../gestaltsGestaltTrustByNode.test.ts | 15 ++- tests/client/service/identitiesClaim.test.ts | 10 +- tests/client/service/nodesAdd.test.ts | 9 ++ tests/client/service/nodesClaim.test.ts | 13 +- tests/client/service/nodesFind.test.ts | 8 ++ tests/client/service/nodesPing.test.ts | 9 ++ .../client/service/notificationsClear.test.ts | 9 ++ .../client/service/notificationsRead.test.ts | 11 +- .../client/service/notificationsSend.test.ts | 11 +- tests/discovery/Discovery.test.ts | 15 ++- tests/nodes/NodeConnection.test.ts | 7 + .../NodeConnectionManager.general.test.ts | 13 ++ .../NodeConnectionManager.lifecycle.test.ts | 19 +++ .../NodeConnectionManager.seednodes.test.ts | 25 ++++ .../NodeConnectionManager.termination.test.ts | 10 ++ .../NodeConnectionManager.timeout.test.ts | 4 + tests/nodes/NodeManager.test.ts | 71 ++++++++++- .../NotificationsManager.test.ts | 7 + tests/vaults/VaultManager.test.ts | 3 + 30 files changed, 494 insertions(+), 151 deletions(-) create mode 100644 src/nodes/SetNodeQueue.ts diff --git a/src/PolykeyAgent.ts b/src/PolykeyAgent.ts index ddc7ca2cd..2b8951d91 100644 --- a/src/PolykeyAgent.ts +++ b/src/PolykeyAgent.ts @@ -34,6 +34,7 @@ import * as errors from './errors'; import * as utils from './utils'; import * as keysUtils from './keys/utils'; import * as nodesUtils from './nodes/utils'; +import SetNodeQueue from './nodes/SetNodeQueue'; type NetworkConfig = { forwardHost?: Host; @@ -87,6 +88,7 @@ class PolykeyAgent { gestaltGraph, proxy, nodeGraph, + 
setNodeQueue, nodeConnectionManager, nodeManager, discovery, @@ -132,6 +134,7 @@ class PolykeyAgent { gestaltGraph?: GestaltGraph; proxy?: Proxy; nodeGraph?: NodeGraph; + setNodeQueue?: SetNodeQueue; nodeConnectionManager?: NodeConnectionManager; nodeManager?: NodeManager; discovery?: Discovery; @@ -281,12 +284,18 @@ class PolykeyAgent { keyManager, logger: logger.getChild(NodeGraph.name), })); + setNodeQueue = + setNodeQueue ?? + new SetNodeQueue({ + logger: logger.getChild(SetNodeQueue.name), + }); nodeConnectionManager = nodeConnectionManager ?? new NodeConnectionManager({ keyManager, nodeGraph, proxy, + setNodeQueue, seedNodes, ...nodeConnectionManagerConfig_, logger: logger.getChild(NodeConnectionManager.name), @@ -299,6 +308,7 @@ class PolykeyAgent { keyManager, nodeGraph, nodeConnectionManager, + setNodeQueue, logger: logger.getChild(NodeManager.name), }); await nodeManager.start(); @@ -385,6 +395,7 @@ class PolykeyAgent { gestaltGraph, proxy, nodeGraph, + setNodeQueue, nodeConnectionManager, nodeManager, discovery, @@ -417,6 +428,7 @@ class PolykeyAgent { public readonly gestaltGraph: GestaltGraph; public readonly proxy: Proxy; public readonly nodeGraph: NodeGraph; + public readonly setNodeQueue: SetNodeQueue; public readonly nodeConnectionManager: NodeConnectionManager; public readonly nodeManager: NodeManager; public readonly discovery: Discovery; @@ -441,6 +453,7 @@ class PolykeyAgent { gestaltGraph, proxy, nodeGraph, + setNodeQueue, nodeConnectionManager, nodeManager, discovery, @@ -464,6 +477,7 @@ class PolykeyAgent { gestaltGraph: GestaltGraph; proxy: Proxy; nodeGraph: NodeGraph; + setNodeQueue: SetNodeQueue; nodeConnectionManager: NodeConnectionManager; nodeManager: NodeManager; discovery: Discovery; @@ -489,6 +503,7 @@ class PolykeyAgent { this.proxy = proxy; this.discovery = discovery; this.nodeGraph = nodeGraph; + this.setNodeQueue = setNodeQueue; this.nodeConnectionManager = nodeConnectionManager; this.nodeManager = nodeManager; this.vaultManager 
= vaultManager; @@ -562,10 +577,14 @@ class PolykeyAgent { ); // Reverse connection was established and authenticated, // add it to the node graph - await this.nodeManager.setNode(data.remoteNodeId, { - host: data.remoteHost, - port: data.remotePort, - }); + await this.nodeManager.setNode( + data.remoteNodeId, + { + host: data.remoteHost, + port: data.remotePort, + }, + false, + ); } }, ); @@ -647,6 +666,7 @@ class PolykeyAgent { proxyPort: networkConfig_.proxyPort, tlsConfig, }); + await this.setNodeQueue.start(); await this.nodeManager.start(); await this.nodeConnectionManager.start({ nodeManager: this.nodeManager }); await this.nodeGraph.start({ fresh }); @@ -704,6 +724,7 @@ class PolykeyAgent { await this.nodeConnectionManager.stop(); await this.nodeGraph.stop(); await this.nodeManager.stop(); + await this.setNodeQueue.stop(); await this.proxy.stop(); await this.grpcServerAgent.stop(); await this.grpcServerClient.stop(); diff --git a/src/bootstrap/utils.ts b/src/bootstrap/utils.ts index 422709b01..09aff4586 100644 --- a/src/bootstrap/utils.ts +++ b/src/bootstrap/utils.ts @@ -4,6 +4,7 @@ import path from 'path'; import Logger from '@matrixai/logger'; import { DB } from '@matrixai/db'; import * as bootstrapErrors from './errors'; +import SetNodeQueue from '../nodes/SetNodeQueue'; import { IdentitiesManager } from '../identities'; import { SessionManager } from '../sessions'; import { Status } from '../status'; @@ -141,10 +142,12 @@ async function bootstrapState({ keyManager, logger: logger.getChild(NodeGraph.name), }); + const setNodeQueue = new SetNodeQueue({ logger }); const nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + setNodeQueue, logger: logger.getChild(NodeConnectionManager.name), }); const nodeManager = new NodeManager({ @@ -153,6 +156,7 @@ async function bootstrapState({ nodeGraph, nodeConnectionManager, sigchain, + setNodeQueue, logger: logger.getChild(NodeManager.name), }); const notificationsManager = diff --git 
a/src/nodes/NodeConnectionManager.ts b/src/nodes/NodeConnectionManager.ts index f7ea93732..7d6b5d694 100644 --- a/src/nodes/NodeConnectionManager.ts +++ b/src/nodes/NodeConnectionManager.ts @@ -4,6 +4,7 @@ import type Proxy from '../network/Proxy'; import type { Host, Hostname, Port } from '../network/types'; import type { Timer } from '../types'; import type NodeGraph from './NodeGraph'; +import type SetNodeQueue from './SetNodeQueue'; import type { NodeAddress, NodeData, @@ -60,6 +61,7 @@ class NodeConnectionManager { protected nodeGraph: NodeGraph; protected keyManager: KeyManager; protected proxy: Proxy; + protected setNodeQueue: SetNodeQueue; // NodeManager has to be passed in during start to allow co-dependency protected nodeManager: NodeManager | undefined; protected seedNodes: SeedNodes; @@ -80,6 +82,7 @@ class NodeConnectionManager { keyManager, nodeGraph, proxy, + setNodeQueue, seedNodes = {}, initialClosestNodes = 3, connConnectTime = 20000, @@ -89,6 +92,7 @@ class NodeConnectionManager { nodeGraph: NodeGraph; keyManager: KeyManager; proxy: Proxy; + setNodeQueue: SetNodeQueue; seedNodes?: SeedNodes; initialClosestNodes?: number; connConnectTime?: number; @@ -99,6 +103,7 @@ class NodeConnectionManager { this.keyManager = keyManager; this.nodeGraph = nodeGraph; this.proxy = proxy; + this.setNodeQueue = setNodeQueue; this.seedNodes = seedNodes; this.initialClosestNodes = initialClosestNodes; this.connConnectTime = connConnectTime; @@ -301,7 +306,7 @@ class NodeConnectionManager { }); // We can assume connection was established and destination was valid, // we can add the target to the nodeGraph - await this.nodeManager?.setNode(targetNodeId, targetAddress); + await this.nodeManager?.setNode(targetNodeId, targetAddress, false); // Creating TTL timeout const timeToLiveTimer = setTimeout(async () => { await this.destroyConnection(targetNodeId); @@ -574,18 +579,12 @@ class NodeConnectionManager { /** * Perform an initial database synchronisation: get k of the 
closest nodes * from each seed node and add them to this database - * For now, we also attempt to establish a connection to each of them. - * If these nodes are offline, this will impose a performance penalty, - * so we should investigate performing this in the background if possible. - * Alternatively, we can also just add the nodes to our database without - * establishing connection. - * This has been removed from start() as there's a chicken-egg scenario - * where we require the NodeGraph instance to be created in order to get - * connections. - * @param timer Connection timeout timer + * Establish a proxy connection to each node before adding it + * By default this operation is blocking, set `block` to false to make it + * non-blocking */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - public async syncNodeGraph(timer?: Timer) { + public async syncNodeGraph(block: boolean = true, timer?: Timer) { for (const seedNodeId of this.getSeedNodes()) { // Check if the connection is viable try { @@ -594,28 +593,43 @@ class NodeConnectionManager { if (e instanceof nodesErrors.ErrorNodeConnectionTimeout) continue; throw e; } - const nodes = await this.getRemoteNodeClosestNodes( seedNodeId, this.keyManager.getNodeId(), timer, ); for (const [nodeId, nodeData] of nodes) { - // FIXME: needs to ping the node right? 
we want to be non-blocking - try { - // FIXME: no tran needed - await this.nodeManager?.setNode(nodeId, nodeData.address); - } catch (e) { - if (!(e instanceof nodesErrors.ErrorNodeGraphSameNodeId)) throw e; + if (!block) { + this.setNodeQueue.queueSetNode(() => + this.nodeManager!.setNode(nodeId, nodeData.address), + ); + } else { + try { + // FIXME: no tran needed + await this.nodeManager?.setNode(nodeId, nodeData.address); + } catch (e) { + if (!(e instanceof nodesErrors.ErrorNodeGraphSameNodeId)) throw e; + } } } // Refreshing every bucket above the closest node - const [closestNode] = ( - await this.nodeGraph.getClosestNodes(this.keyManager.getNodeId(), 1) - ).pop()!; - const [bucketIndex] = this.nodeGraph.bucketIndex(closestNode); - for (let i = bucketIndex; i < this.nodeGraph.nodeIdBits; i++) { - this.nodeManager?.refreshBucketQueueAdd(i); + if (!block) { + this.setNodeQueue.queueSetNode(async () => { + const [closestNode] = ( + await this.nodeGraph.getClosestNodes(this.keyManager.getNodeId(), 1) + ).pop()!; + const [bucketIndex] = this.nodeGraph.bucketIndex(closestNode); + for (let i = bucketIndex; i < this.nodeGraph.nodeIdBits; i++) { + this.nodeManager?.refreshBucketQueueAdd(i); + } + }); + } else { + const [closestNode] = ( + await this.nodeGraph.getClosestNodes(this.keyManager.getNodeId(), 1) + ).pop()!; + const [bucketIndex] = this.nodeGraph.bucketIndex(closestNode); + for (let i = bucketIndex; i < this.nodeGraph.nodeIdBits; i++) { + this.nodeManager?.refreshBucketQueueAdd(i); + } } } } diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index 747ffe4b3..2bc76bccb 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -1,6 +1,7 @@ import type { DB, DBTransaction } from '@matrixai/db'; import type NodeConnectionManager from './NodeConnectionManager'; import type NodeGraph from './NodeGraph'; +import type SetNodeQueue from './SetNodeQueue'; import type KeyManager from '../keys/KeyManager'; import type { PublicKeyPem } from 
'../keys/types'; import type Sigchain from '../sigchain/Sigchain'; @@ -37,18 +38,7 @@ class NodeManager { protected keyManager: KeyManager; protected nodeConnectionManager: NodeConnectionManager; protected nodeGraph: NodeGraph; - // SetNodeQueue - protected endQueue: boolean = false; - protected setNodeQueue: Array<{ - nodeId: NodeId; - nodeAddress: NodeAddress; - timeout?: number; - }> = []; - protected setNodeQueuePlug: Promise; - protected setNodeQueueUnplug: (() => void) | undefined; - protected setNodeQueueRunner: Promise; - protected setNodeQueueEmpty: Promise; - protected setNodeQueueDrained: () => void; + protected setNodeQueue: SetNodeQueue; // Refresh bucket timer protected refreshBucketDeadlineMap: Map = new Map(); protected refreshBucketTimer: NodeJS.Timer; @@ -67,6 +57,7 @@ class NodeManager { sigchain, nodeConnectionManager, nodeGraph, + setNodeQueue, refreshBucketTimerDefault = 3600000, // 1 hour in milliseconds logger, }: { @@ -75,6 +66,7 @@ class NodeManager { sigchain: Sigchain; nodeConnectionManager: NodeConnectionManager; nodeGraph: NodeGraph; + setNodeQueue: SetNodeQueue; refreshBucketTimerDefault?: number; logger?: Logger; }) { @@ -84,12 +76,12 @@ class NodeManager { this.sigchain = sigchain; this.nodeConnectionManager = nodeConnectionManager; this.nodeGraph = nodeGraph; + this.setNodeQueue = setNodeQueue; this.refreshBucketTimerDefault = refreshBucketTimerDefault; } public async start() { this.logger.info(`Starting ${this.constructor.name}`); - this.setNodeQueueRunner = this.startSetNodeQueue(); this.startRefreshBucketTimers(); this.refreshBucketQueueRunner = this.startRefreshBucketQueue(); this.logger.info(`Started ${this.constructor.name}`); @@ -97,7 +89,6 @@ class NodeManager { public async stop() { this.logger.info(`Stopping ${this.constructor.name}`); - await this.stopSetNodeQueue(); await this.stopRefreshBucketTimers(); await this.stopRefreshBucketQueue(); this.logger.info(`Stopped ${this.constructor.name}`); @@ -400,11 +391,12 @@ class 
NodeManager { } /** - * Adds a node to the node graph. This assumes that you have already authenticated the node. - * Updates the node if the node already exists. + * Adds a node to the node graph. This assumes that you have already authenticated the node + * Updates the node if the node already exists + * This operation is blocking by default - set `block` to false to make it non-blocking * @param nodeId - Id of the node we wish to add * @param nodeAddress - Expected address of the node we want to add - * @param blocking - Flag for if the operation should block or utilize the async queue + * @param block - Flag for if the operation should block or utilize the async queue * @param force - Flag for if we want to add the node without authenticating or if the bucket is full. * This will drop the oldest node in favor of the new. * @param timeout Connection timeout timeout @@ -414,7 +406,7 @@ class NodeManager { public async setNode( nodeId: NodeId, nodeAddress: NodeAddress, - blocking: boolean = false, + block: boolean = true, force: boolean = false, timeout?: number, tran: DBTransaction, @@ -459,7 +451,7 @@ class NodeManager { // Updating the refreshBucket timer this.refreshBucketUpdateDeadline(bucketIndex); return; - } else if (blocking) { + } else if (block) { this.logger.debug( `Bucket was full and blocking was true, garbage collecting old nodes to add ${nodesUtils.encodeNodeId( nodeId, @@ -478,7 +470,9 @@ class NodeManager { )} to queue`, ); // Re-attempt this later asynchronously by adding the the queue - this.queueSetNode(nodeId, nodeAddress, timeout); + this.setNodeQueue.queueSetNode(() => + this.setNode(nodeId, nodeAddress, true, false, timeout), + ); } } } @@ -557,92 +551,6 @@ class NodeManager { // Return await this.nodeGraph.refreshBuckets(tran); } - // SetNode queue - - /** - * This adds a setNode operation to the queue - */ - private queueSetNode( - nodeId: NodeId, - nodeAddress: NodeAddress, - timeout?: number, - ): void { - this.logger.debug(`Adding 
${nodesUtils.encodeNodeId(nodeId)} to queue`); - this.setNodeQueue.push({ - nodeId, - nodeAddress, - timeout, - }); - this.unplugQueue(); - } - - /** - * This starts the process of digesting the queue - */ - private async startSetNodeQueue(): Promise { - this.logger.debug('Starting setNodeQueue'); - this.plugQueue(); - // While queue hasn't ended - while (true) { - // Wait for queue to be unplugged - await this.setNodeQueuePlug; - if (this.endQueue) break; - const job = this.setNodeQueue.shift(); - if (job == null) { - // If the queue is empty then we pause the queue - this.plugQueue(); - continue; - } - // Process the job - this.logger.debug( - `SetNodeQueue processing job for: ${nodesUtils.encodeNodeId( - job.nodeId, - )}`, - ); - await this.setNode(job.nodeId, job.nodeAddress, true, false, job.timeout); - } - this.logger.debug('SetNodeQueue has ended'); - } - - private async stopSetNodeQueue(): Promise { - this.logger.debug('Stopping setNodeQueue'); - // Tell the queue runner to end - this.endQueue = true; - this.unplugQueue(); - // Wait for runner to finish it's current job - await this.setNodeQueueRunner; - } - - private plugQueue(): void { - if (this.setNodeQueueUnplug == null) { - this.logger.debug('Plugging setNodeQueue'); - // Pausing queue - this.setNodeQueuePlug = new Promise((resolve) => { - this.setNodeQueueUnplug = resolve; - }); - // Signaling queue is empty - if (this.setNodeQueueDrained != null) this.setNodeQueueDrained(); - } - } - - private unplugQueue(): void { - if (this.setNodeQueueUnplug != null) { - this.logger.debug('Unplugging setNodeQueue'); - // Starting queue - this.setNodeQueueUnplug(); - this.setNodeQueueUnplug = undefined; - // Signalling queue is running - this.setNodeQueueEmpty = new Promise((resolve) => { - this.setNodeQueueDrained = resolve; - }); - } - } - - @ready(new nodesErrors.ErrorNodeManagerNotRunning()) - public async queueDrained(): Promise { - await this.setNodeQueueEmpty; - } - /** * Kademlia refresh bucket operation. 
* It picks a random node within a bucket and does a search for that node. diff --git a/src/nodes/SetNodeQueue.ts b/src/nodes/SetNodeQueue.ts new file mode 100644 index 000000000..a405c3418 --- /dev/null +++ b/src/nodes/SetNodeQueue.ts @@ -0,0 +1,107 @@ +import Logger from '@matrixai/logger'; +import { StartStop, ready } from '@matrixai/async-init/dist/StartStop'; +import * as nodesErrors from './errors'; + +interface SetNodeQueue extends StartStop {} +@StartStop() +class SetNodeQueue { + protected logger: Logger; + protected endQueue: boolean = false; + protected setNodeQueue: Array<() => Promise> = []; + protected setNodeQueuePlug: Promise; + protected setNodeQueueUnplug: (() => void) | undefined; + protected setNodeQueueRunner: Promise; + protected setNodeQueueEmpty: Promise; + protected setNodeQueueDrained: () => void; + + constructor({ logger }: { logger?: Logger }) { + this.logger = logger ?? new Logger(this.constructor.name); + } + + public async start() { + this.logger.info(`Starting ${this.constructor.name}`); + this.setNodeQueueRunner = this.startSetNodeQueue(); + this.logger.info(`Started ${this.constructor.name}`); + } + + public async stop() { + this.logger.info(`Stopping ${this.constructor.name}`); + await this.stopSetNodeQueue(); + this.logger.info(`Stopped ${this.constructor.name}`); + } + + /** + * This adds a setNode operation to the queue + */ + public queueSetNode(f: () => Promise): void { + this.setNodeQueue.push(f); + this.unplugQueue(); + } + + /** + * This starts the process of digesting the queue + */ + private async startSetNodeQueue(): Promise { + this.logger.debug('Starting setNodeQueue'); + this.plugQueue(); + // While queue hasn't ended + while (true) { + // Wait for queue to be unplugged + await this.setNodeQueuePlug; + if (this.endQueue) break; + const job = this.setNodeQueue.shift(); + if (job == null) { + // If the queue is empty then we pause the queue + this.plugQueue(); + continue; + } + try { + await job(); + } catch (e) { + if 
(!(e instanceof nodesErrors.ErrorNodeGraphSameNodeId)) throw e; + } + } + this.logger.debug('setNodeQueue has ended'); + } + + private async stopSetNodeQueue(): Promise { + this.logger.debug('Stopping setNodeQueue'); + // Tell the queue runner to end + this.endQueue = true; + this.unplugQueue(); + // Wait for runner to finish it's current job + await this.setNodeQueueRunner; + } + + private plugQueue(): void { + if (this.setNodeQueueUnplug == null) { + this.logger.debug('Plugging setNodeQueue'); + // Pausing queue + this.setNodeQueuePlug = new Promise((resolve) => { + this.setNodeQueueUnplug = resolve; + }); + // Signaling queue is empty + if (this.setNodeQueueDrained != null) this.setNodeQueueDrained(); + } + } + + private unplugQueue(): void { + if (this.setNodeQueueUnplug != null) { + this.logger.debug('Unplugging setNodeQueue'); + // Starting queue + this.setNodeQueueUnplug(); + this.setNodeQueueUnplug = undefined; + // Signalling queue is running + this.setNodeQueueEmpty = new Promise((resolve) => { + this.setNodeQueueDrained = resolve; + }); + } + } + + @ready(new nodesErrors.ErrorSetNodeQueueNotRunning()) + public async queueDrained(): Promise { + await this.setNodeQueueEmpty; + } +} + +export default SetNodeQueue; diff --git a/src/nodes/errors.ts b/src/nodes/errors.ts index a98fbcaa6..159021b9c 100644 --- a/src/nodes/errors.ts +++ b/src/nodes/errors.ts @@ -12,6 +12,11 @@ class ErrorNodeManagerNotRunning extends ErrorNodes { exitCode = sysexits.USAGE; } +class ErrorSetNodeQueueNotRunning extends ErrorNodes { + static description = 'SetNodeQueue is not running'; + exitCode = sysexits.USAGE; +} + class ErrorNodeGraphRunning extends ErrorNodes { static description = 'NodeGraph is running'; exitCode = sysexits.USAGE; @@ -86,6 +91,7 @@ export { ErrorNodes, ErrorNodeAborted, ErrorNodeManagerNotRunning, + ErrorSetNodeQueueNotRunning, ErrorNodeGraphRunning, ErrorNodeGraphNotRunning, ErrorNodeGraphDestroyed, diff --git a/tests/agent/GRPCClientAgent.test.ts 
b/tests/agent/GRPCClientAgent.test.ts index 5cc38708a..2a9644055 100644 --- a/tests/agent/GRPCClientAgent.test.ts +++ b/tests/agent/GRPCClientAgent.test.ts @@ -22,6 +22,7 @@ import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as agentErrors from '@/agent/errors'; import * as keysUtils from '@/keys/utils'; import { timerStart } from '@/utils'; +import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testAgentUtils from './utils'; describe(GRPCClientAgent.name, () => { @@ -49,6 +50,7 @@ describe(GRPCClientAgent.name, () => { let keyManager: KeyManager; let vaultManager: VaultManager; let nodeGraph: NodeGraph; + let setNodeQueue: SetNodeQueue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; @@ -110,10 +112,12 @@ describe(GRPCClientAgent.name, () => { keyManager, logger, }); + setNodeQueue = new SetNodeQueue({ logger }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + setNodeQueue, logger, }); nodeManager = new NodeManager({ @@ -122,8 +126,10 @@ describe(GRPCClientAgent.name, () => { keyManager: keyManager, nodeGraph: nodeGraph, nodeConnectionManager: nodeConnectionManager, + setNodeQueue, logger: logger, }); + await setNodeQueue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); notificationsManager = @@ -178,6 +184,7 @@ describe(GRPCClientAgent.name, () => { await sigchain.stop(); await nodeConnectionManager.stop(); await nodeManager.stop(); + await setNodeQueue.stop(); await nodeGraph.stop(); await gestaltGraph.stop(); await acl.stop(); diff --git a/tests/agent/service/notificationsSend.test.ts b/tests/agent/service/notificationsSend.test.ts index 46a9aa07e..9425743b8 100644 --- a/tests/agent/service/notificationsSend.test.ts +++ b/tests/agent/service/notificationsSend.test.ts @@ -27,6 +27,7 @@ import * as notificationsPB from '@/proto/js/polykey/v1/notifications/notificati import * as keysUtils from 
'@/keys/utils'; import * as nodesUtils from '@/nodes/utils'; import * as notificationsUtils from '@/notifications/utils'; +import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../../utils'; import { expectRemoteError } from '../../utils'; @@ -40,6 +41,7 @@ describe('notificationsSend', () => { let senderKeyManager: KeyManager; let dataDir: string; let nodeGraph: NodeGraph; + let setNodeQueue: SetNodeQueue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; @@ -109,10 +111,14 @@ describe('notificationsSend', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + setNodeQueue = new SetNodeQueue({ + logger: logger.getChild('SetNodeQueue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + setNodeQueue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -123,8 +129,10 @@ describe('notificationsSend', () => { nodeGraph, nodeConnectionManager, sigchain, + setNodeQueue, logger, }); + await setNodeQueue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); notificationsManager = diff --git a/tests/client/service/gestaltsDiscoveryByIdentity.test.ts b/tests/client/service/gestaltsDiscoveryByIdentity.test.ts index a9a4d7a17..6fec772c1 100644 --- a/tests/client/service/gestaltsDiscoveryByIdentity.test.ts +++ b/tests/client/service/gestaltsDiscoveryByIdentity.test.ts @@ -24,6 +24,7 @@ import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as identitiesPB from '@/proto/js/polykey/v1/identities/identities_pb'; import * as clientUtils from '@/client/utils/utils'; import * as keysUtils from '@/keys/utils'; +import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../../utils'; describe('gestaltsDiscoveryByIdentity', () => { @@ -59,6 +60,7 @@ describe('gestaltsDiscoveryByIdentity', () => { let gestaltGraph: 
GestaltGraph; let identitiesManager: IdentitiesManager; let nodeGraph: NodeGraph; + let setNodeQueue: SetNodeQueue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; @@ -125,10 +127,14 @@ describe('gestaltsDiscoveryByIdentity', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + setNodeQueue = new SetNodeQueue({ + logger: logger.getChild('SetNodeQueue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + setNodeQueue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -139,8 +145,10 @@ describe('gestaltsDiscoveryByIdentity', () => { nodeConnectionManager, nodeGraph, sigchain, + setNodeQueue, logger, }); + await setNodeQueue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); discovery = await Discovery.createDiscovery({ @@ -179,6 +187,7 @@ describe('gestaltsDiscoveryByIdentity', () => { await nodeGraph.stop(); await nodeConnectionManager.stop(); await nodeManager.stop(); + await setNodeQueue.stop(); await sigchain.stop(); await proxy.stop(); await identitiesManager.stop(); diff --git a/tests/client/service/gestaltsDiscoveryByNode.test.ts b/tests/client/service/gestaltsDiscoveryByNode.test.ts index e34f5f8ed..1e97b6250 100644 --- a/tests/client/service/gestaltsDiscoveryByNode.test.ts +++ b/tests/client/service/gestaltsDiscoveryByNode.test.ts @@ -25,6 +25,7 @@ import * as nodesPB from '@/proto/js/polykey/v1/nodes/nodes_pb'; import * as clientUtils from '@/client/utils/utils'; import * as keysUtils from '@/keys/utils'; import * as nodesUtils from '@/nodes/utils'; +import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../../utils'; import * as testNodesUtils from '../../nodes/utils'; @@ -60,6 +61,7 @@ describe('gestaltsDiscoveryByNode', () => { let gestaltGraph: GestaltGraph; let identitiesManager: IdentitiesManager; let nodeGraph: NodeGraph; + let 
setNodeQueue: SetNodeQueue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; @@ -126,10 +128,14 @@ describe('gestaltsDiscoveryByNode', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + setNodeQueue = new SetNodeQueue({ + logger: logger.getChild('SetNodeQueue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + setNodeQueue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -140,8 +146,10 @@ describe('gestaltsDiscoveryByNode', () => { nodeConnectionManager, nodeGraph, sigchain, + setNodeQueue, logger, }); + await setNodeQueue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); discovery = await Discovery.createDiscovery({ @@ -180,6 +188,7 @@ describe('gestaltsDiscoveryByNode', () => { await nodeGraph.stop(); await nodeConnectionManager.stop(); await nodeManager.stop(); + await setNodeQueue.stop(); await sigchain.stop(); await proxy.stop(); await identitiesManager.stop(); diff --git a/tests/client/service/gestaltsGestaltTrustByIdentity.test.ts b/tests/client/service/gestaltsGestaltTrustByIdentity.test.ts index 949a5f5e4..17de35e72 100644 --- a/tests/client/service/gestaltsGestaltTrustByIdentity.test.ts +++ b/tests/client/service/gestaltsGestaltTrustByIdentity.test.ts @@ -31,6 +31,7 @@ import * as gestaltsErrors from '@/gestalts/errors'; import * as keysUtils from '@/keys/utils'; import * as clientUtils from '@/client/utils/utils'; import * as nodesUtils from '@/nodes/utils'; +import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../../utils'; import TestProvider from '../../identities/TestProvider'; import { expectRemoteError } from '../../utils'; @@ -115,6 +116,7 @@ describe('gestaltsGestaltTrustByIdentity', () => { let discovery: Discovery; let gestaltGraph: GestaltGraph; let identitiesManager: IdentitiesManager; + let setNodeQueue: SetNodeQueue; let 
nodeManager: NodeManager; let nodeConnectionManager: NodeConnectionManager; let nodeGraph: NodeGraph; @@ -191,10 +193,14 @@ describe('gestaltsGestaltTrustByIdentity', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + setNodeQueue = new SetNodeQueue({ + logger: logger.getChild('SetNodeQueue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + setNodeQueue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -202,11 +208,13 @@ describe('gestaltsGestaltTrustByIdentity', () => { nodeManager = new NodeManager({ db, keyManager, - sigchain, - nodeGraph, nodeConnectionManager, - logger: logger.getChild('nodeManager'), + nodeGraph, + sigchain, + setNodeQueue, + logger, }); + await setNodeQueue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); await nodeManager.setNode(nodesUtils.decodeNodeId(nodeId)!, { @@ -250,6 +258,7 @@ describe('gestaltsGestaltTrustByIdentity', () => { await discovery.stop(); await nodeConnectionManager.stop(); await nodeManager.stop(); + await setNodeQueue.stop(); await nodeGraph.stop(); await proxy.stop(); await sigchain.stop(); diff --git a/tests/client/service/gestaltsGestaltTrustByNode.test.ts b/tests/client/service/gestaltsGestaltTrustByNode.test.ts index d8ecae06e..5a409c120 100644 --- a/tests/client/service/gestaltsGestaltTrustByNode.test.ts +++ b/tests/client/service/gestaltsGestaltTrustByNode.test.ts @@ -33,6 +33,7 @@ import * as claimsUtils from '@/claims/utils'; import * as keysUtils from '@/keys/utils'; import * as clientUtils from '@/client/utils/utils'; import * as nodesUtils from '@/nodes/utils'; +import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../../utils'; import TestProvider from '../../identities/TestProvider'; @@ -114,6 +115,7 @@ describe('gestaltsGestaltTrustByNode', () => { let discovery: Discovery; let gestaltGraph: GestaltGraph; let identitiesManager: 
IdentitiesManager; + let setNodeQueue: SetNodeQueue; let nodeManager: NodeManager; let nodeConnectionManager: NodeConnectionManager; let nodeGraph: NodeGraph; @@ -190,10 +192,14 @@ describe('gestaltsGestaltTrustByNode', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + setNodeQueue = new SetNodeQueue({ + logger: logger.getChild('SetNodeQueue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + setNodeQueue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -201,11 +207,13 @@ describe('gestaltsGestaltTrustByNode', () => { nodeManager = new NodeManager({ db, keyManager, - sigchain, - nodeGraph, nodeConnectionManager, - logger: logger.getChild('nodeManager'), + nodeGraph, + sigchain, + setNodeQueue, + logger, }); + await setNodeQueue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); await nodeManager.setNode(nodesUtils.decodeNodeId(nodeId)!, { @@ -249,6 +257,7 @@ describe('gestaltsGestaltTrustByNode', () => { await discovery.stop(); await nodeConnectionManager.stop(); await nodeManager.stop(); + await setNodeQueue.stop(); await nodeGraph.stop(); await proxy.stop(); await sigchain.stop(); diff --git a/tests/client/service/identitiesClaim.test.ts b/tests/client/service/identitiesClaim.test.ts index f03e1be07..5038821e9 100644 --- a/tests/client/service/identitiesClaim.test.ts +++ b/tests/client/service/identitiesClaim.test.ts @@ -26,6 +26,7 @@ import * as keysUtils from '@/keys/utils'; import * as claimsUtils from '@/claims/utils'; import * as nodesUtils from '@/nodes/utils'; import * as validationErrors from '@/validation/errors'; +import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../../utils'; import TestProvider from '../../identities/TestProvider'; import { expectRemoteError } from '../../utils'; @@ -86,6 +87,7 @@ describe('identitiesClaim', () => { let testProvider: TestProvider; let 
identitiesManager: IdentitiesManager; let nodeGraph: NodeGraph; + let setNodeQueue: SetNodeQueue; let nodeConnectionManager: NodeConnectionManager; let sigchain: Sigchain; let proxy: Proxy; @@ -137,13 +139,18 @@ describe('identitiesClaim', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + setNodeQueue = new SetNodeQueue({ + logger: logger.getChild('SetNodeQueue'), + }); nodeConnectionManager = new NodeConnectionManager({ connConnectTime: 2000, proxy, keyManager, nodeGraph, - logger: logger.getChild('nodeConnectionManager'), + setNodeQueue, + logger: logger.getChild('NodeConnectionManager'), }); + await setNodeQueue.start(); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); const clientService = { identitiesClaim: identitiesClaim({ @@ -172,6 +179,7 @@ describe('identitiesClaim', () => { await grpcClient.destroy(); await grpcServer.stop(); await nodeConnectionManager.stop(); + await setNodeQueue.stop(); await nodeGraph.stop(); await sigchain.stop(); await proxy.stop(); diff --git a/tests/client/service/nodesAdd.test.ts b/tests/client/service/nodesAdd.test.ts index 94d925acc..e6d037034 100644 --- a/tests/client/service/nodesAdd.test.ts +++ b/tests/client/service/nodesAdd.test.ts @@ -22,6 +22,7 @@ import * as nodesUtils from '@/nodes/utils'; import * as clientUtils from '@/client/utils/utils'; import * as keysUtils from '@/keys/utils'; import * as validationErrors from '@/validation/errors'; +import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../../utils'; import { expectRemoteError } from '../../utils'; @@ -50,6 +51,7 @@ describe('nodesAdd', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; + let setNodeQueue: SetNodeQueue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; @@ -96,10 +98,14 @@ describe('nodesAdd', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + setNodeQueue = new SetNodeQueue({ + logger: 
logger.getChild('SetNodeQueue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + setNodeQueue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -110,8 +116,10 @@ describe('nodesAdd', () => { nodeConnectionManager, nodeGraph, sigchain, + setNodeQueue, logger, }); + await setNodeQueue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); const clientService = { @@ -140,6 +148,7 @@ describe('nodesAdd', () => { await grpcServer.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await setNodeQueue.stop(); await sigchain.stop(); await proxy.stop(); await db.stop(); diff --git a/tests/client/service/nodesClaim.test.ts b/tests/client/service/nodesClaim.test.ts index 47102fe1a..21b6a4a5a 100644 --- a/tests/client/service/nodesClaim.test.ts +++ b/tests/client/service/nodesClaim.test.ts @@ -24,6 +24,7 @@ import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as clientUtils from '@/client/utils/utils'; import * as keysUtils from '@/keys/utils'; import * as validationErrors from '@/validation/errors'; +import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../../utils'; describe('nodesClaim', () => { @@ -75,6 +76,7 @@ describe('nodesClaim', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; + let setNodeQueue: SetNodeQueue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; @@ -126,10 +128,14 @@ describe('nodesClaim', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + setNodeQueue = new SetNodeQueue({ + logger: logger.getChild('SetNodeQueue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + setNodeQueue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -137,11 +143,13 @@ 
describe('nodesClaim', () => { nodeManager = new NodeManager({ db, keyManager, - sigchain, - nodeGraph, nodeConnectionManager, + nodeGraph, + sigchain, + setNodeQueue, logger, }); + await setNodeQueue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); notificationsManager = @@ -179,6 +187,7 @@ describe('nodesClaim', () => { await grpcClient.destroy(); await grpcServer.stop(); await nodeConnectionManager.stop(); + await setNodeQueue.stop(); await nodeGraph.stop(); await notificationsManager.stop(); await sigchain.stop(); diff --git a/tests/client/service/nodesFind.test.ts b/tests/client/service/nodesFind.test.ts index 21372cb4c..095139160 100644 --- a/tests/client/service/nodesFind.test.ts +++ b/tests/client/service/nodesFind.test.ts @@ -20,6 +20,7 @@ import * as nodesPB from '@/proto/js/polykey/v1/nodes/nodes_pb'; import * as clientUtils from '@/client/utils/utils'; import * as keysUtils from '@/keys/utils'; import * as validationErrors from '@/validation/errors'; +import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../../utils'; import { expectRemoteError } from '../../utils'; @@ -56,6 +57,7 @@ describe('nodesFind', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; + let setNodeQueue: SetNodeQueue; let nodeConnectionManager: NodeConnectionManager; let sigchain: Sigchain; let proxy: Proxy; @@ -101,14 +103,19 @@ describe('nodesFind', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + setNodeQueue = new SetNodeQueue({ + logger: logger.getChild('SetNodeQueue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + setNodeQueue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); + await setNodeQueue.start(); await nodeConnectionManager.start({ nodeManager: {} as NodeManager }); const clientService = { nodesFind: nodesFind({ @@ -136,6 +143,7 @@ describe('nodesFind', () => { await 
sigchain.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await setNodeQueue.stop(); await proxy.stop(); await db.stop(); await keyManager.stop(); diff --git a/tests/client/service/nodesPing.test.ts b/tests/client/service/nodesPing.test.ts index 6fd489d36..bd1409b30 100644 --- a/tests/client/service/nodesPing.test.ts +++ b/tests/client/service/nodesPing.test.ts @@ -21,6 +21,7 @@ import * as nodesPB from '@/proto/js/polykey/v1/nodes/nodes_pb'; import * as clientUtils from '@/client/utils/utils'; import * as keysUtils from '@/keys/utils'; import * as validationErrors from '@/validation/errors'; +import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../../utils'; import { expectRemoteError } from '../../utils'; @@ -55,6 +56,7 @@ describe('nodesPing', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; + let setNodeQueue: SetNodeQueue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; @@ -101,10 +103,14 @@ describe('nodesPing', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + setNodeQueue = new SetNodeQueue({ + logger: logger.getChild('SetNodeQueue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + setNodeQueue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -115,8 +121,10 @@ describe('nodesPing', () => { nodeConnectionManager, nodeGraph, sigchain, + setNodeQueue, logger, }); + await setNodeQueue.start(); await nodeConnectionManager.start({ nodeManager }); const clientService = { nodesPing: nodesPing({ @@ -144,6 +152,7 @@ describe('nodesPing', () => { await sigchain.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await setNodeQueue.stop(); await proxy.stop(); await db.stop(); await keyManager.stop(); diff --git a/tests/client/service/notificationsClear.test.ts b/tests/client/service/notificationsClear.test.ts 
index c2a1c5cd3..7020f7d84 100644 --- a/tests/client/service/notificationsClear.test.ts +++ b/tests/client/service/notificationsClear.test.ts @@ -21,6 +21,7 @@ import { ClientServiceService } from '@/proto/js/polykey/v1/client_service_grpc_ import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as keysUtils from '@/keys/utils'; import * as clientUtils from '@/client/utils/utils'; +import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../../utils'; describe('notificationsClear', () => { @@ -53,6 +54,7 @@ describe('notificationsClear', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; + let setNodeQueue: SetNodeQueue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; @@ -105,10 +107,14 @@ describe('notificationsClear', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + setNodeQueue = new SetNodeQueue({ + logger: logger.getChild('SetNodeQueue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + setNodeQueue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -119,8 +125,10 @@ describe('notificationsClear', () => { nodeConnectionManager, nodeGraph, sigchain, + setNodeQueue, logger, }); + await setNodeQueue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); notificationsManager = @@ -159,6 +167,7 @@ describe('notificationsClear', () => { await notificationsManager.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await setNodeQueue.stop(); await sigchain.stop(); await proxy.stop(); await acl.stop(); diff --git a/tests/client/service/notificationsRead.test.ts b/tests/client/service/notificationsRead.test.ts index 24b8b9542..0f5b80610 100644 --- a/tests/client/service/notificationsRead.test.ts +++ b/tests/client/service/notificationsRead.test.ts @@ -23,6 +23,7 @@ 
import * as notificationsPB from '@/proto/js/polykey/v1/notifications/notificati import * as keysUtils from '@/keys/utils'; import * as nodesUtils from '@/nodes/utils'; import * as clientUtils from '@/client/utils'; +import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../../utils'; import * as testNodesUtils from '../../nodes/utils'; @@ -128,6 +129,7 @@ describe('notificationsRead', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; + let setNodeQueue: SetNodeQueue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; @@ -180,10 +182,14 @@ describe('notificationsRead', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + setNodeQueue = new SetNodeQueue({ + logger: logger.getChild('SetNodeQueue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + setNodeQueue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -191,11 +197,13 @@ describe('notificationsRead', () => { nodeManager = new NodeManager({ db, keyManager, - nodeGraph, nodeConnectionManager, + nodeGraph, sigchain, + setNodeQueue, logger, }); + await setNodeQueue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); notificationsManager = @@ -235,6 +243,7 @@ describe('notificationsRead', () => { await sigchain.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await setNodeQueue.stop(); await proxy.stop(); await acl.stop(); await db.stop(); diff --git a/tests/client/service/notificationsSend.test.ts b/tests/client/service/notificationsSend.test.ts index 220edfe83..a0f471b58 100644 --- a/tests/client/service/notificationsSend.test.ts +++ b/tests/client/service/notificationsSend.test.ts @@ -24,6 +24,7 @@ import * as keysUtils from '@/keys/utils'; import * as nodesUtils from '@/nodes/utils'; import * as notificationsUtils from 
'@/notifications/utils'; import * as clientUtils from '@/client/utils'; +import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../../utils'; describe('notificationsSend', () => { @@ -63,6 +64,7 @@ describe('notificationsSend', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; + let setNodeQueue: SetNodeQueue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; @@ -114,10 +116,14 @@ describe('notificationsSend', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + setNodeQueue = new SetNodeQueue({ + logger: logger.getChild('SetNodeQueue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + setNodeQueue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -125,11 +131,13 @@ describe('notificationsSend', () => { nodeManager = new NodeManager({ db, keyManager, - nodeGraph, nodeConnectionManager, + nodeGraph, sigchain, + setNodeQueue, logger, }); + await setNodeQueue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); notificationsManager = @@ -167,6 +175,7 @@ describe('notificationsSend', () => { await notificationsManager.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await setNodeQueue.stop(); await sigchain.stop(); await proxy.stop(); await acl.stop(); diff --git a/tests/discovery/Discovery.test.ts b/tests/discovery/Discovery.test.ts index da9acd92b..04f5b8236 100644 --- a/tests/discovery/Discovery.test.ts +++ b/tests/discovery/Discovery.test.ts @@ -21,6 +21,7 @@ import * as nodesUtils from '@/nodes/utils'; import * as claimsUtils from '@/claims/utils'; import * as discoveryErrors from '@/discovery/errors'; import * as keysUtils from '@/keys/utils'; +import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testNodesUtils from '../nodes/utils'; import * as testUtils from 
'../utils'; import TestProvider from '../identities/TestProvider'; @@ -47,6 +48,7 @@ describe('Discovery', () => { let gestaltGraph: GestaltGraph; let identitiesManager: IdentitiesManager; let nodeGraph: NodeGraph; + let setNodeQueue: SetNodeQueue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let db: DB; @@ -130,10 +132,14 @@ describe('Discovery', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + setNodeQueue = new SetNodeQueue({ + logger: logger.getChild('SetNodeQueue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + setNodeQueue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -141,11 +147,13 @@ describe('Discovery', () => { nodeManager = new NodeManager({ db, keyManager, - sigchain, - nodeGraph, nodeConnectionManager, - logger: logger.getChild('nodeManager'), + nodeGraph, + sigchain, + setNodeQueue, + logger, }); + await setNodeQueue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); // Set up other gestalt @@ -204,6 +212,7 @@ describe('Discovery', () => { await nodeB.stop(); await nodeConnectionManager.stop(); await nodeManager.stop(); + await setNodeQueue.stop(); await nodeGraph.stop(); await proxy.stop(); await sigchain.stop(); diff --git a/tests/nodes/NodeConnection.test.ts b/tests/nodes/NodeConnection.test.ts index 35c084fa9..99de0bbe8 100644 --- a/tests/nodes/NodeConnection.test.ts +++ b/tests/nodes/NodeConnection.test.ts @@ -34,6 +34,7 @@ import * as nodesUtils from '@/nodes/utils'; import * as agentErrors from '@/agent/errors'; import * as grpcUtils from '@/grpc/utils'; import { timerStart } from '@/utils'; +import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testNodesUtils from './utils'; import * as testUtils from '../utils'; import * as grpcTestUtils from '../grpc/utils'; @@ -84,6 +85,7 @@ describe('${NodeConnection.name} test', () => { let serverKeyManager: 
KeyManager; let serverVaultManager: VaultManager; let serverNodeGraph: NodeGraph; + let serverSetNodeQueue: SetNodeQueue; let serverNodeConnectionManager: NodeConnectionManager; let serverNodeManager: NodeManager; let serverSigchain: Sigchain; @@ -231,10 +233,12 @@ describe('${NodeConnection.name} test', () => { logger, }); + serverSetNodeQueue = new SetNodeQueue({ logger }); serverNodeConnectionManager = new NodeConnectionManager({ keyManager: serverKeyManager, nodeGraph: serverNodeGraph, proxy: serverProxy, + setNodeQueue: serverSetNodeQueue, logger, }); serverNodeManager = new NodeManager({ @@ -243,8 +247,10 @@ describe('${NodeConnection.name} test', () => { keyManager: serverKeyManager, nodeGraph: serverNodeGraph, nodeConnectionManager: serverNodeConnectionManager, + setNodeQueue: serverSetNodeQueue, logger: logger, }); + await serverSetNodeQueue.start(); await serverNodeManager.start(); await serverNodeConnectionManager.start({ nodeManager: serverNodeManager }); serverVaultManager = await VaultManager.createVaultManager({ @@ -356,6 +362,7 @@ describe('${NodeConnection.name} test', () => { await serverNodeGraph.destroy(); await serverNodeConnectionManager.stop(); await serverNodeManager.stop(); + await serverSetNodeQueue.stop(); await serverNotificationsManager.stop(); await serverNotificationsManager.destroy(); await agentTestUtils.closeTestAgentServer(agentServer); diff --git a/tests/nodes/NodeConnectionManager.general.test.ts b/tests/nodes/NodeConnectionManager.general.test.ts index 6231e5dcc..dd30f9049 100644 --- a/tests/nodes/NodeConnectionManager.general.test.ts +++ b/tests/nodes/NodeConnectionManager.general.test.ts @@ -20,6 +20,7 @@ import * as keysUtils from '@/keys/utils'; import * as grpcUtils from '@/grpc/utils'; import * as nodesPB from '@/proto/js/polykey/v1/nodes/nodes_pb'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; +import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testNodesUtils from './utils'; 
describe(`${NodeConnectionManager.name} general test`, () => { @@ -76,6 +77,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { let db: DB; let proxy: Proxy; let nodeGraph: NodeGraph; + let setNodeQueue: SetNodeQueue; let remoteNode1: PolykeyAgent; let remoteNode2: PolykeyAgent; @@ -191,6 +193,10 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, logger: logger.getChild('NodeGraph'), }); + setNodeQueue = new SetNodeQueue({ + logger: logger.getChild('SetNodeQueue'), + }); + await setNodeQueue.start(); const tlsConfig = { keyPrivatePem: keyManager.getRootKeyPairPem().privateKey, certChainPem: keysUtils.certToPem(keyManager.getRootCert()), @@ -216,6 +222,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { }); afterEach(async () => { + await setNodeQueue.stop(); await nodeGraph.stop(); await nodeGraph.destroy(); await db.stop(); @@ -232,6 +239,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, nodeGraph, proxy, + setNodeQueue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -259,6 +267,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, nodeGraph, proxy, + setNodeQueue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -300,6 +309,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, nodeGraph, proxy, + setNodeQueue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -353,6 +363,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, nodeGraph, proxy, + setNodeQueue, logger: logger.getChild('NodeConnectionManager'), }); @@ -424,6 +435,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, nodeGraph, proxy, + setNodeQueue, logger: nodeConnectionManagerLogger, }); await 
nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -461,6 +473,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, nodeGraph, proxy, + setNodeQueue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); diff --git a/tests/nodes/NodeConnectionManager.lifecycle.test.ts b/tests/nodes/NodeConnectionManager.lifecycle.test.ts index 979403ec3..bf719789a 100644 --- a/tests/nodes/NodeConnectionManager.lifecycle.test.ts +++ b/tests/nodes/NodeConnectionManager.lifecycle.test.ts @@ -19,6 +19,7 @@ import * as nodesErrors from '@/nodes/errors'; import * as keysUtils from '@/keys/utils'; import * as grpcUtils from '@/grpc/utils'; import { timerStart } from '@/utils'; +import SetNodeQueue from '@/nodes/SetNodeQueue'; describe(`${NodeConnectionManager.name} lifecycle test`, () => { const logger = new Logger( @@ -76,6 +77,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { let proxy: Proxy; let nodeGraph: NodeGraph; + let setNodeQueue: SetNodeQueue; let remoteNode1: PolykeyAgent; let remoteNode2: PolykeyAgent; @@ -154,6 +156,10 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, logger: logger.getChild('NodeGraph'), }); + setNodeQueue = new SetNodeQueue({ + logger: logger.getChild('SetNodeQueue'), + }); + await setNodeQueue.start(); const tlsConfig = { keyPrivatePem: keyManager.getRootKeyPairPem().privateKey, certChainPem: keysUtils.certToPem(keyManager.getRootCert()), @@ -179,6 +185,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { }); afterEach(async () => { + await setNodeQueue.stop(); await nodeGraph.stop(); await nodeGraph.destroy(); await db.stop(); @@ -197,6 +204,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, + setNodeQueue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -222,6 +230,7 @@ 
describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, + setNodeQueue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -256,6 +265,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, + setNodeQueue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -284,6 +294,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, + setNodeQueue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -336,6 +347,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, + setNodeQueue, connConnectTime: 500, logger: nodeConnectionManagerLogger, }); @@ -377,6 +389,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, + setNodeQueue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -403,6 +416,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, + setNodeQueue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -436,6 +450,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, + setNodeQueue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -469,6 +484,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, + setNodeQueue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -511,6 +527,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, + setNodeQueue, 
logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -531,6 +548,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, + setNodeQueue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -556,6 +574,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, + setNodeQueue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); diff --git a/tests/nodes/NodeConnectionManager.seednodes.test.ts b/tests/nodes/NodeConnectionManager.seednodes.test.ts index 4d47afb0c..ae186f451 100644 --- a/tests/nodes/NodeConnectionManager.seednodes.test.ts +++ b/tests/nodes/NodeConnectionManager.seednodes.test.ts @@ -17,6 +17,7 @@ import Proxy from '@/network/Proxy'; import * as nodesUtils from '@/nodes/utils'; import * as keysUtils from '@/keys/utils'; import * as grpcUtils from '@/grpc/utils'; +import SetNodeQueue from '@/nodes/SetNodeQueue'; describe(`${NodeConnectionManager.name} seed nodes test`, () => { const logger = new Logger( @@ -190,6 +191,9 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { keyManager, nodeGraph, proxy, + setNodeQueue: new SetNodeQueue({ + logger: logger.getChild('SetNodeQueue'), + }), seedNodes: dummySeedNodes, logger: logger, }); @@ -213,6 +217,9 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { keyManager, nodeGraph, proxy, + setNodeQueue: new SetNodeQueue({ + logger: logger.getChild('SetNodeQueue'), + }), seedNodes: dummySeedNodes, logger: logger, }); @@ -230,6 +237,7 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { test('should synchronise nodeGraph', async () => { let nodeConnectionManager: NodeConnectionManager | undefined; let nodeManager: NodeManager | undefined; + let setNodeQueue: SetNodeQueue | undefined; const mockedRefreshBucket 
= jest.spyOn( NodeManager.prototype, 'refreshBucket', @@ -245,10 +253,12 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { host: remoteNode2.proxy.getProxyHost(), port: remoteNode2.proxy.getProxyPort(), }; + setNodeQueue = new SetNodeQueue({ logger }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + setNodeQueue, seedNodes, logger: logger, }); @@ -258,8 +268,10 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { logger, nodeConnectionManager, nodeGraph, + setNodeQueue, sigchain: {} as Sigchain, }); + await setNodeQueue.start(); await nodeManager.start(); await remoteNode1.nodeGraph.setNode(nodeId1, { host: serverHost, @@ -278,11 +290,13 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { mockedRefreshBucket.mockRestore(); await nodeManager?.stop(); await nodeConnectionManager?.stop(); + await setNodeQueue?.stop(); } }); test('should call refreshBucket when syncing nodeGraph', async () => { let nodeConnectionManager: NodeConnectionManager | undefined; let nodeManager: NodeManager | undefined; + let setNodeQueue: SetNodeQueue | undefined; const mockedRefreshBucket = jest.spyOn( NodeManager.prototype, 'refreshBucket', @@ -298,10 +312,12 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { host: remoteNode2.proxy.getProxyHost(), port: remoteNode2.proxy.getProxyPort(), }; + setNodeQueue = new SetNodeQueue({ logger }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + setNodeQueue, seedNodes, logger: logger, }); @@ -312,7 +328,9 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { nodeConnectionManager, nodeGraph, sigchain: {} as Sigchain, + setNodeQueue, }); + await setNodeQueue.start(); await nodeManager.start(); await remoteNode1.nodeGraph.setNode(nodeId1, { host: serverHost, @@ -330,11 +348,13 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { mockedRefreshBucket.mockRestore(); await 
nodeManager?.stop(); await nodeConnectionManager?.stop(); + await setNodeQueue?.stop(); } }); test('should handle an offline seed node when synchronising nodeGraph', async () => { let nodeConnectionManager: NodeConnectionManager | undefined; let nodeManager: NodeManager | undefined; + let setNodeQueue: SetNodeQueue | undefined; const mockedRefreshBucket = jest.spyOn( NodeManager.prototype, 'refreshBucket', @@ -363,10 +383,12 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { host: serverHost, port: serverPort, }); + setNodeQueue = new SetNodeQueue({ logger }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + setNodeQueue, seedNodes, connConnectTime: 500, logger: logger, @@ -378,7 +400,9 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { nodeConnectionManager, nodeGraph, sigchain: {} as Sigchain, + setNodeQueue, }); + await setNodeQueue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); // This should complete without error @@ -390,6 +414,7 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { mockedRefreshBucket.mockRestore(); await nodeConnectionManager?.stop(); await nodeManager?.stop(); + await setNodeQueue?.stop(); } }); }); diff --git a/tests/nodes/NodeConnectionManager.termination.test.ts b/tests/nodes/NodeConnectionManager.termination.test.ts index 0422cf223..89358557b 100644 --- a/tests/nodes/NodeConnectionManager.termination.test.ts +++ b/tests/nodes/NodeConnectionManager.termination.test.ts @@ -2,6 +2,7 @@ import type { AddressInfo } from 'net'; import type { NodeId, NodeIdString, SeedNodes } from '@/nodes/types'; import type { Host, Port, TLSConfig } from '@/network/types'; import type NodeManager from '@/nodes/NodeManager'; +import type SetNodeQueue from '@/nodes/SetNodeQueue'; import net from 'net'; import fs from 'fs'; import path from 'path'; @@ -246,6 +247,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { 
keyManager, nodeGraph, proxy, + setNodeQueue: {} as SetNodeQueue, logger: logger, connConnectTime: 2000, }); @@ -286,6 +288,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy, + setNodeQueue: {} as SetNodeQueue, logger: logger, connConnectTime: 2000, }); @@ -329,6 +332,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy, + setNodeQueue: {} as SetNodeQueue, logger: logger, connConnectTime: 2000, }); @@ -372,6 +376,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, + setNodeQueue: {} as SetNodeQueue, logger: logger, connConnectTime: 2000, }); @@ -429,6 +434,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, + setNodeQueue: {} as SetNodeQueue, logger: logger, connConnectTime: 2000, }); @@ -508,6 +514,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, + setNodeQueue: {} as SetNodeQueue, logger: logger, connConnectTime: 2000, }); @@ -580,6 +587,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, + setNodeQueue: {} as SetNodeQueue, logger: logger, connConnectTime: 2000, }); @@ -657,6 +665,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, + setNodeQueue: {} as SetNodeQueue, logger: logger, connConnectTime: 2000, }); @@ -734,6 +743,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, + setNodeQueue: {} as SetNodeQueue, logger: logger, connConnectTime: 2000, }); diff --git a/tests/nodes/NodeConnectionManager.timeout.test.ts b/tests/nodes/NodeConnectionManager.timeout.test.ts index 494350f52..e07958140 100644 --- a/tests/nodes/NodeConnectionManager.timeout.test.ts +++ 
b/tests/nodes/NodeConnectionManager.timeout.test.ts @@ -1,6 +1,7 @@ import type { NodeId, NodeIdString, SeedNodes } from '@/nodes/types'; import type { Host, Port } from '@/network/types'; import type NodeManager from 'nodes/NodeManager'; +import type SetNodeQueue from '@/nodes/SetNodeQueue'; import fs from 'fs'; import path from 'path'; import os from 'os'; @@ -188,6 +189,7 @@ describe(`${NodeConnectionManager.name} timeout test`, () => { keyManager, nodeGraph, proxy, + setNodeQueue: {} as SetNodeQueue, connTimeoutTime: 500, logger: nodeConnectionManagerLogger, }); @@ -225,6 +227,7 @@ describe(`${NodeConnectionManager.name} timeout test`, () => { keyManager, nodeGraph, proxy, + setNodeQueue: {} as SetNodeQueue, connTimeoutTime: 1000, logger: nodeConnectionManagerLogger, }); @@ -278,6 +281,7 @@ describe(`${NodeConnectionManager.name} timeout test`, () => { keyManager, nodeGraph, proxy, + setNodeQueue: {} as SetNodeQueue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); diff --git a/tests/nodes/NodeManager.test.ts b/tests/nodes/NodeManager.test.ts index b83be35d8..7815c34e0 100644 --- a/tests/nodes/NodeManager.test.ts +++ b/tests/nodes/NodeManager.test.ts @@ -20,6 +20,7 @@ import { promise, promisify, sleep } from '@/utils'; import * as nodesUtils from '@/nodes/utils'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as nodesErrors from '@/nodes/errors'; +import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as nodesTestUtils from './utils'; import { generateNodeIdForBucket } from './utils'; @@ -30,6 +31,7 @@ describe(`${NodeManager.name} test`, () => { ]); let dataDir: string; let nodeGraph: NodeGraph; + let setNodeQueue: SetNodeQueue; let nodeConnectionManager: NodeConnectionManager; let proxy: Proxy; let keyManager: KeyManager; @@ -111,9 +113,11 @@ describe(`${NodeManager.name} test`, () => { keyManager, logger, }); + setNodeQueue = new SetNodeQueue({ logger }); 
nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, + setNodeQueue, proxy, logger, }); @@ -122,6 +126,7 @@ describe(`${NodeManager.name} test`, () => { mockedPingNode.mockClear(); mockedPingNode.mockImplementation(async (_) => true); await nodeConnectionManager.stop(); + await setNodeQueue.stop(); await nodeGraph.stop(); await nodeGraph.destroy(); await sigchain.stop(); @@ -167,6 +172,7 @@ describe(`${NodeManager.name} test`, () => { keyManager, nodeGraph, nodeConnectionManager, + setNodeQueue, logger, }); await nodeManager.start(); @@ -240,6 +246,7 @@ describe(`${NodeManager.name} test`, () => { keyManager, nodeGraph, nodeConnectionManager, + setNodeQueue, logger, }); await nodeManager.start(); @@ -427,6 +434,7 @@ describe(`${NodeManager.name} test`, () => { keyManager, nodeGraph, nodeConnectionManager, + setNodeQueue, logger, }); await nodeManager.start(); @@ -443,15 +451,18 @@ describe(`${NodeManager.name} test`, () => { }); }); test('should add a node when bucket has room', async () => { + const setNodeQueue = new SetNodeQueue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: {} as NodeConnectionManager, + setNodeQueue, logger, }); try { + await setNodeQueue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); const localNodeId = keyManager.getNodeId(); @@ -467,18 +478,22 @@ describe(`${NodeManager.name} test`, () => { expect(bucket).toHaveLength(1); } finally { await nodeManager.stop(); + await setNodeQueue.stop(); } }); test('should update a node if node exists', async () => { + const setNodeQueue = new SetNodeQueue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: {} as NodeConnectionManager, + setNodeQueue, logger, }); try { + await setNodeQueue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); const localNodeId 
= keyManager.getNodeId(); @@ -506,18 +521,22 @@ describe(`${NodeManager.name} test`, () => { expect(newNodeData.lastUpdated).not.toEqual(nodeData.lastUpdated); } finally { await nodeManager.stop(); + await setNodeQueue.stop(); } }); test('should not add node if bucket is full and old node is alive', async () => { + const setNodeQueue = new SetNodeQueue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: {} as NodeConnectionManager, + setNodeQueue, logger, }); try { + await setNodeQueue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); const localNodeId = keyManager.getNodeId(); @@ -556,18 +575,22 @@ describe(`${NodeManager.name} test`, () => { nodeManagerPingMock.mockRestore(); } finally { await nodeManager.stop(); + await setNodeQueue.stop(); } }); test('should add node if bucket is full, old node is alive and force is set', async () => { + const setNodeQueue = new SetNodeQueue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: {} as NodeConnectionManager, + setNodeQueue, logger, }); try { + await setNodeQueue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); const localNodeId = keyManager.getNodeId(); @@ -608,18 +631,22 @@ describe(`${NodeManager.name} test`, () => { nodeManagerPingMock.mockRestore(); } finally { await nodeManager.stop(); + await setNodeQueue.stop(); } }); test('should add node if bucket is full and old node is dead', async () => { + const setNodeQueue = new SetNodeQueue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: {} as NodeConnectionManager, + setNodeQueue, logger, }); try { + await setNodeQueue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); const localNodeId = keyManager.getNodeId(); @@ -652,19 
+679,23 @@ describe(`${NodeManager.name} test`, () => { nodeManagerPingMock.mockRestore(); } finally { await nodeManager.stop(); + await setNodeQueue.stop(); } }); test('should add node when an incoming connection is established', async () => { let server: PolykeyAgent | undefined; + const setNodeQueue = new SetNodeQueue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: {} as NodeConnectionManager, + setNodeQueue, logger, }); try { + await setNodeQueue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); server = await PolykeyAgent.createPolykeyAgent({ @@ -704,19 +735,23 @@ describe(`${NodeManager.name} test`, () => { await server?.stop(); await server?.destroy(); await nodeManager.stop(); + await setNodeQueue.stop(); } }); test('should not add nodes to full bucket if pings succeeds', async () => { mockedPingNode.mockImplementation(async (_) => true); + const setNodeQueue = new SetNodeQueue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: dummyNodeConnectionManager, + setNodeQueue, logger, }); try { + await setNodeQueue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); const nodeId = keyManager.getNodeId(); @@ -742,18 +777,22 @@ describe(`${NodeManager.name} test`, () => { ); } finally { await nodeManager.stop(); + await setNodeQueue.stop(); } }); test('should add nodes to full bucket if pings fail', async () => { mockedPingNode.mockImplementation(async (_) => true); + const setNodeQueue = new SetNodeQueue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: dummyNodeConnectionManager, + setNodeQueue, logger, }); + await setNodeQueue.start(); await nodeManager.start(); try { await nodeConnectionManager.start({ nodeManager }); @@ -779,13 +818,14 @@ 
describe(`${NodeManager.name} test`, () => { await nodeManager.setNode(newNode1, address); await nodeManager.setNode(newNode2, address); await nodeManager.setNode(newNode3, address); - await nodeManager.queueDrained(); + await setNodeQueue.queueDrained(); const list = await listBucket(100); expect(list).toContain(nodesUtils.encodeNodeId(newNode1)); expect(list).toContain(nodesUtils.encodeNodeId(newNode2)); expect(list).toContain(nodesUtils.encodeNodeId(newNode3)); } finally { await nodeManager.stop(); + await setNodeQueue.stop(); } }); test('should not block when bucket is full', async () => { @@ -795,14 +835,17 @@ describe(`${NodeManager.name} test`, () => { logger, }); mockedPingNode.mockImplementation(async (_) => true); + const setNodeQueue = new SetNodeQueue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph: tempNodeGraph, nodeConnectionManager: dummyNodeConnectionManager, + setNodeQueue, logger, }); + await setNodeQueue.start(); await nodeManager.start(); try { await nodeConnectionManager.start({ nodeManager }); @@ -821,27 +864,32 @@ describe(`${NodeManager.name} test`, () => { return true; }); const newNode4 = generateNodeIdForBucket(nodeId, 100, 25); + // Set manually to non-blocking await expect( - nodeManager.setNode(newNode4, address), + nodeManager.setNode(newNode4, address, false), ).resolves.toBeUndefined(); delayPing.resolveP(null); - await nodeManager.queueDrained(); + await setNodeQueue.queueDrained(); } finally { await nodeManager.stop(); + await setNodeQueue.stop(); await tempNodeGraph.stop(); await tempNodeGraph.destroy(); } }); test('should block when blocking is set to true', async () => { mockedPingNode.mockImplementation(async (_) => true); + const setNodeQueue = new SetNodeQueue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: dummyNodeConnectionManager, + setNodeQueue, logger, }); + await setNodeQueue.start(); 
await nodeManager.start(); try { await nodeConnectionManager.start({ nodeManager }); @@ -863,16 +911,19 @@ describe(`${NodeManager.name} test`, () => { expect(mockedPingNode).toBeCalled(); } finally { await nodeManager.stop(); + await setNodeQueue.stop(); } }); test('should update deadline when updating a bucket', async () => { const refreshBucketTimeout = 100000; + const setNodeQueue = new SetNodeQueue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: dummyNodeConnectionManager, + setNodeQueue, refreshBucketTimerDefault: refreshBucketTimeout, logger, }); @@ -882,6 +933,7 @@ describe(`${NodeManager.name} test`, () => { ); try { mockRefreshBucket.mockImplementation(async () => {}); + await setNodeQueue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); // @ts-ignore: kidnap map @@ -901,16 +953,19 @@ describe(`${NodeManager.name} test`, () => { } finally { mockRefreshBucket.mockRestore(); await nodeManager.stop(); + await setNodeQueue.stop(); } }); test('should add buckets to the queue when exceeding deadline', async () => { const refreshBucketTimeout = 100; + const setNodeQueue = new SetNodeQueue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: dummyNodeConnectionManager, + setNodeQueue, refreshBucketTimerDefault: refreshBucketTimeout, logger, }); @@ -924,6 +979,7 @@ describe(`${NodeManager.name} test`, () => { ); try { mockRefreshBucket.mockImplementation(async () => {}); + await setNodeQueue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); // Getting starting value @@ -934,16 +990,19 @@ describe(`${NodeManager.name} test`, () => { mockRefreshBucketQueueAdd.mockRestore(); mockRefreshBucket.mockRestore(); await nodeManager.stop(); + await setNodeQueue.stop(); } }); test('should digest queue to refresh buckets', async () => { const 
refreshBucketTimeout = 1000000; + const setNodeQueue = new SetNodeQueue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: dummyNodeConnectionManager, + setNodeQueue, refreshBucketTimerDefault: refreshBucketTimeout, logger, }); @@ -952,6 +1011,7 @@ describe(`${NodeManager.name} test`, () => { 'refreshBucket', ); try { + await setNodeQueue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); mockRefreshBucket.mockImplementation(async () => {}); @@ -968,16 +1028,19 @@ describe(`${NodeManager.name} test`, () => { } finally { mockRefreshBucket.mockRestore(); await nodeManager.stop(); + await setNodeQueue.stop(); } }); test('should abort refreshBucket queue when stopping', async () => { const refreshBucketTimeout = 1000000; + const setNodeQueue = new SetNodeQueue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: dummyNodeConnectionManager, + setNodeQueue, refreshBucketTimerDefault: refreshBucketTimeout, logger, }); @@ -986,6 +1049,7 @@ describe(`${NodeManager.name} test`, () => { 'refreshBucket', ); try { + await setNodeQueue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); mockRefreshBucket.mockImplementation( @@ -1007,6 +1071,7 @@ describe(`${NodeManager.name} test`, () => { } finally { mockRefreshBucket.mockRestore(); await nodeManager.stop(); + await setNodeQueue.stop(); } }); }); diff --git a/tests/notifications/NotificationsManager.test.ts b/tests/notifications/NotificationsManager.test.ts index b382ea49d..8a498fdb7 100644 --- a/tests/notifications/NotificationsManager.test.ts +++ b/tests/notifications/NotificationsManager.test.ts @@ -22,6 +22,7 @@ import * as notificationsErrors from '@/notifications/errors'; import * as vaultsUtils from '@/vaults/utils'; import * as nodesUtils from '@/nodes/utils'; import * as keysUtils from 
'@/keys/utils'; +import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../utils'; describe('NotificationsManager', () => { @@ -50,6 +51,7 @@ describe('NotificationsManager', () => { let acl: ACL; let db: DB; let nodeGraph: NodeGraph; + let setNodeQueue: SetNodeQueue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let keyManager: KeyManager; @@ -112,10 +114,12 @@ describe('NotificationsManager', () => { keyManager, logger, }); + setNodeQueue = new SetNodeQueue({ logger }); nodeConnectionManager = new NodeConnectionManager({ nodeGraph, keyManager, proxy, + setNodeQueue, logger, }); nodeManager = new NodeManager({ @@ -124,8 +128,10 @@ describe('NotificationsManager', () => { sigchain, nodeConnectionManager, nodeGraph, + setNodeQueue, logger, }); + await setNodeQueue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); // Set up node for receiving notifications @@ -147,6 +153,7 @@ describe('NotificationsManager', () => { }, global.defaultTimeout); afterAll(async () => { await receiver.stop(); + await setNodeQueue.stop(); await nodeConnectionManager.stop(); await nodeGraph.stop(); await proxy.stop(); diff --git a/tests/vaults/VaultManager.test.ts b/tests/vaults/VaultManager.test.ts index 5236215b0..46257fa33 100644 --- a/tests/vaults/VaultManager.test.ts +++ b/tests/vaults/VaultManager.test.ts @@ -8,6 +8,7 @@ import type { import type NotificationsManager from '@/notifications/NotificationsManager'; import type { Host, Port, TLSConfig } from '@/network/types'; import type NodeManager from '@/nodes/NodeManager'; +import type SetNodeQueue from '@/nodes/SetNodeQueue'; import fs from 'fs'; import os from 'os'; import path from 'path'; @@ -580,6 +581,7 @@ describe('VaultManager', () => { keyManager, nodeGraph, proxy, + setNodeQueue: {} as SetNodeQueue, logger, }); await nodeConnectionManager.start({ @@ -1497,6 +1499,7 @@ describe('VaultManager', () => { logger, nodeGraph, proxy, + 
setNodeQueue: {} as SetNodeQueue, connConnectTime: 1000, }); await nodeConnectionManager.start({ From 217c4498a178957f28de1035c0ae747815f9c91e Mon Sep 17 00:00:00 2001 From: Emma Casolin Date: Thu, 21 Apr 2022 15:00:11 +1000 Subject: [PATCH 24/39] fix: `syncNodeGraph` during agent startup is now non-blocking #322 --- src/PolykeyAgent.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/PolykeyAgent.ts b/src/PolykeyAgent.ts index 2b8951d91..fd52b14b7 100644 --- a/src/PolykeyAgent.ts +++ b/src/PolykeyAgent.ts @@ -670,7 +670,7 @@ class PolykeyAgent { await this.nodeManager.start(); await this.nodeConnectionManager.start({ nodeManager: this.nodeManager }); await this.nodeGraph.start({ fresh }); - await this.nodeConnectionManager.syncNodeGraph(); + await this.nodeConnectionManager.syncNodeGraph(false); await this.discovery.start({ fresh }); await this.vaultManager.start({ fresh }); await this.notificationsManager.start({ fresh }); From 6fc465bd473d4ed30a7dc5845e72bcd0f09ae2ac Mon Sep 17 00:00:00 2001 From: Emma Casolin Date: Thu, 21 Apr 2022 16:35:32 +1000 Subject: [PATCH 25/39] refactor: renamed `SetNodeQueue` to `Queue` Renamed `queueStart` and `queuePush` since `Queue` is its own class now Simplified some logic using the `promise` utility --- package-lock.json | 5 + src/PolykeyAgent.ts | 32 ++--- src/bootstrap/utils.ts | 8 +- src/nodes/NodeConnectionManager.ts | 29 ++-- src/nodes/NodeManager.ts | 12 +- src/nodes/Queue.ts | 91 ++++++++++++ src/nodes/SetNodeQueue.ts | 107 -------------- src/nodes/errors.ts | 6 +- src/utils/utils.ts | 2 +- tests/agent/GRPCClientAgent.test.ts | 14 +- tests/agent/service/notificationsSend.test.ts | 14 +- .../gestaltsDiscoveryByIdentity.test.ts | 16 +-- .../service/gestaltsDiscoveryByNode.test.ts | 16 +-- .../gestaltsGestaltTrustByIdentity.test.ts | 16 +-- .../gestaltsGestaltTrustByNode.test.ts | 16 +-- tests/client/service/identitiesClaim.test.ts | 14 +- tests/client/service/nodesAdd.test.ts | 16 +-- 
tests/client/service/nodesClaim.test.ts | 16 +-- tests/client/service/nodesFind.test.ts | 14 +- tests/client/service/nodesPing.test.ts | 16 +-- .../client/service/notificationsClear.test.ts | 16 +-- .../client/service/notificationsRead.test.ts | 16 +-- .../client/service/notificationsSend.test.ts | 16 +-- tests/discovery/Discovery.test.ts | 16 +-- tests/nodes/NodeConnection.test.ts | 14 +- .../NodeConnectionManager.general.test.ts | 24 ++-- .../NodeConnectionManager.lifecycle.test.ts | 36 ++--- .../NodeConnectionManager.seednodes.test.ts | 46 +++--- .../NodeConnectionManager.termination.test.ts | 20 +-- .../NodeConnectionManager.timeout.test.ts | 8 +- tests/nodes/NodeManager.test.ts | 134 +++++++++--------- .../NotificationsManager.test.ts | 14 +- tests/vaults/VaultManager.test.ts | 6 +- 33 files changed, 405 insertions(+), 421 deletions(-) create mode 100644 src/nodes/Queue.ts delete mode 100644 src/nodes/SetNodeQueue.ts diff --git a/package-lock.json b/package-lock.json index ed112e333..5ad06be76 100644 --- a/package-lock.json +++ b/package-lock.json @@ -16045,6 +16045,11 @@ } } }, + "node-abort-controller": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-3.0.1.tgz", + "integrity": "sha512-/ujIVxthRs+7q6hsdjHMaj8hRG9NuWmwrz+JdRwZ14jdFoKSkm+vDsCbF9PLpnSqjaWQJuTmVtcWHNLr+vrOFw==" + }, "node-fetch": { "version": "2.6.7", "requires": { diff --git a/src/PolykeyAgent.ts b/src/PolykeyAgent.ts index fd52b14b7..d5817347b 100644 --- a/src/PolykeyAgent.ts +++ b/src/PolykeyAgent.ts @@ -8,6 +8,7 @@ import process from 'process'; import Logger from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { CreateDestroyStartStop } from '@matrixai/async-init/dist/CreateDestroyStartStop'; +import Queue from './nodes/Queue'; import * as networkUtils from './network/utils'; import KeyManager from './keys/KeyManager'; import Status from './status/Status'; @@ -34,7 +35,6 @@ import * as errors from './errors'; 
import * as utils from './utils'; import * as keysUtils from './keys/utils'; import * as nodesUtils from './nodes/utils'; -import SetNodeQueue from './nodes/SetNodeQueue'; type NetworkConfig = { forwardHost?: Host; @@ -88,7 +88,7 @@ class PolykeyAgent { gestaltGraph, proxy, nodeGraph, - setNodeQueue, + queue, nodeConnectionManager, nodeManager, discovery, @@ -134,7 +134,7 @@ class PolykeyAgent { gestaltGraph?: GestaltGraph; proxy?: Proxy; nodeGraph?: NodeGraph; - setNodeQueue?: SetNodeQueue; + queue?: Queue; nodeConnectionManager?: NodeConnectionManager; nodeManager?: NodeManager; discovery?: Discovery; @@ -284,10 +284,10 @@ class PolykeyAgent { keyManager, logger: logger.getChild(NodeGraph.name), })); - setNodeQueue = - setNodeQueue ?? - new SetNodeQueue({ - logger: logger.getChild(SetNodeQueue.name), + queue = + queue ?? + new Queue({ + logger: logger.getChild(Queue.name), }); nodeConnectionManager = nodeConnectionManager ?? @@ -295,7 +295,7 @@ class PolykeyAgent { keyManager, nodeGraph, proxy, - setNodeQueue, + queue, seedNodes, ...nodeConnectionManagerConfig_, logger: logger.getChild(NodeConnectionManager.name), @@ -308,7 +308,7 @@ class PolykeyAgent { keyManager, nodeGraph, nodeConnectionManager, - setNodeQueue, + queue, logger: logger.getChild(NodeManager.name), }); await nodeManager.start(); @@ -395,7 +395,7 @@ class PolykeyAgent { gestaltGraph, proxy, nodeGraph, - setNodeQueue, + queue, nodeConnectionManager, nodeManager, discovery, @@ -428,7 +428,7 @@ class PolykeyAgent { public readonly gestaltGraph: GestaltGraph; public readonly proxy: Proxy; public readonly nodeGraph: NodeGraph; - public readonly setNodeQueue: SetNodeQueue; + public readonly queue: Queue; public readonly nodeConnectionManager: NodeConnectionManager; public readonly nodeManager: NodeManager; public readonly discovery: Discovery; @@ -453,7 +453,7 @@ class PolykeyAgent { gestaltGraph, proxy, nodeGraph, - setNodeQueue, + queue, nodeConnectionManager, nodeManager, discovery, @@ -477,7 +477,7 
@@ class PolykeyAgent { gestaltGraph: GestaltGraph; proxy: Proxy; nodeGraph: NodeGraph; - setNodeQueue: SetNodeQueue; + queue: Queue; nodeConnectionManager: NodeConnectionManager; nodeManager: NodeManager; discovery: Discovery; @@ -503,7 +503,7 @@ class PolykeyAgent { this.proxy = proxy; this.discovery = discovery; this.nodeGraph = nodeGraph; - this.setNodeQueue = setNodeQueue; + this.queue = queue; this.nodeConnectionManager = nodeConnectionManager; this.nodeManager = nodeManager; this.vaultManager = vaultManager; @@ -666,7 +666,7 @@ class PolykeyAgent { proxyPort: networkConfig_.proxyPort, tlsConfig, }); - await this.setNodeQueue.start(); + await this.queue.start(); await this.nodeManager.start(); await this.nodeConnectionManager.start({ nodeManager: this.nodeManager }); await this.nodeGraph.start({ fresh }); @@ -724,7 +724,7 @@ class PolykeyAgent { await this.nodeConnectionManager.stop(); await this.nodeGraph.stop(); await this.nodeManager.stop(); - await this.setNodeQueue.stop(); + await this.queue.stop(); await this.proxy.stop(); await this.grpcServerAgent.stop(); await this.grpcServerClient.stop(); diff --git a/src/bootstrap/utils.ts b/src/bootstrap/utils.ts index 09aff4586..60844fc19 100644 --- a/src/bootstrap/utils.ts +++ b/src/bootstrap/utils.ts @@ -4,7 +4,7 @@ import path from 'path'; import Logger from '@matrixai/logger'; import { DB } from '@matrixai/db'; import * as bootstrapErrors from './errors'; -import SetNodeQueue from '../nodes/SetNodeQueue'; +import Queue from '../nodes/Queue'; import { IdentitiesManager } from '../identities'; import { SessionManager } from '../sessions'; import { Status } from '../status'; @@ -142,12 +142,12 @@ async function bootstrapState({ keyManager, logger: logger.getChild(NodeGraph.name), }); - const setNodeQueue = new SetNodeQueue({ logger }); + const queue = new Queue({ logger }); const nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - setNodeQueue, + queue, logger: 
logger.getChild(NodeConnectionManager.name), }); const nodeManager = new NodeManager({ @@ -156,7 +156,7 @@ async function bootstrapState({ nodeGraph, nodeConnectionManager, sigchain, - setNodeQueue, + queue, logger: logger.getChild(NodeManager.name), }); const notificationsManager = diff --git a/src/nodes/NodeConnectionManager.ts b/src/nodes/NodeConnectionManager.ts index 7d6b5d694..e2c133a5f 100644 --- a/src/nodes/NodeConnectionManager.ts +++ b/src/nodes/NodeConnectionManager.ts @@ -4,7 +4,7 @@ import type Proxy from '../network/Proxy'; import type { Host, Hostname, Port } from '../network/types'; import type { Timer } from '../types'; import type NodeGraph from './NodeGraph'; -import type SetNodeQueue from './SetNodeQueue'; +import type Queue from './Queue'; import type { NodeAddress, NodeData, @@ -61,7 +61,7 @@ class NodeConnectionManager { protected nodeGraph: NodeGraph; protected keyManager: KeyManager; protected proxy: Proxy; - protected setNodeQueue: SetNodeQueue; + protected queue: Queue; // NodeManager has to be passed in during start to allow co-dependency protected nodeManager: NodeManager | undefined; protected seedNodes: SeedNodes; @@ -82,7 +82,7 @@ class NodeConnectionManager { keyManager, nodeGraph, proxy, - setNodeQueue, + queue, seedNodes = {}, initialClosestNodes = 3, connConnectTime = 20000, @@ -92,7 +92,7 @@ class NodeConnectionManager { nodeGraph: NodeGraph; keyManager: KeyManager; proxy: Proxy; - setNodeQueue: SetNodeQueue; + queue: Queue; seedNodes?: SeedNodes; initialClosestNodes?: number; connConnectTime?: number; @@ -103,7 +103,7 @@ class NodeConnectionManager { this.keyManager = keyManager; this.nodeGraph = nodeGraph; this.proxy = proxy; - this.setNodeQueue = setNodeQueue; + this.queue = queue; this.seedNodes = seedNodes; this.initialClosestNodes = initialClosestNodes; this.connConnectTime = connConnectTime; @@ -600,7 +600,7 @@ class NodeConnectionManager { ); for (const [nodeId, nodeData] of nodes) { if (!block) { - 
this.setNodeQueue.queueSetNode(() => + this.queue.push(() => this.nodeManager!.setNode(nodeId, nodeData.address), ); } else { @@ -612,17 +612,7 @@ class NodeConnectionManager { } } // Refreshing every bucket above the closest node - if (!block) { - this.setNodeQueue.queueSetNode(async () => { - const [closestNode] = ( - await this.nodeGraph.getClosestNodes(this.keyManager.getNodeId(), 1) - ).pop()!; - const [bucketIndex] = this.nodeGraph.bucketIndex(closestNode); - for (let i = bucketIndex; i < this.nodeGraph.nodeIdBits; i++) { - this.nodeManager?.refreshBucketQueueAdd(i); - } - }); - } else { + const refreshBuckets = async () => { const [closestNode] = ( await this.nodeGraph.getClosestNodes(this.keyManager.getNodeId(), 1) ).pop()!; @@ -630,6 +620,11 @@ class NodeConnectionManager { for (let i = bucketIndex; i < this.nodeGraph.nodeIdBits; i++) { this.nodeManager?.refreshBucketQueueAdd(i); } + }; + if (!block) { + this.queue.push(refreshBuckets); + } else { + await refreshBuckets(); } } } diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index 2bc76bccb..79f00637b 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -1,7 +1,7 @@ import type { DB, DBTransaction } from '@matrixai/db'; import type NodeConnectionManager from './NodeConnectionManager'; import type NodeGraph from './NodeGraph'; -import type SetNodeQueue from './SetNodeQueue'; +import type Queue from './Queue'; import type KeyManager from '../keys/KeyManager'; import type { PublicKeyPem } from '../keys/types'; import type Sigchain from '../sigchain/Sigchain'; @@ -38,7 +38,7 @@ class NodeManager { protected keyManager: KeyManager; protected nodeConnectionManager: NodeConnectionManager; protected nodeGraph: NodeGraph; - protected setNodeQueue: SetNodeQueue; + protected queue: Queue; // Refresh bucket timer protected refreshBucketDeadlineMap: Map = new Map(); protected refreshBucketTimer: NodeJS.Timer; @@ -57,7 +57,7 @@ class NodeManager { sigchain, nodeConnectionManager, 
nodeGraph, - setNodeQueue, + queue, refreshBucketTimerDefault = 3600000, // 1 hour in milliseconds logger, }: { @@ -66,7 +66,7 @@ class NodeManager { sigchain: Sigchain; nodeConnectionManager: NodeConnectionManager; nodeGraph: NodeGraph; - setNodeQueue: SetNodeQueue; + queue: Queue; refreshBucketTimerDefault?: number; logger?: Logger; }) { @@ -76,7 +76,7 @@ class NodeManager { this.sigchain = sigchain; this.nodeConnectionManager = nodeConnectionManager; this.nodeGraph = nodeGraph; - this.setNodeQueue = setNodeQueue; + this.queue = queue; this.refreshBucketTimerDefault = refreshBucketTimerDefault; } @@ -470,7 +470,7 @@ class NodeManager { )} to queue`, ); // Re-attempt this later asynchronously by adding the the queue - this.setNodeQueue.queueSetNode(() => + this.queue.push(() => this.setNode(nodeId, nodeAddress, true, false, timeout), ); } diff --git a/src/nodes/Queue.ts b/src/nodes/Queue.ts new file mode 100644 index 000000000..441165237 --- /dev/null +++ b/src/nodes/Queue.ts @@ -0,0 +1,91 @@ +import type { PromiseType } from '../utils'; +import Logger from '@matrixai/logger'; +import { StartStop, ready } from '@matrixai/async-init/dist/StartStop'; +import * as nodesErrors from './errors'; +import { promise } from '../utils'; + +interface Queue extends StartStop {} +@StartStop() +class Queue { + protected logger: Logger; + protected end: boolean = false; + protected queue: Array<() => Promise> = []; + protected runner: Promise; + protected plug_: PromiseType; + protected drained_: PromiseType; + + constructor({ logger }: { logger?: Logger }) { + this.logger = logger ?? 
new Logger(this.constructor.name); + } + + public async start() { + this.logger.info(`Starting ${this.constructor.name}`); + const start = async () => { + this.logger.debug('Starting queue'); + this.plug(); + const pace = async () => { + await this.plug_.p; + return !this.end; + }; + // While queue hasn't ended + while (await pace()) { + const job = this.queue.shift(); + if (job == null) { + // If the queue is empty then we pause the queue + this.plug(); + continue; + } + try { + await job(); + } catch (e) { + if (!(e instanceof nodesErrors.ErrorNodeGraphSameNodeId)) throw e; + } + } + this.logger.debug('queue has ended'); + }; + this.runner = start(); + this.logger.info(`Started ${this.constructor.name}`); + } + + public async stop() { + this.logger.info(`Stopping ${this.constructor.name}`); + this.logger.debug('Stopping queue'); + // Tell the queue runner to end + this.end = true; + this.unplug(); + // Wait for runner to finish it's current job + await this.runner; + this.logger.info(`Stopped ${this.constructor.name}`); + } + + /** + * This adds a setNode operation to the queue + */ + public push(f: () => Promise): void { + this.queue.push(f); + this.unplug(); + } + + @ready(new nodesErrors.ErrorQueueNotRunning()) + public async drained(): Promise { + await this.drained_.p; + } + + private plug(): void { + this.logger.debug('Plugging queue'); + // Pausing queue + this.plug_ = promise(); + // Signaling queue is empty + this.drained_.resolveP(); + } + + private unplug(): void { + this.logger.debug('Unplugging queue'); + // Starting queue + this.plug_.resolveP(); + // Signalling queue is running + this.drained_ = promise(); + } +} + +export default Queue; diff --git a/src/nodes/SetNodeQueue.ts b/src/nodes/SetNodeQueue.ts deleted file mode 100644 index a405c3418..000000000 --- a/src/nodes/SetNodeQueue.ts +++ /dev/null @@ -1,107 +0,0 @@ -import Logger from '@matrixai/logger'; -import { StartStop, ready } from '@matrixai/async-init/dist/StartStop'; -import * as 
nodesErrors from './errors'; - -interface SetNodeQueue extends StartStop {} -@StartStop() -class SetNodeQueue { - protected logger: Logger; - protected endQueue: boolean = false; - protected setNodeQueue: Array<() => Promise> = []; - protected setNodeQueuePlug: Promise; - protected setNodeQueueUnplug: (() => void) | undefined; - protected setNodeQueueRunner: Promise; - protected setNodeQueueEmpty: Promise; - protected setNodeQueueDrained: () => void; - - constructor({ logger }: { logger?: Logger }) { - this.logger = logger ?? new Logger(this.constructor.name); - } - - public async start() { - this.logger.info(`Starting ${this.constructor.name}`); - this.setNodeQueueRunner = this.startSetNodeQueue(); - this.logger.info(`Started ${this.constructor.name}`); - } - - public async stop() { - this.logger.info(`Stopping ${this.constructor.name}`); - await this.stopSetNodeQueue(); - this.logger.info(`Stopped ${this.constructor.name}`); - } - - /** - * This adds a setNode operation to the queue - */ - public queueSetNode(f: () => Promise): void { - this.setNodeQueue.push(f); - this.unplugQueue(); - } - - /** - * This starts the process of digesting the queue - */ - private async startSetNodeQueue(): Promise { - this.logger.debug('Starting setNodeQueue'); - this.plugQueue(); - // While queue hasn't ended - while (true) { - // Wait for queue to be unplugged - await this.setNodeQueuePlug; - if (this.endQueue) break; - const job = this.setNodeQueue.shift(); - if (job == null) { - // If the queue is empty then we pause the queue - this.plugQueue(); - continue; - } - try { - await job(); - } catch (e) { - if (!(e instanceof nodesErrors.ErrorNodeGraphSameNodeId)) throw e; - } - } - this.logger.debug('setNodeQueue has ended'); - } - - private async stopSetNodeQueue(): Promise { - this.logger.debug('Stopping setNodeQueue'); - // Tell the queue runner to end - this.endQueue = true; - this.unplugQueue(); - // Wait for runner to finish it's current job - await this.setNodeQueueRunner; - 
} - - private plugQueue(): void { - if (this.setNodeQueueUnplug == null) { - this.logger.debug('Plugging setNodeQueue'); - // Pausing queue - this.setNodeQueuePlug = new Promise((resolve) => { - this.setNodeQueueUnplug = resolve; - }); - // Signaling queue is empty - if (this.setNodeQueueDrained != null) this.setNodeQueueDrained(); - } - } - - private unplugQueue(): void { - if (this.setNodeQueueUnplug != null) { - this.logger.debug('Unplugging setNodeQueue'); - // Starting queue - this.setNodeQueueUnplug(); - this.setNodeQueueUnplug = undefined; - // Signalling queue is running - this.setNodeQueueEmpty = new Promise((resolve) => { - this.setNodeQueueDrained = resolve; - }); - } - } - - @ready(new nodesErrors.ErrorSetNodeQueueNotRunning()) - public async queueDrained(): Promise { - await this.setNodeQueueEmpty; - } -} - -export default SetNodeQueue; diff --git a/src/nodes/errors.ts b/src/nodes/errors.ts index 159021b9c..ad5b31c90 100644 --- a/src/nodes/errors.ts +++ b/src/nodes/errors.ts @@ -12,8 +12,8 @@ class ErrorNodeManagerNotRunning extends ErrorNodes { exitCode = sysexits.USAGE; } -class ErrorSetNodeQueueNotRunning extends ErrorNodes { - static description = 'SetNodeQueue is not running'; +class ErrorQueueNotRunning extends ErrorNodes { + static description = 'queue is not running'; exitCode = sysexits.USAGE; } @@ -91,7 +91,7 @@ export { ErrorNodes, ErrorNodeAborted, ErrorNodeManagerNotRunning, - ErrorSetNodeQueueNotRunning, + ErrorQueueNotRunning, ErrorNodeGraphRunning, ErrorNodeGraphNotRunning, ErrorNodeGraphDestroyed, diff --git a/src/utils/utils.ts b/src/utils/utils.ts index 0b99a8a43..168825b32 100644 --- a/src/utils/utils.ts +++ b/src/utils/utils.ts @@ -179,7 +179,7 @@ export type PromiseType = { /** * Deconstructed promise */ -function promise(): PromiseType { +function promise(): PromiseType { let resolveP, rejectP; const p = new Promise((resolve, reject) => { resolveP = resolve; diff --git a/tests/agent/GRPCClientAgent.test.ts 
b/tests/agent/GRPCClientAgent.test.ts index 2a9644055..134273e30 100644 --- a/tests/agent/GRPCClientAgent.test.ts +++ b/tests/agent/GRPCClientAgent.test.ts @@ -6,6 +6,7 @@ import path from 'path'; import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; +import Queue from '@/nodes/Queue'; import GestaltGraph from '@/gestalts/GestaltGraph'; import ACL from '@/acl/ACL'; import KeyManager from '@/keys/KeyManager'; @@ -22,7 +23,6 @@ import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as agentErrors from '@/agent/errors'; import * as keysUtils from '@/keys/utils'; import { timerStart } from '@/utils'; -import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testAgentUtils from './utils'; describe(GRPCClientAgent.name, () => { @@ -50,7 +50,7 @@ describe(GRPCClientAgent.name, () => { let keyManager: KeyManager; let vaultManager: VaultManager; let nodeGraph: NodeGraph; - let setNodeQueue: SetNodeQueue; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; @@ -112,12 +112,12 @@ describe(GRPCClientAgent.name, () => { keyManager, logger, }); - setNodeQueue = new SetNodeQueue({ logger }); + queue = new Queue({ logger }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - setNodeQueue, + queue, logger, }); nodeManager = new NodeManager({ @@ -126,10 +126,10 @@ describe(GRPCClientAgent.name, () => { keyManager: keyManager, nodeGraph: nodeGraph, nodeConnectionManager: nodeConnectionManager, - setNodeQueue, + queue, logger: logger, }); - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); notificationsManager = @@ -184,7 +184,7 @@ describe(GRPCClientAgent.name, () => { await sigchain.stop(); await nodeConnectionManager.stop(); await nodeManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); await 
nodeGraph.stop(); await gestaltGraph.stop(); await acl.stop(); diff --git a/tests/agent/service/notificationsSend.test.ts b/tests/agent/service/notificationsSend.test.ts index 9425743b8..4e584f57c 100644 --- a/tests/agent/service/notificationsSend.test.ts +++ b/tests/agent/service/notificationsSend.test.ts @@ -8,6 +8,7 @@ import { createPrivateKey, createPublicKey } from 'crypto'; import { exportJWK, SignJWT } from 'jose'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import GRPCServer from '@/grpc/GRPCServer'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; @@ -27,7 +28,6 @@ import * as notificationsPB from '@/proto/js/polykey/v1/notifications/notificati import * as keysUtils from '@/keys/utils'; import * as nodesUtils from '@/nodes/utils'; import * as notificationsUtils from '@/notifications/utils'; -import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../../utils'; import { expectRemoteError } from '../../utils'; @@ -41,7 +41,7 @@ describe('notificationsSend', () => { let senderKeyManager: KeyManager; let dataDir: string; let nodeGraph: NodeGraph; - let setNodeQueue: SetNodeQueue; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; @@ -111,14 +111,14 @@ describe('notificationsSend', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - setNodeQueue = new SetNodeQueue({ - logger: logger.getChild('SetNodeQueue'), + queue = new Queue({ + logger: logger.getChild('queue'), }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - setNodeQueue, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -129,10 +129,10 @@ describe('notificationsSend', () => { nodeGraph, nodeConnectionManager, sigchain, - 
setNodeQueue, + queue, logger, }); - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); notificationsManager = diff --git a/tests/client/service/gestaltsDiscoveryByIdentity.test.ts b/tests/client/service/gestaltsDiscoveryByIdentity.test.ts index 6fec772c1..f9789cb60 100644 --- a/tests/client/service/gestaltsDiscoveryByIdentity.test.ts +++ b/tests/client/service/gestaltsDiscoveryByIdentity.test.ts @@ -6,6 +6,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import GestaltGraph from '@/gestalts/GestaltGraph'; import ACL from '@/acl/ACL'; import KeyManager from '@/keys/KeyManager'; @@ -24,7 +25,6 @@ import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as identitiesPB from '@/proto/js/polykey/v1/identities/identities_pb'; import * as clientUtils from '@/client/utils/utils'; import * as keysUtils from '@/keys/utils'; -import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../../utils'; describe('gestaltsDiscoveryByIdentity', () => { @@ -60,7 +60,7 @@ describe('gestaltsDiscoveryByIdentity', () => { let gestaltGraph: GestaltGraph; let identitiesManager: IdentitiesManager; let nodeGraph: NodeGraph; - let setNodeQueue: SetNodeQueue; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; @@ -127,14 +127,14 @@ describe('gestaltsDiscoveryByIdentity', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - setNodeQueue = new SetNodeQueue({ - logger: logger.getChild('SetNodeQueue'), + queue = new Queue({ + logger: logger.getChild('queue'), }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - setNodeQueue, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: 
logger.getChild('NodeConnectionManager'), @@ -145,10 +145,10 @@ describe('gestaltsDiscoveryByIdentity', () => { nodeConnectionManager, nodeGraph, sigchain, - setNodeQueue, + queue, logger, }); - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); discovery = await Discovery.createDiscovery({ @@ -187,7 +187,7 @@ describe('gestaltsDiscoveryByIdentity', () => { await nodeGraph.stop(); await nodeConnectionManager.stop(); await nodeManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); await sigchain.stop(); await proxy.stop(); await identitiesManager.stop(); diff --git a/tests/client/service/gestaltsDiscoveryByNode.test.ts b/tests/client/service/gestaltsDiscoveryByNode.test.ts index 1e97b6250..3c0f00b10 100644 --- a/tests/client/service/gestaltsDiscoveryByNode.test.ts +++ b/tests/client/service/gestaltsDiscoveryByNode.test.ts @@ -6,6 +6,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import GestaltGraph from '@/gestalts/GestaltGraph'; import ACL from '@/acl/ACL'; import KeyManager from '@/keys/KeyManager'; @@ -25,7 +26,6 @@ import * as nodesPB from '@/proto/js/polykey/v1/nodes/nodes_pb'; import * as clientUtils from '@/client/utils/utils'; import * as keysUtils from '@/keys/utils'; import * as nodesUtils from '@/nodes/utils'; -import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../../utils'; import * as testNodesUtils from '../../nodes/utils'; @@ -61,7 +61,7 @@ describe('gestaltsDiscoveryByNode', () => { let gestaltGraph: GestaltGraph; let identitiesManager: IdentitiesManager; let nodeGraph: NodeGraph; - let setNodeQueue: SetNodeQueue; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; @@ -128,14 +128,14 @@ 
describe('gestaltsDiscoveryByNode', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - setNodeQueue = new SetNodeQueue({ - logger: logger.getChild('SetNodeQueue'), + queue = new Queue({ + logger: logger.getChild('queue'), }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - setNodeQueue, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -146,10 +146,10 @@ describe('gestaltsDiscoveryByNode', () => { nodeConnectionManager, nodeGraph, sigchain, - setNodeQueue, + queue, logger, }); - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); discovery = await Discovery.createDiscovery({ @@ -188,7 +188,7 @@ describe('gestaltsDiscoveryByNode', () => { await nodeGraph.stop(); await nodeConnectionManager.stop(); await nodeManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); await sigchain.stop(); await proxy.stop(); await identitiesManager.stop(); diff --git a/tests/client/service/gestaltsGestaltTrustByIdentity.test.ts b/tests/client/service/gestaltsGestaltTrustByIdentity.test.ts index 17de35e72..01a162e31 100644 --- a/tests/client/service/gestaltsGestaltTrustByIdentity.test.ts +++ b/tests/client/service/gestaltsGestaltTrustByIdentity.test.ts @@ -9,6 +9,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import PolykeyAgent from '@/PolykeyAgent'; import KeyManager from '@/keys/KeyManager'; import Discovery from '@/discovery/Discovery'; @@ -31,7 +32,6 @@ import * as gestaltsErrors from '@/gestalts/errors'; import * as keysUtils from '@/keys/utils'; import * as clientUtils from '@/client/utils/utils'; import * as nodesUtils from '@/nodes/utils'; -import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../../utils'; 
import TestProvider from '../../identities/TestProvider'; import { expectRemoteError } from '../../utils'; @@ -116,7 +116,7 @@ describe('gestaltsGestaltTrustByIdentity', () => { let discovery: Discovery; let gestaltGraph: GestaltGraph; let identitiesManager: IdentitiesManager; - let setNodeQueue: SetNodeQueue; + let queue: Queue; let nodeManager: NodeManager; let nodeConnectionManager: NodeConnectionManager; let nodeGraph: NodeGraph; @@ -193,14 +193,14 @@ describe('gestaltsGestaltTrustByIdentity', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - setNodeQueue = new SetNodeQueue({ - logger: logger.getChild('SetNodeQueue'), + queue = new Queue({ + logger: logger.getChild('queue'), }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - setNodeQueue, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -211,10 +211,10 @@ describe('gestaltsGestaltTrustByIdentity', () => { nodeConnectionManager, nodeGraph, sigchain, - setNodeQueue, + queue, logger, }); - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); await nodeManager.setNode(nodesUtils.decodeNodeId(nodeId)!, { @@ -258,7 +258,7 @@ describe('gestaltsGestaltTrustByIdentity', () => { await discovery.stop(); await nodeConnectionManager.stop(); await nodeManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); await nodeGraph.stop(); await proxy.stop(); await sigchain.stop(); diff --git a/tests/client/service/gestaltsGestaltTrustByNode.test.ts b/tests/client/service/gestaltsGestaltTrustByNode.test.ts index 5a409c120..df84503a7 100644 --- a/tests/client/service/gestaltsGestaltTrustByNode.test.ts +++ b/tests/client/service/gestaltsGestaltTrustByNode.test.ts @@ -10,6 +10,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from 
'@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import PolykeyAgent from '@/PolykeyAgent'; import KeyManager from '@/keys/KeyManager'; import Discovery from '@/discovery/Discovery'; @@ -33,7 +34,6 @@ import * as claimsUtils from '@/claims/utils'; import * as keysUtils from '@/keys/utils'; import * as clientUtils from '@/client/utils/utils'; import * as nodesUtils from '@/nodes/utils'; -import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../../utils'; import TestProvider from '../../identities/TestProvider'; @@ -115,7 +115,7 @@ describe('gestaltsGestaltTrustByNode', () => { let discovery: Discovery; let gestaltGraph: GestaltGraph; let identitiesManager: IdentitiesManager; - let setNodeQueue: SetNodeQueue; + let queue: Queue; let nodeManager: NodeManager; let nodeConnectionManager: NodeConnectionManager; let nodeGraph: NodeGraph; @@ -192,14 +192,14 @@ describe('gestaltsGestaltTrustByNode', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - setNodeQueue = new SetNodeQueue({ - logger: logger.getChild('SetNodeQueue'), + queue = new Queue({ + logger: logger.getChild('queue'), }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - setNodeQueue, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -210,10 +210,10 @@ describe('gestaltsGestaltTrustByNode', () => { nodeConnectionManager, nodeGraph, sigchain, - setNodeQueue, + queue, logger, }); - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); await nodeManager.setNode(nodesUtils.decodeNodeId(nodeId)!, { @@ -257,7 +257,7 @@ describe('gestaltsGestaltTrustByNode', () => { await discovery.stop(); await nodeConnectionManager.stop(); await nodeManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); await nodeGraph.stop(); await proxy.stop(); await sigchain.stop(); diff --git 
a/tests/client/service/identitiesClaim.test.ts b/tests/client/service/identitiesClaim.test.ts index 5038821e9..3a17b79a8 100644 --- a/tests/client/service/identitiesClaim.test.ts +++ b/tests/client/service/identitiesClaim.test.ts @@ -9,6 +9,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import IdentitiesManager from '@/identities/IdentitiesManager'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; @@ -26,7 +27,6 @@ import * as keysUtils from '@/keys/utils'; import * as claimsUtils from '@/claims/utils'; import * as nodesUtils from '@/nodes/utils'; import * as validationErrors from '@/validation/errors'; -import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../../utils'; import TestProvider from '../../identities/TestProvider'; import { expectRemoteError } from '../../utils'; @@ -87,7 +87,7 @@ describe('identitiesClaim', () => { let testProvider: TestProvider; let identitiesManager: IdentitiesManager; let nodeGraph: NodeGraph; - let setNodeQueue: SetNodeQueue; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let sigchain: Sigchain; let proxy: Proxy; @@ -139,18 +139,18 @@ describe('identitiesClaim', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - setNodeQueue = new SetNodeQueue({ - logger: logger.getChild('SetNodeQueue'), + queue = new Queue({ + logger: logger.getChild('queue'), }); nodeConnectionManager = new NodeConnectionManager({ connConnectTime: 2000, proxy, keyManager, nodeGraph, - setNodeQueue, + queue, logger: logger.getChild('NodeConnectionManager'), }); - await setNodeQueue.start(); + await queue.start(); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); const clientService = { identitiesClaim: identitiesClaim({ @@ -179,7 +179,7 @@ describe('identitiesClaim', 
() => { await grpcClient.destroy(); await grpcServer.stop(); await nodeConnectionManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); await nodeGraph.stop(); await sigchain.stop(); await proxy.stop(); diff --git a/tests/client/service/nodesAdd.test.ts b/tests/client/service/nodesAdd.test.ts index e6d037034..d912fb83a 100644 --- a/tests/client/service/nodesAdd.test.ts +++ b/tests/client/service/nodesAdd.test.ts @@ -5,6 +5,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; import NodeGraph from '@/nodes/NodeGraph'; @@ -22,7 +23,6 @@ import * as nodesUtils from '@/nodes/utils'; import * as clientUtils from '@/client/utils/utils'; import * as keysUtils from '@/keys/utils'; import * as validationErrors from '@/validation/errors'; -import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../../utils'; import { expectRemoteError } from '../../utils'; @@ -51,7 +51,7 @@ describe('nodesAdd', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; - let setNodeQueue: SetNodeQueue; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; @@ -98,14 +98,14 @@ describe('nodesAdd', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - setNodeQueue = new SetNodeQueue({ - logger: logger.getChild('SetNodeQueue'), + queue = new Queue({ + logger: logger.getChild('queue'), }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - setNodeQueue, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -116,10 +116,10 @@ describe('nodesAdd', () => { nodeConnectionManager, nodeGraph, sigchain, - setNodeQueue, + 
queue, logger, }); - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); const clientService = { @@ -148,7 +148,7 @@ describe('nodesAdd', () => { await grpcServer.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); await sigchain.stop(); await proxy.stop(); await db.stop(); diff --git a/tests/client/service/nodesClaim.test.ts b/tests/client/service/nodesClaim.test.ts index 21b6a4a5a..5e7dedc8d 100644 --- a/tests/client/service/nodesClaim.test.ts +++ b/tests/client/service/nodesClaim.test.ts @@ -7,6 +7,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import NotificationsManager from '@/notifications/NotificationsManager'; import ACL from '@/acl/ACL'; @@ -24,7 +25,6 @@ import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as clientUtils from '@/client/utils/utils'; import * as keysUtils from '@/keys/utils'; import * as validationErrors from '@/validation/errors'; -import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../../utils'; describe('nodesClaim', () => { @@ -76,7 +76,7 @@ describe('nodesClaim', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; - let setNodeQueue: SetNodeQueue; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; @@ -128,14 +128,14 @@ describe('nodesClaim', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - setNodeQueue = new SetNodeQueue({ - logger: logger.getChild('SetNodeQueue'), + queue = new Queue({ + logger: logger.getChild('queue'), }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, 
- setNodeQueue, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -146,10 +146,10 @@ describe('nodesClaim', () => { nodeConnectionManager, nodeGraph, sigchain, - setNodeQueue, + queue, logger, }); - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); notificationsManager = @@ -187,7 +187,7 @@ describe('nodesClaim', () => { await grpcClient.destroy(); await grpcServer.stop(); await nodeConnectionManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); await nodeGraph.stop(); await notificationsManager.stop(); await sigchain.stop(); diff --git a/tests/client/service/nodesFind.test.ts b/tests/client/service/nodesFind.test.ts index 095139160..4ff59d9f1 100644 --- a/tests/client/service/nodesFind.test.ts +++ b/tests/client/service/nodesFind.test.ts @@ -6,6 +6,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; import NodeGraph from '@/nodes/NodeGraph'; @@ -20,7 +21,6 @@ import * as nodesPB from '@/proto/js/polykey/v1/nodes/nodes_pb'; import * as clientUtils from '@/client/utils/utils'; import * as keysUtils from '@/keys/utils'; import * as validationErrors from '@/validation/errors'; -import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../../utils'; import { expectRemoteError } from '../../utils'; @@ -57,7 +57,7 @@ describe('nodesFind', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; - let setNodeQueue: SetNodeQueue; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let sigchain: Sigchain; let proxy: Proxy; @@ -103,19 +103,19 @@ describe('nodesFind', () => { keyManager, logger: 
logger.getChild('NodeGraph'), }); - setNodeQueue = new SetNodeQueue({ - logger: logger.getChild('SetNodeQueue'), + queue = new Queue({ + logger: logger.getChild('queue'), }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - setNodeQueue, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await setNodeQueue.start(); + await queue.start(); await nodeConnectionManager.start({ nodeManager: {} as NodeManager }); const clientService = { nodesFind: nodesFind({ @@ -143,7 +143,7 @@ describe('nodesFind', () => { await sigchain.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); await proxy.stop(); await db.stop(); await keyManager.stop(); diff --git a/tests/client/service/nodesPing.test.ts b/tests/client/service/nodesPing.test.ts index bd1409b30..14f9cbcee 100644 --- a/tests/client/service/nodesPing.test.ts +++ b/tests/client/service/nodesPing.test.ts @@ -5,6 +5,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; import NodeGraph from '@/nodes/NodeGraph'; @@ -21,7 +22,6 @@ import * as nodesPB from '@/proto/js/polykey/v1/nodes/nodes_pb'; import * as clientUtils from '@/client/utils/utils'; import * as keysUtils from '@/keys/utils'; import * as validationErrors from '@/validation/errors'; -import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../../utils'; import { expectRemoteError } from '../../utils'; @@ -56,7 +56,7 @@ describe('nodesPing', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; - let setNodeQueue: SetNodeQueue; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let 
nodeManager: NodeManager; let sigchain: Sigchain; @@ -103,14 +103,14 @@ describe('nodesPing', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - setNodeQueue = new SetNodeQueue({ - logger: logger.getChild('SetNodeQueue'), + queue = new Queue({ + logger: logger.getChild('queue'), }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - setNodeQueue, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -121,10 +121,10 @@ describe('nodesPing', () => { nodeConnectionManager, nodeGraph, sigchain, - setNodeQueue, + queue, logger, }); - await setNodeQueue.start(); + await queue.start(); await nodeConnectionManager.start({ nodeManager }); const clientService = { nodesPing: nodesPing({ @@ -152,7 +152,7 @@ describe('nodesPing', () => { await sigchain.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); await proxy.stop(); await db.stop(); await keyManager.stop(); diff --git a/tests/client/service/notificationsClear.test.ts b/tests/client/service/notificationsClear.test.ts index 7020f7d84..73a5e3597 100644 --- a/tests/client/service/notificationsClear.test.ts +++ b/tests/client/service/notificationsClear.test.ts @@ -5,6 +5,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { Metadata } from '@grpc/grpc-js'; import { DB } from '@matrixai/db'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import GRPCServer from '@/grpc/GRPCServer'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; @@ -21,7 +22,6 @@ import { ClientServiceService } from '@/proto/js/polykey/v1/client_service_grpc_ import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as keysUtils from '@/keys/utils'; import * as clientUtils from '@/client/utils/utils'; -import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from 
'../../utils'; describe('notificationsClear', () => { @@ -54,7 +54,7 @@ describe('notificationsClear', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; - let setNodeQueue: SetNodeQueue; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; @@ -107,14 +107,14 @@ describe('notificationsClear', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - setNodeQueue = new SetNodeQueue({ - logger: logger.getChild('SetNodeQueue'), + queue = new Queue({ + logger: logger.getChild('queue'), }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - setNodeQueue, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -125,10 +125,10 @@ describe('notificationsClear', () => { nodeConnectionManager, nodeGraph, sigchain, - setNodeQueue, + queue, logger, }); - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); notificationsManager = @@ -167,7 +167,7 @@ describe('notificationsClear', () => { await notificationsManager.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); await sigchain.stop(); await proxy.stop(); await acl.stop(); diff --git a/tests/client/service/notificationsRead.test.ts b/tests/client/service/notificationsRead.test.ts index 0f5b80610..d5688e6c5 100644 --- a/tests/client/service/notificationsRead.test.ts +++ b/tests/client/service/notificationsRead.test.ts @@ -6,6 +6,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { Metadata } from '@grpc/grpc-js'; import { DB } from '@matrixai/db'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import GRPCServer from '@/grpc/GRPCServer'; import NodeConnectionManager from 
'@/nodes/NodeConnectionManager'; @@ -23,7 +24,6 @@ import * as notificationsPB from '@/proto/js/polykey/v1/notifications/notificati import * as keysUtils from '@/keys/utils'; import * as nodesUtils from '@/nodes/utils'; import * as clientUtils from '@/client/utils'; -import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../../utils'; import * as testNodesUtils from '../../nodes/utils'; @@ -129,7 +129,7 @@ describe('notificationsRead', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; - let setNodeQueue: SetNodeQueue; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; @@ -182,14 +182,14 @@ describe('notificationsRead', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - setNodeQueue = new SetNodeQueue({ - logger: logger.getChild('SetNodeQueue'), + queue = new Queue({ + logger: logger.getChild('queue'), }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - setNodeQueue, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -200,10 +200,10 @@ describe('notificationsRead', () => { nodeConnectionManager, nodeGraph, sigchain, - setNodeQueue, + queue, logger, }); - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); notificationsManager = @@ -243,7 +243,7 @@ describe('notificationsRead', () => { await sigchain.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); await proxy.stop(); await acl.stop(); await db.stop(); diff --git a/tests/client/service/notificationsSend.test.ts b/tests/client/service/notificationsSend.test.ts index a0f471b58..6a2489bdf 100644 --- a/tests/client/service/notificationsSend.test.ts +++ b/tests/client/service/notificationsSend.test.ts @@ -6,6 +6,7 @@ import 
os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { Metadata } from '@grpc/grpc-js'; import { DB } from '@matrixai/db'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import GRPCServer from '@/grpc/GRPCServer'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; @@ -24,7 +25,6 @@ import * as keysUtils from '@/keys/utils'; import * as nodesUtils from '@/nodes/utils'; import * as notificationsUtils from '@/notifications/utils'; import * as clientUtils from '@/client/utils'; -import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../../utils'; describe('notificationsSend', () => { @@ -64,7 +64,7 @@ describe('notificationsSend', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; - let setNodeQueue: SetNodeQueue; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; @@ -116,14 +116,14 @@ describe('notificationsSend', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - setNodeQueue = new SetNodeQueue({ - logger: logger.getChild('SetNodeQueue'), + queue = new Queue({ + logger: logger.getChild('queue'), }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - setNodeQueue, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -134,10 +134,10 @@ describe('notificationsSend', () => { nodeConnectionManager, nodeGraph, sigchain, - setNodeQueue, + queue, logger, }); - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); notificationsManager = @@ -175,7 +175,7 @@ describe('notificationsSend', () => { await notificationsManager.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); await sigchain.stop(); await 
proxy.stop(); await acl.stop(); diff --git a/tests/discovery/Discovery.test.ts b/tests/discovery/Discovery.test.ts index 04f5b8236..a267cc7d8 100644 --- a/tests/discovery/Discovery.test.ts +++ b/tests/discovery/Discovery.test.ts @@ -6,6 +6,7 @@ import path from 'path'; import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; +import Queue from '@/nodes/Queue'; import PolykeyAgent from '@/PolykeyAgent'; import Discovery from '@/discovery/Discovery'; import GestaltGraph from '@/gestalts/GestaltGraph'; @@ -21,7 +22,6 @@ import * as nodesUtils from '@/nodes/utils'; import * as claimsUtils from '@/claims/utils'; import * as discoveryErrors from '@/discovery/errors'; import * as keysUtils from '@/keys/utils'; -import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testNodesUtils from '../nodes/utils'; import * as testUtils from '../utils'; import TestProvider from '../identities/TestProvider'; @@ -48,7 +48,7 @@ describe('Discovery', () => { let gestaltGraph: GestaltGraph; let identitiesManager: IdentitiesManager; let nodeGraph: NodeGraph; - let setNodeQueue: SetNodeQueue; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let db: DB; @@ -132,14 +132,14 @@ describe('Discovery', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - setNodeQueue = new SetNodeQueue({ - logger: logger.getChild('SetNodeQueue'), + queue = new Queue({ + logger: logger.getChild('queue'), }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - setNodeQueue, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -150,10 +150,10 @@ describe('Discovery', () => { nodeConnectionManager, nodeGraph, sigchain, - setNodeQueue, + queue, logger, }); - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); // Set up other 
gestalt @@ -212,7 +212,7 @@ describe('Discovery', () => { await nodeB.stop(); await nodeConnectionManager.stop(); await nodeManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); await nodeGraph.stop(); await proxy.stop(); await sigchain.stop(); diff --git a/tests/nodes/NodeConnection.test.ts b/tests/nodes/NodeConnection.test.ts index 99de0bbe8..725e6a684 100644 --- a/tests/nodes/NodeConnection.test.ts +++ b/tests/nodes/NodeConnection.test.ts @@ -34,7 +34,7 @@ import * as nodesUtils from '@/nodes/utils'; import * as agentErrors from '@/agent/errors'; import * as grpcUtils from '@/grpc/utils'; import { timerStart } from '@/utils'; -import SetNodeQueue from '@/nodes/SetNodeQueue'; +import Queue from '@/nodes/Queue'; import * as testNodesUtils from './utils'; import * as testUtils from '../utils'; import * as grpcTestUtils from '../grpc/utils'; @@ -85,7 +85,7 @@ describe('${NodeConnection.name} test', () => { let serverKeyManager: KeyManager; let serverVaultManager: VaultManager; let serverNodeGraph: NodeGraph; - let serverSetNodeQueue: SetNodeQueue; + let serverQueue: Queue; let serverNodeConnectionManager: NodeConnectionManager; let serverNodeManager: NodeManager; let serverSigchain: Sigchain; @@ -233,12 +233,12 @@ describe('${NodeConnection.name} test', () => { logger, }); - serverSetNodeQueue = new SetNodeQueue({ logger }); + serverQueue = new Queue({ logger }); serverNodeConnectionManager = new NodeConnectionManager({ keyManager: serverKeyManager, nodeGraph: serverNodeGraph, proxy: serverProxy, - setNodeQueue: serverSetNodeQueue, + queue: serverQueue, logger, }); serverNodeManager = new NodeManager({ @@ -247,10 +247,10 @@ describe('${NodeConnection.name} test', () => { keyManager: serverKeyManager, nodeGraph: serverNodeGraph, nodeConnectionManager: serverNodeConnectionManager, - setNodeQueue: serverSetNodeQueue, + queue: serverQueue, logger: logger, }); - await serverSetNodeQueue.start(); + await serverQueue.start(); await serverNodeManager.start(); 
await serverNodeConnectionManager.start({ nodeManager: serverNodeManager }); serverVaultManager = await VaultManager.createVaultManager({ @@ -362,7 +362,7 @@ describe('${NodeConnection.name} test', () => { await serverNodeGraph.destroy(); await serverNodeConnectionManager.stop(); await serverNodeManager.stop(); - await serverSetNodeQueue.stop(); + await serverQueue.stop(); await serverNotificationsManager.stop(); await serverNotificationsManager.destroy(); await agentTestUtils.closeTestAgentServer(agentServer); diff --git a/tests/nodes/NodeConnectionManager.general.test.ts b/tests/nodes/NodeConnectionManager.general.test.ts index dd30f9049..8905f8718 100644 --- a/tests/nodes/NodeConnectionManager.general.test.ts +++ b/tests/nodes/NodeConnectionManager.general.test.ts @@ -7,6 +7,7 @@ import os from 'os'; import { DB } from '@matrixai/db'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { IdInternal } from '@matrixai/id'; +import Queue from '@/nodes/Queue'; import PolykeyAgent from '@/PolykeyAgent'; import KeyManager from '@/keys/KeyManager'; import NodeGraph from '@/nodes/NodeGraph'; @@ -20,7 +21,6 @@ import * as keysUtils from '@/keys/utils'; import * as grpcUtils from '@/grpc/utils'; import * as nodesPB from '@/proto/js/polykey/v1/nodes/nodes_pb'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; -import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testNodesUtils from './utils'; describe(`${NodeConnectionManager.name} general test`, () => { @@ -77,7 +77,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { let db: DB; let proxy: Proxy; let nodeGraph: NodeGraph; - let setNodeQueue: SetNodeQueue; + let queue: Queue; let remoteNode1: PolykeyAgent; let remoteNode2: PolykeyAgent; @@ -193,10 +193,10 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, logger: logger.getChild('NodeGraph'), }); - setNodeQueue = new SetNodeQueue({ - logger: logger.getChild('SetNodeQueue'), + queue = 
new Queue({ + logger: logger.getChild('queue'), }); - await setNodeQueue.start(); + await queue.start(); const tlsConfig = { keyPrivatePem: keyManager.getRootKeyPairPem().privateKey, certChainPem: keysUtils.certToPem(keyManager.getRootCert()), @@ -222,7 +222,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { }); afterEach(async () => { - await setNodeQueue.stop(); + await queue.stop(); await nodeGraph.stop(); await nodeGraph.destroy(); await db.stop(); @@ -239,7 +239,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue, + queue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -267,7 +267,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue, + queue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -309,7 +309,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue, + queue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -363,7 +363,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue, + queue, logger: logger.getChild('NodeConnectionManager'), }); @@ -435,7 +435,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue, + queue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -473,7 +473,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue, + queue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); diff --git a/tests/nodes/NodeConnectionManager.lifecycle.test.ts 
b/tests/nodes/NodeConnectionManager.lifecycle.test.ts index bf719789a..69b21c099 100644 --- a/tests/nodes/NodeConnectionManager.lifecycle.test.ts +++ b/tests/nodes/NodeConnectionManager.lifecycle.test.ts @@ -8,6 +8,7 @@ import { DB } from '@matrixai/db'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { withF } from '@matrixai/resources'; import { IdInternal } from '@matrixai/id'; +import Queue from '@/nodes/Queue'; import PolykeyAgent from '@/PolykeyAgent'; import KeyManager from '@/keys/KeyManager'; import NodeGraph from '@/nodes/NodeGraph'; @@ -19,7 +20,6 @@ import * as nodesErrors from '@/nodes/errors'; import * as keysUtils from '@/keys/utils'; import * as grpcUtils from '@/grpc/utils'; import { timerStart } from '@/utils'; -import SetNodeQueue from '@/nodes/SetNodeQueue'; describe(`${NodeConnectionManager.name} lifecycle test`, () => { const logger = new Logger( @@ -77,7 +77,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { let proxy: Proxy; let nodeGraph: NodeGraph; - let setNodeQueue: SetNodeQueue; + let queue: Queue; let remoteNode1: PolykeyAgent; let remoteNode2: PolykeyAgent; @@ -156,10 +156,10 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, logger: logger.getChild('NodeGraph'), }); - setNodeQueue = new SetNodeQueue({ - logger: logger.getChild('SetNodeQueue'), + queue = new Queue({ + logger: logger.getChild('queue'), }); - await setNodeQueue.start(); + await queue.start(); const tlsConfig = { keyPrivatePem: keyManager.getRootKeyPairPem().privateKey, certChainPem: keysUtils.certToPem(keyManager.getRootCert()), @@ -185,7 +185,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { }); afterEach(async () => { - await setNodeQueue.stop(); + await queue.stop(); await nodeGraph.stop(); await nodeGraph.destroy(); await db.stop(); @@ -204,7 +204,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue, + queue, 
logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -230,7 +230,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue, + queue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -265,7 +265,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue, + queue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -294,7 +294,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue, + queue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -347,7 +347,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue, + queue, connConnectTime: 500, logger: nodeConnectionManagerLogger, }); @@ -389,7 +389,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue, + queue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -416,7 +416,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue, + queue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -450,7 +450,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue, + queue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -484,7 +484,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue, + queue, logger: nodeConnectionManagerLogger, 
}); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -527,7 +527,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue, + queue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -548,7 +548,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue, + queue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -574,7 +574,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue, + queue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); diff --git a/tests/nodes/NodeConnectionManager.seednodes.test.ts b/tests/nodes/NodeConnectionManager.seednodes.test.ts index ae186f451..d433dcbd1 100644 --- a/tests/nodes/NodeConnectionManager.seednodes.test.ts +++ b/tests/nodes/NodeConnectionManager.seednodes.test.ts @@ -17,7 +17,7 @@ import Proxy from '@/network/Proxy'; import * as nodesUtils from '@/nodes/utils'; import * as keysUtils from '@/keys/utils'; import * as grpcUtils from '@/grpc/utils'; -import SetNodeQueue from '@/nodes/SetNodeQueue'; +import Queue from '@/nodes/Queue'; describe(`${NodeConnectionManager.name} seed nodes test`, () => { const logger = new Logger( @@ -191,8 +191,8 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue: new SetNodeQueue({ - logger: logger.getChild('SetNodeQueue'), + queue: new Queue({ + logger: logger.getChild('queue'), }), seedNodes: dummySeedNodes, logger: logger, @@ -217,8 +217,8 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue: new SetNodeQueue({ - logger: logger.getChild('SetNodeQueue'), + queue: new Queue({ + logger: 
logger.getChild('queue'), }), seedNodes: dummySeedNodes, logger: logger, @@ -237,7 +237,7 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { test('should synchronise nodeGraph', async () => { let nodeConnectionManager: NodeConnectionManager | undefined; let nodeManager: NodeManager | undefined; - let setNodeQueue: SetNodeQueue | undefined; + let queue: Queue | undefined; const mockedRefreshBucket = jest.spyOn( NodeManager.prototype, 'refreshBucket', @@ -253,12 +253,12 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { host: remoteNode2.proxy.getProxyHost(), port: remoteNode2.proxy.getProxyPort(), }; - setNodeQueue = new SetNodeQueue({ logger }); + queue = new Queue({ logger }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - setNodeQueue, + queue, seedNodes, logger: logger, }); @@ -268,10 +268,10 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { logger, nodeConnectionManager, nodeGraph, - setNodeQueue, + queue, sigchain: {} as Sigchain, }); - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await remoteNode1.nodeGraph.setNode(nodeId1, { host: serverHost, @@ -290,13 +290,13 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { mockedRefreshBucket.mockRestore(); await nodeManager?.stop(); await nodeConnectionManager?.stop(); - await setNodeQueue?.stop(); + await queue?.stop(); } }); test('should call refreshBucket when syncing nodeGraph', async () => { let nodeConnectionManager: NodeConnectionManager | undefined; let nodeManager: NodeManager | undefined; - let setNodeQueue: SetNodeQueue | undefined; + let queue: Queue | undefined; const mockedRefreshBucket = jest.spyOn( NodeManager.prototype, 'refreshBucket', @@ -312,12 +312,12 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { host: remoteNode2.proxy.getProxyHost(), port: remoteNode2.proxy.getProxyPort(), }; - setNodeQueue = new SetNodeQueue({ logger }); + queue = 
new Queue({ logger }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - setNodeQueue, + queue, seedNodes, logger: logger, }); @@ -328,9 +328,9 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { nodeConnectionManager, nodeGraph, sigchain: {} as Sigchain, - setNodeQueue, + queue, }); - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await remoteNode1.nodeGraph.setNode(nodeId1, { host: serverHost, @@ -348,13 +348,13 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { mockedRefreshBucket.mockRestore(); await nodeManager?.stop(); await nodeConnectionManager?.stop(); - await setNodeQueue?.stop(); + await queue?.stop(); } }); test('should handle an offline seed node when synchronising nodeGraph', async () => { let nodeConnectionManager: NodeConnectionManager | undefined; let nodeManager: NodeManager | undefined; - let setNodeQueue: SetNodeQueue | undefined; + let queue: Queue | undefined; const mockedRefreshBucket = jest.spyOn( NodeManager.prototype, 'refreshBucket', @@ -383,12 +383,12 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { host: serverHost, port: serverPort, }); - setNodeQueue = new SetNodeQueue({ logger }); + queue = new Queue({ logger }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - setNodeQueue, + queue, seedNodes, connConnectTime: 500, logger: logger, @@ -400,9 +400,9 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { nodeConnectionManager, nodeGraph, sigchain: {} as Sigchain, - setNodeQueue, + queue, }); - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); // This should complete without error @@ -414,7 +414,7 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { mockedRefreshBucket.mockRestore(); await nodeConnectionManager?.stop(); await nodeManager?.stop(); - await setNodeQueue?.stop(); 
+ await queue?.stop(); } }); }); diff --git a/tests/nodes/NodeConnectionManager.termination.test.ts b/tests/nodes/NodeConnectionManager.termination.test.ts index 89358557b..c26a93c03 100644 --- a/tests/nodes/NodeConnectionManager.termination.test.ts +++ b/tests/nodes/NodeConnectionManager.termination.test.ts @@ -2,7 +2,7 @@ import type { AddressInfo } from 'net'; import type { NodeId, NodeIdString, SeedNodes } from '@/nodes/types'; import type { Host, Port, TLSConfig } from '@/network/types'; import type NodeManager from '@/nodes/NodeManager'; -import type SetNodeQueue from '@/nodes/SetNodeQueue'; +import type Queue from '@/nodes/Queue'; import net from 'net'; import fs from 'fs'; import path from 'path'; @@ -247,7 +247,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue: {} as SetNodeQueue, + queue: {} as Queue, logger: logger, connConnectTime: 2000, }); @@ -288,7 +288,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue: {} as SetNodeQueue, + queue: {} as Queue, logger: logger, connConnectTime: 2000, }); @@ -332,7 +332,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue: {} as SetNodeQueue, + queue: {} as Queue, logger: logger, connConnectTime: 2000, }); @@ -376,7 +376,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, - setNodeQueue: {} as SetNodeQueue, + queue: {} as Queue, logger: logger, connConnectTime: 2000, }); @@ -434,7 +434,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, - setNodeQueue: {} as SetNodeQueue, + queue: {} as Queue, logger: logger, connConnectTime: 2000, }); @@ -514,7 +514,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, - setNodeQueue: {} as SetNodeQueue, 
+ queue: {} as Queue, logger: logger, connConnectTime: 2000, }); @@ -587,7 +587,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, - setNodeQueue: {} as SetNodeQueue, + queue: {} as Queue, logger: logger, connConnectTime: 2000, }); @@ -665,7 +665,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, - setNodeQueue: {} as SetNodeQueue, + queue: {} as Queue, logger: logger, connConnectTime: 2000, }); @@ -743,7 +743,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, - setNodeQueue: {} as SetNodeQueue, + queue: {} as Queue, logger: logger, connConnectTime: 2000, }); diff --git a/tests/nodes/NodeConnectionManager.timeout.test.ts b/tests/nodes/NodeConnectionManager.timeout.test.ts index e07958140..3f73a1a39 100644 --- a/tests/nodes/NodeConnectionManager.timeout.test.ts +++ b/tests/nodes/NodeConnectionManager.timeout.test.ts @@ -1,7 +1,7 @@ import type { NodeId, NodeIdString, SeedNodes } from '@/nodes/types'; import type { Host, Port } from '@/network/types'; import type NodeManager from 'nodes/NodeManager'; -import type SetNodeQueue from '@/nodes/SetNodeQueue'; +import type Queue from '@/nodes/Queue'; import fs from 'fs'; import path from 'path'; import os from 'os'; @@ -189,7 +189,7 @@ describe(`${NodeConnectionManager.name} timeout test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue: {} as SetNodeQueue, + queue: {} as Queue, connTimeoutTime: 500, logger: nodeConnectionManagerLogger, }); @@ -227,7 +227,7 @@ describe(`${NodeConnectionManager.name} timeout test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue: {} as SetNodeQueue, + queue: {} as Queue, connTimeoutTime: 1000, logger: nodeConnectionManagerLogger, }); @@ -281,7 +281,7 @@ describe(`${NodeConnectionManager.name} timeout test`, () => { keyManager, nodeGraph, proxy, - setNodeQueue: {} as SetNodeQueue, + queue: 
{} as Queue, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); diff --git a/tests/nodes/NodeManager.test.ts b/tests/nodes/NodeManager.test.ts index 7815c34e0..f40ce814e 100644 --- a/tests/nodes/NodeManager.test.ts +++ b/tests/nodes/NodeManager.test.ts @@ -7,6 +7,7 @@ import fs from 'fs'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import UTP from 'utp-native'; +import Queue from '@/nodes/Queue'; import PolykeyAgent from '@/PolykeyAgent'; import KeyManager from '@/keys/KeyManager'; import * as keysUtils from '@/keys/utils'; @@ -20,7 +21,6 @@ import { promise, promisify, sleep } from '@/utils'; import * as nodesUtils from '@/nodes/utils'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as nodesErrors from '@/nodes/errors'; -import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as nodesTestUtils from './utils'; import { generateNodeIdForBucket } from './utils'; @@ -31,7 +31,7 @@ describe(`${NodeManager.name} test`, () => { ]); let dataDir: string; let nodeGraph: NodeGraph; - let setNodeQueue: SetNodeQueue; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let proxy: Proxy; let keyManager: KeyManager; @@ -113,11 +113,11 @@ describe(`${NodeManager.name} test`, () => { keyManager, logger, }); - setNodeQueue = new SetNodeQueue({ logger }); + queue = new Queue({ logger }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, - setNodeQueue, + queue, proxy, logger, }); @@ -126,7 +126,7 @@ describe(`${NodeManager.name} test`, () => { mockedPingNode.mockClear(); mockedPingNode.mockImplementation(async (_) => true); await nodeConnectionManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); await nodeGraph.stop(); await nodeGraph.destroy(); await sigchain.stop(); @@ -172,7 +172,7 @@ describe(`${NodeManager.name} test`, () => { keyManager, nodeGraph, nodeConnectionManager, - 
setNodeQueue, + queue, logger, }); await nodeManager.start(); @@ -246,7 +246,7 @@ describe(`${NodeManager.name} test`, () => { keyManager, nodeGraph, nodeConnectionManager, - setNodeQueue, + queue, logger, }); await nodeManager.start(); @@ -434,7 +434,7 @@ describe(`${NodeManager.name} test`, () => { keyManager, nodeGraph, nodeConnectionManager, - setNodeQueue, + queue, logger, }); await nodeManager.start(); @@ -451,18 +451,18 @@ describe(`${NodeManager.name} test`, () => { }); }); test('should add a node when bucket has room', async () => { - const setNodeQueue = new SetNodeQueue({ logger }); + const queue = new Queue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: {} as NodeConnectionManager, - setNodeQueue, + queue, logger, }); try { - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); const localNodeId = keyManager.getNodeId(); @@ -478,22 +478,22 @@ describe(`${NodeManager.name} test`, () => { expect(bucket).toHaveLength(1); } finally { await nodeManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); } }); test('should update a node if node exists', async () => { - const setNodeQueue = new SetNodeQueue({ logger }); + const queue = new Queue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: {} as NodeConnectionManager, - setNodeQueue, + queue, logger, }); try { - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); const localNodeId = keyManager.getNodeId(); @@ -521,22 +521,22 @@ describe(`${NodeManager.name} test`, () => { expect(newNodeData.lastUpdated).not.toEqual(nodeData.lastUpdated); } finally { await nodeManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); } }); test('should not add node if bucket is full and old node is 
alive', async () => { - const setNodeQueue = new SetNodeQueue({ logger }); + const queue = new Queue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: {} as NodeConnectionManager, - setNodeQueue, + queue, logger, }); try { - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); const localNodeId = keyManager.getNodeId(); @@ -575,22 +575,22 @@ describe(`${NodeManager.name} test`, () => { nodeManagerPingMock.mockRestore(); } finally { await nodeManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); } }); test('should add node if bucket is full, old node is alive and force is set', async () => { - const setNodeQueue = new SetNodeQueue({ logger }); + const queue = new Queue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: {} as NodeConnectionManager, - setNodeQueue, + queue, logger, }); try { - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); const localNodeId = keyManager.getNodeId(); @@ -631,22 +631,22 @@ describe(`${NodeManager.name} test`, () => { nodeManagerPingMock.mockRestore(); } finally { await nodeManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); } }); test('should add node if bucket is full and old node is dead', async () => { - const setNodeQueue = new SetNodeQueue({ logger }); + const queue = new Queue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: {} as NodeConnectionManager, - setNodeQueue, + queue, logger, }); try { - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); const localNodeId = keyManager.getNodeId(); @@ -679,23 +679,23 @@ describe(`${NodeManager.name} 
test`, () => { nodeManagerPingMock.mockRestore(); } finally { await nodeManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); } }); test('should add node when an incoming connection is established', async () => { let server: PolykeyAgent | undefined; - const setNodeQueue = new SetNodeQueue({ logger }); + const queue = new Queue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: {} as NodeConnectionManager, - setNodeQueue, + queue, logger, }); try { - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); server = await PolykeyAgent.createPolykeyAgent({ @@ -735,23 +735,23 @@ describe(`${NodeManager.name} test`, () => { await server?.stop(); await server?.destroy(); await nodeManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); } }); test('should not add nodes to full bucket if pings succeeds', async () => { mockedPingNode.mockImplementation(async (_) => true); - const setNodeQueue = new SetNodeQueue({ logger }); + const queue = new Queue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: dummyNodeConnectionManager, - setNodeQueue, + queue, logger, }); try { - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); const nodeId = keyManager.getNodeId(); @@ -777,22 +777,22 @@ describe(`${NodeManager.name} test`, () => { ); } finally { await nodeManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); } }); test('should add nodes to full bucket if pings fail', async () => { mockedPingNode.mockImplementation(async (_) => true); - const setNodeQueue = new SetNodeQueue({ logger }); + const queue = new Queue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: 
dummyNodeConnectionManager, - setNodeQueue, + queue, logger, }); - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); try { await nodeConnectionManager.start({ nodeManager }); @@ -818,14 +818,14 @@ describe(`${NodeManager.name} test`, () => { await nodeManager.setNode(newNode1, address); await nodeManager.setNode(newNode2, address); await nodeManager.setNode(newNode3, address); - await setNodeQueue.queueDrained(); + await queue.drained(); const list = await listBucket(100); expect(list).toContain(nodesUtils.encodeNodeId(newNode1)); expect(list).toContain(nodesUtils.encodeNodeId(newNode2)); expect(list).toContain(nodesUtils.encodeNodeId(newNode3)); } finally { await nodeManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); } }); test('should not block when bucket is full', async () => { @@ -835,17 +835,17 @@ describe(`${NodeManager.name} test`, () => { logger, }); mockedPingNode.mockImplementation(async (_) => true); - const setNodeQueue = new SetNodeQueue({ logger }); + const queue = new Queue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph: tempNodeGraph, nodeConnectionManager: dummyNodeConnectionManager, - setNodeQueue, + queue, logger, }); - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); try { await nodeConnectionManager.start({ nodeManager }); @@ -868,28 +868,28 @@ describe(`${NodeManager.name} test`, () => { await expect( nodeManager.setNode(newNode4, address, false), ).resolves.toBeUndefined(); - delayPing.resolveP(null); - await setNodeQueue.queueDrained(); + delayPing.resolveP(); + await queue.drained(); } finally { await nodeManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); await tempNodeGraph.stop(); await tempNodeGraph.destroy(); } }); test('should block when blocking is set to true', async () => { mockedPingNode.mockImplementation(async (_) => true); - const setNodeQueue = new SetNodeQueue({ logger }); + const 
queue = new Queue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: dummyNodeConnectionManager, - setNodeQueue, + queue, logger, }); - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); try { await nodeConnectionManager.start({ nodeManager }); @@ -911,19 +911,19 @@ describe(`${NodeManager.name} test`, () => { expect(mockedPingNode).toBeCalled(); } finally { await nodeManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); } }); test('should update deadline when updating a bucket', async () => { const refreshBucketTimeout = 100000; - const setNodeQueue = new SetNodeQueue({ logger }); + const queue = new Queue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: dummyNodeConnectionManager, - setNodeQueue, + queue, refreshBucketTimerDefault: refreshBucketTimeout, logger, }); @@ -933,7 +933,7 @@ describe(`${NodeManager.name} test`, () => { ); try { mockRefreshBucket.mockImplementation(async () => {}); - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); // @ts-ignore: kidnap map @@ -953,19 +953,19 @@ describe(`${NodeManager.name} test`, () => { } finally { mockRefreshBucket.mockRestore(); await nodeManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); } }); test('should add buckets to the queue when exceeding deadline', async () => { const refreshBucketTimeout = 100; - const setNodeQueue = new SetNodeQueue({ logger }); + const queue = new Queue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: dummyNodeConnectionManager, - setNodeQueue, + queue, refreshBucketTimerDefault: refreshBucketTimeout, logger, }); @@ -979,7 +979,7 @@ describe(`${NodeManager.name} test`, () => { ); try { 
mockRefreshBucket.mockImplementation(async () => {}); - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); // Getting starting value @@ -990,19 +990,19 @@ describe(`${NodeManager.name} test`, () => { mockRefreshBucketQueueAdd.mockRestore(); mockRefreshBucket.mockRestore(); await nodeManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); } }); test('should digest queue to refresh buckets', async () => { const refreshBucketTimeout = 1000000; - const setNodeQueue = new SetNodeQueue({ logger }); + const queue = new Queue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: dummyNodeConnectionManager, - setNodeQueue, + queue, refreshBucketTimerDefault: refreshBucketTimeout, logger, }); @@ -1011,7 +1011,7 @@ describe(`${NodeManager.name} test`, () => { 'refreshBucket', ); try { - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); mockRefreshBucket.mockImplementation(async () => {}); @@ -1028,19 +1028,19 @@ describe(`${NodeManager.name} test`, () => { } finally { mockRefreshBucket.mockRestore(); await nodeManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); } }); test('should abort refreshBucket queue when stopping', async () => { const refreshBucketTimeout = 1000000; - const setNodeQueue = new SetNodeQueue({ logger }); + const queue = new Queue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: dummyNodeConnectionManager, - setNodeQueue, + queue, refreshBucketTimerDefault: refreshBucketTimeout, logger, }); @@ -1049,7 +1049,7 @@ describe(`${NodeManager.name} test`, () => { 'refreshBucket', ); try { - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); 
mockRefreshBucket.mockImplementation( @@ -1071,7 +1071,7 @@ describe(`${NodeManager.name} test`, () => { } finally { mockRefreshBucket.mockRestore(); await nodeManager.stop(); - await setNodeQueue.stop(); + await queue.stop(); } }); }); diff --git a/tests/notifications/NotificationsManager.test.ts b/tests/notifications/NotificationsManager.test.ts index 8a498fdb7..946bdcef6 100644 --- a/tests/notifications/NotificationsManager.test.ts +++ b/tests/notifications/NotificationsManager.test.ts @@ -8,6 +8,7 @@ import path from 'path'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { IdInternal } from '@matrixai/id'; +import Queue from '@/nodes/Queue'; import PolykeyAgent from '@/PolykeyAgent'; import ACL from '@/acl/ACL'; import Sigchain from '@/sigchain/Sigchain'; @@ -22,7 +23,6 @@ import * as notificationsErrors from '@/notifications/errors'; import * as vaultsUtils from '@/vaults/utils'; import * as nodesUtils from '@/nodes/utils'; import * as keysUtils from '@/keys/utils'; -import SetNodeQueue from '@/nodes/SetNodeQueue'; import * as testUtils from '../utils'; describe('NotificationsManager', () => { @@ -51,7 +51,7 @@ describe('NotificationsManager', () => { let acl: ACL; let db: DB; let nodeGraph: NodeGraph; - let setNodeQueue: SetNodeQueue; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let keyManager: KeyManager; @@ -114,12 +114,12 @@ describe('NotificationsManager', () => { keyManager, logger, }); - setNodeQueue = new SetNodeQueue({ logger }); + queue = new Queue({ logger }); nodeConnectionManager = new NodeConnectionManager({ nodeGraph, keyManager, proxy, - setNodeQueue, + queue, logger, }); nodeManager = new NodeManager({ @@ -128,10 +128,10 @@ describe('NotificationsManager', () => { sigchain, nodeConnectionManager, nodeGraph, - setNodeQueue, + queue, logger, }); - await setNodeQueue.start(); + await queue.start(); await nodeManager.start(); await 
nodeConnectionManager.start({ nodeManager }); // Set up node for receiving notifications @@ -153,7 +153,7 @@ describe('NotificationsManager', () => { }, global.defaultTimeout); afterAll(async () => { await receiver.stop(); - await setNodeQueue.stop(); + await queue.stop(); await nodeConnectionManager.stop(); await nodeGraph.stop(); await proxy.stop(); diff --git a/tests/vaults/VaultManager.test.ts b/tests/vaults/VaultManager.test.ts index 46257fa33..446c750c6 100644 --- a/tests/vaults/VaultManager.test.ts +++ b/tests/vaults/VaultManager.test.ts @@ -8,7 +8,7 @@ import type { import type NotificationsManager from '@/notifications/NotificationsManager'; import type { Host, Port, TLSConfig } from '@/network/types'; import type NodeManager from '@/nodes/NodeManager'; -import type SetNodeQueue from '@/nodes/SetNodeQueue'; +import type Queue from '@/nodes/Queue'; import fs from 'fs'; import os from 'os'; import path from 'path'; @@ -581,7 +581,7 @@ describe('VaultManager', () => { keyManager, nodeGraph, proxy, - setNodeQueue: {} as SetNodeQueue, + queue: {} as Queue, logger, }); await nodeConnectionManager.start({ @@ -1499,7 +1499,7 @@ describe('VaultManager', () => { logger, nodeGraph, proxy, - setNodeQueue: {} as SetNodeQueue, + queue: {} as Queue, connConnectTime: 1000, }); await nodeConnectionManager.start({ From 4ed0870c65a2c9b7d940fdb2d9873ba9ee464f4f Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Wed, 27 Apr 2022 15:32:17 +1000 Subject: [PATCH 26/39] tests: general fixes for failing tests This contains fixes for failing tests as well as fixes for tests failing to exit when finished. 
--- src/PolykeyAgent.ts | 2 +- src/nodes/NodeManager.ts | 14 +- src/nodes/Queue.ts | 4 +- tests/agent/service/notificationsSend.test.ts | 2 + tests/client/service/keysKeyPairRenew.test.ts | 2 +- tests/client/service/keysKeyPairReset.test.ts | 2 +- tests/client/service/nodesAdd.test.ts | 3 +- tests/client/service/nodesClaim.test.ts | 1 + .../client/service/notificationsClear.test.ts | 1 + .../client/service/notificationsRead.test.ts | 1 + .../client/service/notificationsSend.test.ts | 1 + tests/nodes/NodeManager.test.ts | 56 +++-- .../NotificationsManager.test.ts | 1 + tests/utils.ts | 197 +++++++++--------- 14 files changed, 148 insertions(+), 139 deletions(-) diff --git a/src/PolykeyAgent.ts b/src/PolykeyAgent.ts index d5817347b..781530290 100644 --- a/src/PolykeyAgent.ts +++ b/src/PolykeyAgent.ts @@ -551,7 +551,7 @@ class PolykeyAgent { await this.status.updateStatusLive({ nodeId: data.nodeId, }); - await this.nodeManager.refreshBuckets(); + await this.nodeManager.resetBuckets(); const tlsConfig = { keyPrivatePem: keysUtils.privateKeyToPem( data.rootKeyPair.privateKey, diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index 79f00637b..a9df2bcce 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -533,22 +533,12 @@ class NodeManager { return await this.nodeGraph.unsetNode(nodeId, tran); } - // FIXME - // /** - // * Gets all buckets from the NodeGraph - // */ - // public async getAllBuckets(tran?: DBTransaction): Promise> { - // return await this.nodeGraph.getAllBuckets(tran); - // } - - // FIXME potentially confusing name, should we rename this to renewBuckets? /** * To be called on key renewal. Re-orders all nodes in all buckets with respect * to the new node ID. 
*/ - public async refreshBuckets(tran?: DBTransaction): Promise { - throw Error('fixme'); - // Return await this.nodeGraph.refreshBuckets(tran); + public async resetBuckets(tran?: DBTransaction): Promise { + return await this.nodeGraph.resetBuckets(this.keyManager.getNodeId(tran)); } /** diff --git a/src/nodes/Queue.ts b/src/nodes/Queue.ts index 441165237..0f9c1485e 100644 --- a/src/nodes/Queue.ts +++ b/src/nodes/Queue.ts @@ -11,8 +11,8 @@ class Queue { protected end: boolean = false; protected queue: Array<() => Promise> = []; protected runner: Promise; - protected plug_: PromiseType; - protected drained_: PromiseType; + protected plug_: PromiseType = promise(); + protected drained_: PromiseType = promise(); constructor({ logger }: { logger?: Logger }) { this.logger = logger ?? new Logger(this.constructor.name); diff --git a/tests/agent/service/notificationsSend.test.ts b/tests/agent/service/notificationsSend.test.ts index 4e584f57c..6d08b842a 100644 --- a/tests/agent/service/notificationsSend.test.ts +++ b/tests/agent/service/notificationsSend.test.ts @@ -169,6 +169,8 @@ describe('notificationsSend', () => { await grpcServer.stop(); await notificationsManager.stop(); await nodeConnectionManager.stop(); + await queue.stop(); + await nodeManager.stop(); await sigchain.stop(); await sigchain.stop(); await proxy.stop(); diff --git a/tests/client/service/keysKeyPairRenew.test.ts b/tests/client/service/keysKeyPairRenew.test.ts index a36c621c1..47445ead0 100644 --- a/tests/client/service/keysKeyPairRenew.test.ts +++ b/tests/client/service/keysKeyPairRenew.test.ts @@ -32,7 +32,7 @@ describe('keysKeyPairRenew', () => { beforeAll(async () => { const globalKeyPair = await testUtils.setupGlobalKeypair(); const newKeyPair = await keysUtils.generateKeyPair(1024); - mockedRefreshBuckets = jest.spyOn(NodeManager.prototype, 'refreshBuckets'); + mockedRefreshBuckets = jest.spyOn(NodeManager.prototype, 'resetBuckets'); mockedGenerateKeyPair = jest .spyOn(keysUtils, 
'generateKeyPair') .mockResolvedValueOnce(globalKeyPair) diff --git a/tests/client/service/keysKeyPairReset.test.ts b/tests/client/service/keysKeyPairReset.test.ts index 335d5c5fd..55af8f35c 100644 --- a/tests/client/service/keysKeyPairReset.test.ts +++ b/tests/client/service/keysKeyPairReset.test.ts @@ -32,7 +32,7 @@ describe('keysKeyPairReset', () => { beforeAll(async () => { const globalKeyPair = await testUtils.setupGlobalKeypair(); const newKeyPair = await keysUtils.generateKeyPair(1024); - mockedRefreshBuckets = jest.spyOn(NodeManager.prototype, 'refreshBuckets'); + mockedRefreshBuckets = jest.spyOn(NodeManager.prototype, 'resetBuckets'); mockedGenerateKeyPair = jest .spyOn(keysUtils, 'generateKeyPair') .mockResolvedValueOnce(globalKeyPair) diff --git a/tests/client/service/nodesAdd.test.ts b/tests/client/service/nodesAdd.test.ts index d912fb83a..f2c4969a0 100644 --- a/tests/client/service/nodesAdd.test.ts +++ b/tests/client/service/nodesAdd.test.ts @@ -148,6 +148,7 @@ describe('nodesAdd', () => { await grpcServer.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); await queue.stop(); await sigchain.stop(); await proxy.stop(); @@ -176,7 +177,7 @@ describe('nodesAdd', () => { )!, ); expect(result).toBeDefined(); - expect(result!.address).toBe('127.0.0.1:11111'); + expect(result!.address).toEqual({ host: '127.0.0.1', port: 11111 }); }); test('cannot add invalid node', async () => { // Invalid host diff --git a/tests/client/service/nodesClaim.test.ts b/tests/client/service/nodesClaim.test.ts index 5e7dedc8d..95eaf8b6e 100644 --- a/tests/client/service/nodesClaim.test.ts +++ b/tests/client/service/nodesClaim.test.ts @@ -187,6 +187,7 @@ describe('nodesClaim', () => { await grpcClient.destroy(); await grpcServer.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); await queue.stop(); await nodeGraph.stop(); await notificationsManager.stop(); diff --git a/tests/client/service/notificationsClear.test.ts 
b/tests/client/service/notificationsClear.test.ts index 73a5e3597..4a9002f21 100644 --- a/tests/client/service/notificationsClear.test.ts +++ b/tests/client/service/notificationsClear.test.ts @@ -167,6 +167,7 @@ describe('notificationsClear', () => { await notificationsManager.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); await queue.stop(); await sigchain.stop(); await proxy.stop(); diff --git a/tests/client/service/notificationsRead.test.ts b/tests/client/service/notificationsRead.test.ts index d5688e6c5..b5a3de17a 100644 --- a/tests/client/service/notificationsRead.test.ts +++ b/tests/client/service/notificationsRead.test.ts @@ -243,6 +243,7 @@ describe('notificationsRead', () => { await sigchain.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); await queue.stop(); await proxy.stop(); await acl.stop(); diff --git a/tests/client/service/notificationsSend.test.ts b/tests/client/service/notificationsSend.test.ts index 6a2489bdf..35a6a15bb 100644 --- a/tests/client/service/notificationsSend.test.ts +++ b/tests/client/service/notificationsSend.test.ts @@ -175,6 +175,7 @@ describe('notificationsSend', () => { await notificationsManager.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); await queue.stop(); await sigchain.stop(); await proxy.stop(); diff --git a/tests/nodes/NodeManager.test.ts b/tests/nodes/NodeManager.test.ts index f40ce814e..66bd40999 100644 --- a/tests/nodes/NodeManager.test.ts +++ b/tests/nodes/NodeManager.test.ts @@ -147,6 +147,7 @@ describe(`${NodeManager.name} test`, () => { 'pings node', async () => { let server: PolykeyAgent | undefined; + let nodeManager: NodeManager | undefined; try { server = await PolykeyAgent.createPolykeyAgent({ password: 'password', @@ -166,7 +167,7 @@ describe(`${NodeManager.name} test`, () => { }; await nodeGraph.setNode(serverNodeId, serverNodeAddress); - const nodeManager = new 
NodeManager({ + nodeManager = new NodeManager({ db, sigchain, keyManager, @@ -213,6 +214,7 @@ describe(`${NodeManager.name} test`, () => { expect(active3).toBe(false); } finally { // Clean up + await nodeManager?.stop(); await server?.stop(); await server?.destroy(); } @@ -221,6 +223,7 @@ describe(`${NodeManager.name} test`, () => { ); // Ping needs to timeout (takes 20 seconds + setup + pulldown) test('getPublicKey', async () => { let server: PolykeyAgent | undefined; + let nodeManager: NodeManager | undefined; try { server = await PolykeyAgent.createPolykeyAgent({ password: 'password', @@ -240,7 +243,7 @@ describe(`${NodeManager.name} test`, () => { }; await nodeGraph.setNode(serverNodeId, serverNodeAddress); - const nodeManager = new NodeManager({ + nodeManager = new NodeManager({ db, sigchain, keyManager, @@ -258,6 +261,7 @@ describe(`${NodeManager.name} test`, () => { expect(key).toEqual(expectedKey); } finally { // Clean up + await nodeManager?.stop(); await server?.stop(); await server?.destroy(); } @@ -425,29 +429,34 @@ describe(`${NodeManager.name} test`, () => { } }); test('can request chain data', async () => { - // Cross signing claims - await y.nodeManager.claimNode(xNodeId); + let nodeManager: NodeManager | undefined; + try { + // Cross signing claims + await y.nodeManager.claimNode(xNodeId); - const nodeManager = new NodeManager({ - db, - sigchain, - keyManager, - nodeGraph, - nodeConnectionManager, - queue, - logger, - }); - await nodeManager.start(); - await nodeConnectionManager.start({ nodeManager }); + nodeManager = new NodeManager({ + db, + sigchain, + keyManager, + nodeGraph, + nodeConnectionManager, + queue, + logger, + }); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); - await nodeGraph.setNode(xNodeId, xNodeAddress); + await nodeGraph.setNode(xNodeId, xNodeAddress); - // We want to get the public key of the server - const chainData = JSON.stringify( - await nodeManager.requestChainData(xNodeId), - ); - 
expect(chainData).toContain(nodesUtils.encodeNodeId(xNodeId)); - expect(chainData).toContain(nodesUtils.encodeNodeId(yNodeId)); + // We want to get the public key of the server + const chainData = JSON.stringify( + await nodeManager.requestChainData(xNodeId), + ); + expect(chainData).toContain(nodesUtils.encodeNodeId(xNodeId)); + expect(chainData).toContain(nodesUtils.encodeNodeId(yNodeId)); + } finally { + await nodeManager?.stop(); + } }); }); test('should add a node when bucket has room', async () => { @@ -704,6 +713,9 @@ describe(`${NodeManager.name} test`, () => { keysConfig: { rootKeyPairBits: 2048, }, + networkConfig: { + proxyHost: localhost, + }, logger: logger, }); const serverNodeId = server.keyManager.getNodeId(); diff --git a/tests/notifications/NotificationsManager.test.ts b/tests/notifications/NotificationsManager.test.ts index 946bdcef6..e2095f191 100644 --- a/tests/notifications/NotificationsManager.test.ts +++ b/tests/notifications/NotificationsManager.test.ts @@ -155,6 +155,7 @@ describe('NotificationsManager', () => { await receiver.stop(); await queue.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); await nodeGraph.stop(); await proxy.stop(); await sigchain.stop(); diff --git a/tests/utils.ts b/tests/utils.ts index c7636c4a5..84c67c90e 100644 --- a/tests/utils.ts +++ b/tests/utils.ts @@ -1,21 +1,21 @@ -// Import type { StatusLive } from '@/status/types'; +import type { Host } from '@/network/types'; import type { NodeId } from '@/nodes/types'; -// import type { Host } from '@/network/types'; +import type { StatusLive } from '@/status/types'; import path from 'path'; import fs from 'fs'; import lock from 'fd-lock'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { IdInternal } from '@matrixai/id'; -// Import PolykeyAgent from '@/PolykeyAgent'; -// import Status from '@/status/Status'; -// import GRPCClientClient from '@/client/GRPCClientClient'; -// import * as clientUtils from '@/client/utils'; 
+import PolykeyAgent from '@/PolykeyAgent'; +import Status from '@/status/Status'; +import GRPCClientClient from '@/client/GRPCClientClient'; +import * as clientUtils from '@/client/utils'; import * as keysUtils from '@/keys/utils'; -// Import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; +import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as grpcErrors from '@/grpc/errors'; import { sleep } from '@/utils'; import * as errors from '@/errors'; -// import config from '@/config'; +import config from '@/config'; /** * Setup the global keypair @@ -87,100 +87,99 @@ async function setupGlobalKeypair() { // * * Ensure server-side side-effects are removed at the end of each test // */ async function setupGlobalAgent( - _logger: Logger = new Logger(setupGlobalAgent.name, LogLevel.WARN, [ + logger: Logger = new Logger(setupGlobalAgent.name, LogLevel.WARN, [ new StreamHandler(), ]), ): Promise { - throw Error('not implemented'); - // Const globalAgentPassword = 'password'; - // const globalAgentDir = path.join(globalThis.dataDir, 'agent'); - // // The references directory will act like our reference count - // await fs.promises.mkdir(path.join(globalAgentDir, 'references'), { - // recursive: true, - // }); - // const pid = process.pid.toString(); - // // Plus 1 to the reference count - // await fs.promises.writeFile(path.join(globalAgentDir, 'references', pid), ''); - // const globalAgentLock = await fs.promises.open( - // path.join(globalThis.dataDir, 'agent.lock'), - // fs.constants.O_WRONLY | fs.constants.O_CREAT, - // ); - // while (!lock(globalAgentLock.fd)) { - // await sleep(1000); - // } - // const status = new Status({ - // statusPath: path.join(globalAgentDir, config.defaults.statusBase), - // statusLockPath: path.join(globalAgentDir, config.defaults.statusLockBase), - // fs, - // }); - // let statusInfo = await status.readStatus(); - // if (statusInfo == null || statusInfo.status === 'DEAD') { - // await 
PolykeyAgent.createPolykeyAgent({ - // password: globalAgentPassword, - // nodePath: globalAgentDir, - // networkConfig: { - // proxyHost: '127.0.0.1' as Host, - // forwardHost: '127.0.0.1' as Host, - // agentHost: '127.0.0.1' as Host, - // clientHost: '127.0.0.1' as Host, - // }, - // keysConfig: { - // rootKeyPairBits: 2048, - // }, - // seedNodes: {}, // Explicitly no seed nodes on startup - // logger, - // }); - // statusInfo = await status.readStatus(); - // } - // return { - // globalAgentDir, - // globalAgentPassword, - // globalAgentStatus: statusInfo as StatusLive, - // globalAgentClose: async () => { - // // Closing the global agent cannot be done in the globalTeardown - // // This is due to a sequence of reasons: - // // 1. The global agent is not started as a separate process - // // 2. Because we need to be able to mock dependencies - // // 3. This means it is part of a jest worker process - // // 4. Which will block termination of the jest worker process - // // 5. Therefore globalTeardown will never get to execute - // // 6. The global agent is not part of globalSetup - // // 7. Because not all tests need the global agent - // // 8. 
Therefore setupGlobalAgent is lazy and executed by jest worker processes - // try { - // await fs.promises.rm(path.join(globalAgentDir, 'references', pid)); - // // If the references directory is not empty - // // there are other processes still using the global agent - // try { - // await fs.promises.rmdir(path.join(globalAgentDir, 'references')); - // } catch (e) { - // if (e.code === 'ENOTEMPTY') { - // return; - // } - // throw e; - // } - // // Stopping may occur in a different jest worker process - // // therefore we cannot rely on pkAgent, but instead use GRPC - // const statusInfo = (await status.readStatus()) as StatusLive; - // const grpcClient = await GRPCClientClient.createGRPCClientClient({ - // nodeId: statusInfo.data.nodeId, - // host: statusInfo.data.clientHost, - // port: statusInfo.data.clientPort, - // tlsConfig: { keyPrivatePem: undefined, certChainPem: undefined }, - // logger, - // }); - // const emptyMessage = new utilsPB.EmptyMessage(); - // const meta = clientUtils.encodeAuthFromPassword(globalAgentPassword); - // // This is asynchronous - // await grpcClient.agentStop(emptyMessage, meta); - // await grpcClient.destroy(); - // await status.waitFor('DEAD'); - // } finally { - // lock.unlock(globalAgentLock.fd); - // await globalAgentLock.close(); - // } - // }, - // }; + const globalAgentPassword = 'password'; + const globalAgentDir = path.join(globalThis.dataDir, 'agent'); + // The references directory will act like our reference count + await fs.promises.mkdir(path.join(globalAgentDir, 'references'), { + recursive: true, + }); + const pid = process.pid.toString(); + // Plus 1 to the reference count + await fs.promises.writeFile(path.join(globalAgentDir, 'references', pid), ''); + const globalAgentLock = await fs.promises.open( + path.join(globalThis.dataDir, 'agent.lock'), + fs.constants.O_WRONLY | fs.constants.O_CREAT, + ); + while (!lock(globalAgentLock.fd)) { + await sleep(1000); + } + const status = new Status({ + statusPath: 
path.join(globalAgentDir, config.defaults.statusBase), + statusLockPath: path.join(globalAgentDir, config.defaults.statusLockBase), + fs, + }); + let statusInfo = await status.readStatus(); + if (statusInfo == null || statusInfo.status === 'DEAD') { + await PolykeyAgent.createPolykeyAgent({ + password: globalAgentPassword, + nodePath: globalAgentDir, + networkConfig: { + proxyHost: '127.0.0.1' as Host, + forwardHost: '127.0.0.1' as Host, + agentHost: '127.0.0.1' as Host, + clientHost: '127.0.0.1' as Host, + }, + keysConfig: { + rootKeyPairBits: 2048, + }, + seedNodes: {}, // Explicitly no seed nodes on startup + logger, + }); + statusInfo = await status.readStatus(); + } + return { + globalAgentDir, + globalAgentPassword, + globalAgentStatus: statusInfo as StatusLive, + globalAgentClose: async () => { + // Closing the global agent cannot be done in the globalTeardown + // This is due to a sequence of reasons: + // 1. The global agent is not started as a separate process + // 2. Because we need to be able to mock dependencies + // 3. This means it is part of a jest worker process + // 4. Which will block termination of the jest worker process + // 5. Therefore globalTeardown will never get to execute + // 6. The global agent is not part of globalSetup + // 7. Because not all tests need the global agent + // 8. 
Therefore setupGlobalAgent is lazy and executed by jest worker processes + try { + await fs.promises.rm(path.join(globalAgentDir, 'references', pid)); + // If the references directory is not empty + // there are other processes still using the global agent + try { + await fs.promises.rmdir(path.join(globalAgentDir, 'references')); + } catch (e) { + if (e.code === 'ENOTEMPTY') { + return; + } + throw e; + } + // Stopping may occur in a different jest worker process + // therefore we cannot rely on pkAgent, but instead use GRPC + const statusInfo = (await status.readStatus()) as StatusLive; + const grpcClient = await GRPCClientClient.createGRPCClientClient({ + nodeId: statusInfo.data.nodeId, + host: statusInfo.data.clientHost, + port: statusInfo.data.clientPort, + tlsConfig: { keyPrivatePem: undefined, certChainPem: undefined }, + logger, + }); + const emptyMessage = new utilsPB.EmptyMessage(); + const meta = clientUtils.encodeAuthFromPassword(globalAgentPassword); + // This is asynchronous + await grpcClient.agentStop(emptyMessage, meta); + await grpcClient.destroy(); + await status.waitFor('DEAD'); + } finally { + lock.unlock(globalAgentLock.fd); + await globalAgentLock.close(); + } + }, + }; } function generateRandomNodeId(): NodeId { From 8f4398f65193038e6cab5e789a233334e1d6bd3e Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Wed, 27 Apr 2022 18:50:53 +1000 Subject: [PATCH 27/39] syntax: added `@typescript-eslint/await-thenable` linting rule This checks if we await things that are not promises. This is not a problem per se, but we generally don't want to await random things. 
--- .eslintrc | 1 + src/acl/ACL.ts | 2 +- src/claims/utils.ts | 6 ++--- .../providers/github/GitHubProvider.ts | 2 +- src/keys/utils.ts | 2 +- src/nodes/NodeConnectionManager.ts | 2 +- src/nodes/NodeGraph.ts | 2 ++ src/sigchain/utils.ts | 2 +- tests/claims/utils.test.ts | 4 +--- tests/keys/KeyManager.test.ts | 4 ++-- .../NodeConnectionManager.lifecycle.test.ts | 2 +- .../NodeConnectionManager.termination.test.ts | 2 +- tests/sigchain/Sigchain.test.ts | 2 +- tests/vaults/VaultInternal.test.ts | 2 +- tests/vaults/VaultManager.test.ts | 22 +++++++++---------- tests/vaults/VaultOps.test.ts | 2 +- 16 files changed, 30 insertions(+), 29 deletions(-) diff --git a/.eslintrc b/.eslintrc index 5b6e8f753..bc8d334e5 100644 --- a/.eslintrc +++ b/.eslintrc @@ -111,6 +111,7 @@ "@typescript-eslint/no-misused-promises": ["error", { "checksVoidReturn": false }], + "@typescript-eslint/await-thenable": ["error"], "@typescript-eslint/naming-convention": [ "error", { diff --git a/src/acl/ACL.ts b/src/acl/ACL.ts index ac83ade13..7d737e04c 100644 --- a/src/acl/ACL.ts +++ b/src/acl/ACL.ts @@ -321,7 +321,7 @@ class ACL { true, ); if (permId == null) { - const permId = this.generatePermId(); + const permId = await this.generatePermId(); const permRef = { count: 1, object: { diff --git a/src/claims/utils.ts b/src/claims/utils.ts index faee8ea4b..ea5ecf15d 100644 --- a/src/claims/utils.ts +++ b/src/claims/utils.ts @@ -62,7 +62,7 @@ async function createClaim({ const byteEncoder = new TextEncoder(); const claim = new GeneralSign(byteEncoder.encode(canonicalizedPayload)); claim - .addSignature(await createPrivateKey(privateKey)) + .addSignature(createPrivateKey(privateKey)) .setProtectedHeader({ alg: alg, kid: kid }); const signedClaim = await claim.sign(); return signedClaim as ClaimEncoded; @@ -83,14 +83,14 @@ async function signExistingClaim({ kid: NodeIdEncoded; alg?: string; }): Promise { - const decodedClaim = await decodeClaim(claim); + const decodedClaim = decodeClaim(claim); // 
Reconstruct the claim with our own signature // Make the payload contents deterministic const canonicalizedPayload = canonicalize(decodedClaim.payload); const byteEncoder = new TextEncoder(); const newClaim = new GeneralSign(byteEncoder.encode(canonicalizedPayload)); newClaim - .addSignature(await createPrivateKey(privateKey)) + .addSignature(createPrivateKey(privateKey)) .setProtectedHeader({ alg: alg, kid: kid }); const signedClaim = await newClaim.sign(); // Add our signature to the existing claim diff --git a/src/identities/providers/github/GitHubProvider.ts b/src/identities/providers/github/GitHubProvider.ts index 8fd0a79fe..bfbce7766 100644 --- a/src/identities/providers/github/GitHubProvider.ts +++ b/src/identities/providers/github/GitHubProvider.ts @@ -514,7 +514,7 @@ class GitHubProvider extends Provider { ); } const data = await response.text(); - const claimIds = await this.extractClaimIds(data); + const claimIds = this.extractClaimIds(data); for (const claimId of claimIds) { const claim = await this.getClaim(authIdentityId, claimId); if (claim != null) { diff --git a/src/keys/utils.ts b/src/keys/utils.ts index e36849f47..32c26eea2 100644 --- a/src/keys/utils.ts +++ b/src/keys/utils.ts @@ -508,7 +508,7 @@ function publicKeyBitSize(publicKey: PublicKey): number { } async function getRandomBytes(size: number): Promise { - return Buffer.from(await random.getBytes(size), 'binary'); + return Buffer.from(random.getBytes(size), 'binary'); } function getRandomBytesSync(size: number): Buffer { diff --git a/src/nodes/NodeConnectionManager.ts b/src/nodes/NodeConnectionManager.ts index e2c133a5f..ce37d0a94 100644 --- a/src/nodes/NodeConnectionManager.ts +++ b/src/nodes/NodeConnectionManager.ts @@ -550,7 +550,7 @@ class NodeConnectionManager { return this.withConnF( nodeId, async (connection) => { - const client = await connection.getClient(); + const client = connection.getClient(); const response = await client.nodesClosestLocalNodesGet(nodeIdMessage); const nodes: 
Array<[NodeId, NodeData]> = []; // Loop over each map element (from the returned response) and populate nodes diff --git a/src/nodes/NodeGraph.ts b/src/nodes/NodeGraph.ts index 3baf60299..f4f7ae188 100644 --- a/src/nodes/NodeGraph.ts +++ b/src/nodes/NodeGraph.ts @@ -379,6 +379,7 @@ class NodeGraph { const nodeId = IdInternal.fromBuffer(nodeIdBuffer); bucketDbIterator.seek(nodeIdBuffer); // @ts-ignore + // eslint-disable-next-line const iteratorResult = await bucketDbIterator.next(); if (iteratorResult == null) never(); const [, nodeData] = iteratorResult; @@ -477,6 +478,7 @@ class NodeGraph { nodesUtils.parseLastUpdatedBucketsDbKey(key as unknown as Buffer); bucketsDbIterator.seek(nodesUtils.bucketsDbKey(bucketIndex_, nodeId)); // @ts-ignore + // eslint-disable-next-line const iteratorResult = await bucketsDbIterator.next(); if (iteratorResult == null) never(); const [, nodeData] = iteratorResult; diff --git a/src/sigchain/utils.ts b/src/sigchain/utils.ts index 7f40dd6a3..fe8cc83f8 100644 --- a/src/sigchain/utils.ts +++ b/src/sigchain/utils.ts @@ -19,7 +19,7 @@ async function verifyChainData( continue; } // If verified, add the claim to the decoded chain - decodedChain[claimId] = await claimsUtils.decodeClaim(encodedClaim); + decodedChain[claimId] = claimsUtils.decodeClaim(encodedClaim); } return decodedChain; } diff --git a/tests/claims/utils.test.ts b/tests/claims/utils.test.ts index 069a6dcef..e57403683 100644 --- a/tests/claims/utils.test.ts +++ b/tests/claims/utils.test.ts @@ -328,9 +328,7 @@ describe('claims/utils', () => { // Create some dummy public key, and check that this does not verify const dummyKeyPair = await keysUtils.generateKeyPair(2048); - const dummyPublicKey = await keysUtils.publicKeyToPem( - dummyKeyPair.publicKey, - ); + const dummyPublicKey = keysUtils.publicKeyToPem(dummyKeyPair.publicKey); expect(await claimsUtils.verifyClaimSignature(claim, dummyPublicKey)).toBe( false, ); diff --git a/tests/keys/KeyManager.test.ts 
b/tests/keys/KeyManager.test.ts index 260346bc6..c1aaa345e 100644 --- a/tests/keys/KeyManager.test.ts +++ b/tests/keys/KeyManager.test.ts @@ -88,9 +88,9 @@ describe('KeyManager', () => { expect(keysPathContents).toContain('root_certs'); expect(keysPathContents).toContain('db.key'); expect(keyManager.dbKey.toString()).toBeTruthy(); - const rootKeyPairPem = await keyManager.getRootKeyPairPem(); + const rootKeyPairPem = keyManager.getRootKeyPairPem(); expect(rootKeyPairPem).not.toBeUndefined(); - const rootCertPem = await keyManager.getRootCertPem(); + const rootCertPem = keyManager.getRootCertPem(); expect(rootCertPem).not.toBeUndefined(); const rootCertPems = await keyManager.getRootCertChainPems(); expect(rootCertPems.length).toBe(1); diff --git a/tests/nodes/NodeConnectionManager.lifecycle.test.ts b/tests/nodes/NodeConnectionManager.lifecycle.test.ts index 69b21c099..a6f9d04e7 100644 --- a/tests/nodes/NodeConnectionManager.lifecycle.test.ts +++ b/tests/nodes/NodeConnectionManager.lifecycle.test.ts @@ -313,7 +313,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { }; // Creating the generator - const gen = await nodeConnectionManager.withConnG( + const gen = nodeConnectionManager.withConnG( remoteNodeId1, async function* () { yield* testGenerator(); diff --git a/tests/nodes/NodeConnectionManager.termination.test.ts b/tests/nodes/NodeConnectionManager.termination.test.ts index c26a93c03..86598e78c 100644 --- a/tests/nodes/NodeConnectionManager.termination.test.ts +++ b/tests/nodes/NodeConnectionManager.termination.test.ts @@ -609,7 +609,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { const firstConnection = firstConnAndLock?.connection; // Resolves if the shutdownCallback was called - const gen = await nodeConnectionManager.withConnG( + const gen = nodeConnectionManager.withConnG( agentNodeId, async function* (): AsyncGenerator { // Throw an error here diff --git a/tests/sigchain/Sigchain.test.ts 
b/tests/sigchain/Sigchain.test.ts index e35a3c20a..a3bbfb193 100644 --- a/tests/sigchain/Sigchain.test.ts +++ b/tests/sigchain/Sigchain.test.ts @@ -237,7 +237,7 @@ describe('Sigchain', () => { expect(verified2).toBe(true); // Check the hash of the previous claim is correct - const verifiedHash = await claimsUtils.verifyHashOfClaim( + const verifiedHash = claimsUtils.verifyHashOfClaim( claim1, decoded2.payload.hPrev as string, ); diff --git a/tests/vaults/VaultInternal.test.ts b/tests/vaults/VaultInternal.test.ts index d91978c5a..ab817a538 100644 --- a/tests/vaults/VaultInternal.test.ts +++ b/tests/vaults/VaultInternal.test.ts @@ -668,7 +668,7 @@ describe('VaultInternal', () => { await efs.writeFile(secret2.name, secret2.content); }); const commit = (await vault.log())[0].commitId; - const gen = await vault.readG(async function* (efs): AsyncGenerator { + const gen = vault.readG(async function* (efs): AsyncGenerator { yield expect((await efs.readFile(secret1.name)).toString()).toEqual( secret1.content, ); diff --git a/tests/vaults/VaultManager.test.ts b/tests/vaults/VaultManager.test.ts index 446c750c6..4cf5d50f9 100644 --- a/tests/vaults/VaultManager.test.ts +++ b/tests/vaults/VaultManager.test.ts @@ -1337,7 +1337,7 @@ describe('VaultManager', () => { }); await sleep(200); expect(pullVaultMock).not.toHaveBeenCalled(); - await releaseWrite(); + releaseWrite(); await pullP; expect(pullVaultMock).toHaveBeenCalled(); pullVaultMock.mockClear(); @@ -1363,16 +1363,16 @@ describe('VaultManager', () => { }); await sleep(200); expect(gitPullMock).not.toHaveBeenCalled(); - await releaseVaultWrite(); - await pullP2; - expect(gitPullMock).toHaveBeenCalled(); - } finally { - pullVaultMock.mockRestore(); - gitPullMock.mockRestore(); - await vaultManager?.stop(); - await vaultManager?.destroy(); - } - }, + releaseVaultWrite(); + await pullP2; + expect(gitPullMock).toHaveBeenCalled(); + } finally { + pullVaultMock.mockRestore(); + gitPullMock.mockRestore(); + await 
vaultManager?.stop(); + await vaultManager?.destroy(); + } + }, global.failedConnectionTimeout, ); }); diff --git a/tests/vaults/VaultOps.test.ts b/tests/vaults/VaultOps.test.ts index c766ddd74..2152a567d 100644 --- a/tests/vaults/VaultOps.test.ts +++ b/tests/vaults/VaultOps.test.ts @@ -355,7 +355,7 @@ describe('VaultOps', () => { expect( (await vaultOps.getSecret(vault, '.hidingSecret')).toString(), ).toStrictEqual('change_contents'); - await expect( + expect( ( await vaultOps.getSecret(vault, '.hidingDir/.hiddenInSecret') ).toString(), From f444f62a7ee0ac243276a81070e011ff3cb113af Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Thu, 28 Apr 2022 16:00:16 +1000 Subject: [PATCH 28/39] fix: updated `@types/node-forge` version and fixed `keysUtils.getRandomBytes` --- package.json | 2 +- src/keys/utils.ts | 11 ++++++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/package.json b/package.json index d6c39d77e..3759aef1d 100644 --- a/package.json +++ b/package.json @@ -117,7 +117,7 @@ "@types/jest": "^27.0.2", "@types/nexpect": "^0.4.31", "@types/node": "^16.11.7", - "@types/node-forge": "^0.9.7", + "@types/node-forge": "^0.10.4", "@types/pako": "^1.0.2", "@types/prompts": "^2.0.13", "@types/readable-stream": "^2.3.11", diff --git a/src/keys/utils.ts b/src/keys/utils.ts index 32c26eea2..14b82a92d 100644 --- a/src/keys/utils.ts +++ b/src/keys/utils.ts @@ -508,7 +508,16 @@ function publicKeyBitSize(publicKey: PublicKey): number { } async function getRandomBytes(size: number): Promise { - return Buffer.from(random.getBytes(size), 'binary'); + const p = new Promise((resolve, reject) => { + random.getBytes(size, (e, bytes) => { + if (e != null) { + reject(e); + } else { + resolve(bytes); + } + }); + }); + return Buffer.from(await p, 'binary'); } function getRandomBytesSync(size: number): Buffer { From 9d28625111751eff81d1857c45b4ed5cae4a11c7 Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Thu, 28 Apr 2022 19:10:10 +1000 Subject: [PATCH 29/39] tests: added 
test to check if nodes are properly added to the seed nodes when entering the network This tests for if the Seed node contains the new nodes when they are created. It also checks if the new nodes discover each other after being created. Includes a change to `findNode`. It will no longer throw an error when failing to find the node. This will have to be thrown by the caller now. This was required by `refreshBucket` since it's very likely that we can't find the random node it is looking for. --- src/client/service/nodesFind.ts | 1 + src/nodes/NodeConnectionManager.ts | 23 +++-- src/nodes/NodeManager.ts | 9 +- .../NodeConnectionManager.general.test.ts | 7 +- .../NodeConnectionManager.seednodes.test.ts | 89 ++++++++++++++++++- 5 files changed, 109 insertions(+), 20 deletions(-) diff --git a/src/client/service/nodesFind.ts b/src/client/service/nodesFind.ts index 6c2061719..324e1c0e9 100644 --- a/src/client/service/nodesFind.ts +++ b/src/client/service/nodesFind.ts @@ -50,6 +50,7 @@ function nodesFind({ }, ); const address = await nodeConnectionManager.findNode(nodeId); + if (address == null) throw new nodesErrors.ErrorNodeGraphNodeIdNotFound(); response .setNodeId(nodesUtils.encodeNodeId(nodeId)) .setAddress( diff --git a/src/nodes/NodeConnectionManager.ts b/src/nodes/NodeConnectionManager.ts index ce37d0a94..34f5f4d04 100644 --- a/src/nodes/NodeConnectionManager.ts +++ b/src/nodes/NodeConnectionManager.ts @@ -267,6 +267,9 @@ class NodeConnectionManager { // Creating the connection and set in map // FIXME: this is fine, just use the implicit tran. 
fix this when adding optional transactions const targetAddress = await this.findNode(targetNodeId); + if (targetAddress == null) { + throw new nodesErrors.ErrorNodeGraphNodeIdNotFound(); + } // If the stored host is not a valid host (IP address), // then we assume it to be a hostname const targetHostname = !networkUtils.isHost(targetAddress.host) @@ -395,23 +398,17 @@ class NodeConnectionManager { public async findNode( targetNodeId: NodeId, options: { signal?: AbortSignal } = {}, - ): Promise { + ): Promise { const { signal } = { ...options }; // First check if we already have an existing ID -> address record let address = (await this.nodeGraph.getNode(targetNodeId))?.address; // Otherwise, attempt to locate it by contacting network - if (address == null) { - address = await this.getClosestGlobalNodes(targetNodeId, undefined, { + address = + address ?? + (await this.getClosestGlobalNodes(targetNodeId, undefined, { signal, - }); - // TODO: This currently just does one iteration - // If not found in this single iteration, we throw an exception - if (address == null) { - throw new nodesErrors.ErrorNodeGraphNodeIdNotFound(); - } - } - // We ensure that we always return a NodeAddress (either by lookup, or - // network search) - if we can't locate it from either, we throw an exception + })); + // TODO: This currently just does one iteration return address; } @@ -585,6 +582,7 @@ class NodeConnectionManager { */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) public async syncNodeGraph(block: boolean = true, timer?: Timer) { + this.logger.info('Syncing nodeGraph'); for (const seedNodeId of this.getSeedNodes()) { // Check if the connection is viable try { @@ -598,6 +596,7 @@ class NodeConnectionManager { this.keyManager.getNodeId(), timer, ); + // FIXME: we need to ping a node before setting it for (const [nodeId, nodeData] of nodes) { if (!block) { this.queue.push(() => diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index 
a9df2bcce..2838c8ea1 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -47,8 +47,8 @@ class NodeManager { protected refreshBucketQueue: Set = new Set(); protected refreshBucketQueueRunning: boolean = false; protected refreshBucketQueueRunner: Promise; - protected refreshBucketQueuePlug_: PromiseType; - protected refreshBucketQueueDrained_: PromiseType; + protected refreshBucketQueuePlug_: PromiseType = promise(); + protected refreshBucketQueueDrained_: PromiseType = promise(); protected refreshBucketQueueAbortController: AbortController; constructor({ @@ -109,7 +109,10 @@ class NodeManager { // We need to attempt a connection using the proxies // For now we will just do a forward connect + relay message const targetAddress = - address ?? (await this.nodeConnectionManager.findNode(nodeId))!; + address ?? (await this.nodeConnectionManager.findNode(nodeId)); + if (targetAddress == null) { + throw new nodesErrors.ErrorNodeGraphNodeIdNotFound(); + } const targetHost = await networkUtils.resolveHost(targetAddress.host); return await this.nodeConnectionManager.pingNode( nodeId, diff --git a/tests/nodes/NodeConnectionManager.general.test.ts b/tests/nodes/NodeConnectionManager.general.test.ts index 8905f8718..f0fe65d4e 100644 --- a/tests/nodes/NodeConnectionManager.general.test.ts +++ b/tests/nodes/NodeConnectionManager.general.test.ts @@ -16,7 +16,6 @@ import Proxy from '@/network/Proxy'; import GRPCClientAgent from '@/agent/GRPCClientAgent'; import * as nodesUtils from '@/nodes/utils'; -import * as nodesErrors from '@/nodes/errors'; import * as keysUtils from '@/keys/utils'; import * as grpcUtils from '@/grpc/utils'; import * as nodesPB from '@/proto/js/polykey/v1/nodes/nodes_pb'; @@ -336,9 +335,9 @@ describe(`${NodeConnectionManager.name} general test`, () => { port: 22222 as Port, } as NodeAddress); // Un-findable Node cannot be found - await expect(() => - nodeConnectionManager.findNode(nodeId), - 
).rejects.toThrowError(nodesErrors.ErrorNodeGraphNodeIdNotFound); + await expect(nodeConnectionManager.findNode(nodeId)).resolves.toEqual( + undefined, + ); await server.stop(); } finally { diff --git a/tests/nodes/NodeConnectionManager.seednodes.test.ts b/tests/nodes/NodeConnectionManager.seednodes.test.ts index d433dcbd1..b63a4ae54 100644 --- a/tests/nodes/NodeConnectionManager.seednodes.test.ts +++ b/tests/nodes/NodeConnectionManager.seednodes.test.ts @@ -1,4 +1,4 @@ -import type { NodeId, SeedNodes } from '@/nodes/types'; +import type { NodeId, NodeIdEncoded, SeedNodes } from '@/nodes/types'; import type { Host, Port } from '@/network/types'; import type { Sigchain } from '@/sigchain'; import fs from 'fs'; @@ -123,6 +123,13 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { }); beforeEach(async () => { + // Clearing nodes from graphs + for await (const [nodeId] of remoteNode1.nodeGraph.getNodes()) { + await remoteNode1.nodeGraph.unsetNode(nodeId); + } + for await (const [nodeId] of remoteNode2.nodeGraph.getNodes()) { + await remoteNode2.nodeGraph.unsetNode(nodeId); + } dataDir = await fs.promises.mkdtemp( path.join(os.tmpdir(), 'polykey-test-'), ); @@ -417,4 +424,84 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { await queue?.stop(); } }); + test('should expand the network when nodes enter', async () => { + // Using a single seed node we need to check that each entering node adds itself to the seed node. + // Also need to check that the new nodes can be seen in the network. 
+ let node1: PolykeyAgent | undefined; + let node2: PolykeyAgent | undefined; + const seedNodes: SeedNodes = {}; + seedNodes[nodesUtils.encodeNodeId(remoteNodeId1)] = { + host: remoteNode1.proxy.getProxyHost(), + port: remoteNode1.proxy.getProxyPort(), + }; + seedNodes[nodesUtils.encodeNodeId(remoteNodeId2)] = { + host: remoteNode2.proxy.getProxyHost(), + port: remoteNode2.proxy.getProxyPort(), + }; + try { + logger.setLevel(LogLevel.WARN); + node1 = await PolykeyAgent.createPolykeyAgent({ + nodePath: path.join(dataDir, 'node1'), + password: 'password', + networkConfig: { + proxyHost: localHost, + agentHost: localHost, + clientHost: localHost, + forwardHost: localHost, + }, + seedNodes, + logger, + }); + node2 = await PolykeyAgent.createPolykeyAgent({ + nodePath: path.join(dataDir, 'node2'), + password: 'password', + networkConfig: { + proxyHost: localHost, + agentHost: localHost, + clientHost: localHost, + forwardHost: localHost, + }, + seedNodes, + logger, + }); + + await node1.queue.drained(); + await node1.nodeManager.refreshBucketQueueDrained(); + await node2.queue.drained(); + await node2.nodeManager.refreshBucketQueueDrained(); + + const getAllNodes = async (node: PolykeyAgent) => { + const nodes: Array = []; + for await (const [nodeId] of node.nodeGraph.getNodes()) { + nodes.push(nodesUtils.encodeNodeId(nodeId)); + } + return nodes; + }; + const rNode1Nodes = await getAllNodes(remoteNode1); + const rNode2Nodes = await getAllNodes(remoteNode2); + const node1Nodes = await getAllNodes(node1); + const node2Nodes = await getAllNodes(node2); + + const nodeIdR1 = nodesUtils.encodeNodeId(remoteNodeId1); + const nodeIdR2 = nodesUtils.encodeNodeId(remoteNodeId2); + const nodeId1 = nodesUtils.encodeNodeId(node1.keyManager.getNodeId()); + const nodeId2 = nodesUtils.encodeNodeId(node2.keyManager.getNodeId()); + expect(rNode1Nodes).toContain(nodeId1); + expect(rNode1Nodes).toContain(nodeId2); + expect(rNode2Nodes).toContain(nodeId1); + 
expect(rNode2Nodes).toContain(nodeId2); + expect(node1Nodes).toContain(nodeIdR1); + expect(node1Nodes).toContain(nodeIdR2); + expect(node1Nodes).toContain(nodeId2); + expect(node2Nodes).toContain(nodeIdR1); + expect(node2Nodes).toContain(nodeIdR2); + expect(node2Nodes).toContain(nodeId1); + } finally { + logger.setLevel(LogLevel.WARN); + await node1?.stop(); + await node1?.destroy(); + await node2?.stop(); + await node2?.destroy(); + } + }); }); From 2a5f2ff43256df54cd768b16a5c5ad369ca1abbc Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Fri, 29 Apr 2022 16:58:11 +1000 Subject: [PATCH 30/39] tests: Added agent service tests for `nodesChainDataGet`, `nodesClosestLocalNode` and `nodesHolePunchMessage` --- tests/agent/service/nodesChainDataGet.test.ts | 108 ++++++++++++++++ .../service/nodesClosestLocalNode.test.ts | 118 ++++++++++++++++++ .../service/nodesHolePunchMessage.test.ts | 103 +++++++++++++++ 3 files changed, 329 insertions(+) create mode 100644 tests/agent/service/nodesChainDataGet.test.ts create mode 100644 tests/agent/service/nodesClosestLocalNode.test.ts create mode 100644 tests/agent/service/nodesHolePunchMessage.test.ts diff --git a/tests/agent/service/nodesChainDataGet.test.ts b/tests/agent/service/nodesChainDataGet.test.ts new file mode 100644 index 000000000..8bc388763 --- /dev/null +++ b/tests/agent/service/nodesChainDataGet.test.ts @@ -0,0 +1,108 @@ +import type { Host, Port } from '@/network/types'; +import type { NodeIdEncoded } from '@/nodes/types'; +import fs from 'fs'; +import path from 'path'; +import os from 'os'; +import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; +import PolykeyAgent from '@/PolykeyAgent'; +import GRPCServer from '@/grpc/GRPCServer'; +import GRPCClientAgent from '@/agent/GRPCClientAgent'; +import { AgentServiceService } from '@/proto/js/polykey/v1/agent_service_grpc_pb'; +import * as nodesPB from '@/proto/js/polykey/v1/nodes/nodes_pb'; +import * as keysUtils from '@/keys/utils'; +import * as nodesUtils 
from '@/nodes/utils'; +import nodesClosestLocalNodesGet from '@/agent/service/nodesClosestLocalNodesGet'; +import * as testNodesUtils from '../../nodes/utils'; +import * as testUtils from '../../utils'; + +describe('nodesClosestLocalNode', () => { + const logger = new Logger('nodesClosestLocalNode test', LogLevel.WARN, [ + new StreamHandler(), + ]); + const password = 'helloworld'; + let dataDir: string; + let nodePath: string; + let grpcServer: GRPCServer; + let grpcClient: GRPCClientAgent; + let pkAgent: PolykeyAgent; + let mockedGenerateKeyPair: jest.SpyInstance; + let mockedGenerateDeterministicKeyPair: jest.SpyInstance; + beforeAll(async () => { + const globalKeyPair = await testUtils.setupGlobalKeypair(); + mockedGenerateKeyPair = jest + .spyOn(keysUtils, 'generateKeyPair') + .mockResolvedValueOnce(globalKeyPair); + mockedGenerateDeterministicKeyPair = jest + .spyOn(keysUtils, 'generateDeterministicKeyPair') + .mockResolvedValueOnce(globalKeyPair); + dataDir = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'polykey-test-'), + ); + nodePath = path.join(dataDir, 'keynode'); + pkAgent = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath, + keysConfig: { + rootKeyPairBits: 2048, + }, + seedNodes: {}, // Explicitly no seed nodes on startup + networkConfig: { + proxyHost: '127.0.0.1' as Host, + }, + logger, + }); + // Setting up a remote keynode + const agentService = { + nodesClosestLocalNodesGet: nodesClosestLocalNodesGet({ + nodeGraph: pkAgent.nodeGraph, + }), + }; + grpcServer = new GRPCServer({ logger }); + await grpcServer.start({ + services: [[AgentServiceService, agentService]], + host: '127.0.0.1' as Host, + port: 0 as Port, + }); + grpcClient = await GRPCClientAgent.createGRPCClientAgent({ + nodeId: pkAgent.keyManager.getNodeId(), + host: '127.0.0.1' as Host, + port: grpcServer.getPort(), + logger, + }); + }, global.defaultTimeout); + afterAll(async () => { + await grpcClient.destroy(); + await grpcServer.stop(); + await pkAgent.stop(); + 
await pkAgent.destroy(); + await fs.promises.rm(dataDir, { + force: true, + recursive: true, + }); + mockedGenerateKeyPair.mockRestore(); + mockedGenerateDeterministicKeyPair.mockRestore(); + }); + test('should get closest local nodes', async () => { + // Adding 10 nodes + const nodes: Array = []; + for (let i = 0; i < 10; i++) { + const nodeId = testNodesUtils.generateRandomNodeId(); + await pkAgent.nodeGraph.setNode(nodeId, { + host: 'localhost' as Host, + port: 55555 as Port, + }); + nodes.push(nodesUtils.encodeNodeId(nodeId)); + } + const nodeIdEncoded = nodesUtils.encodeNodeId( + testNodesUtils.generateRandomNodeId(), + ); + const nodeMessage = new nodesPB.Node(); + nodeMessage.setNodeId(nodeIdEncoded); + const result = await grpcClient.nodesClosestLocalNodesGet(nodeMessage); + const resultNodes: Array = []; + for (const [resultNode] of result.toObject().nodeTableMap) { + resultNodes.push(resultNode as NodeIdEncoded); + } + expect(nodes.sort()).toEqual(resultNodes.sort()); + }); +}); diff --git a/tests/agent/service/nodesClosestLocalNode.test.ts b/tests/agent/service/nodesClosestLocalNode.test.ts new file mode 100644 index 000000000..5453d8e5a --- /dev/null +++ b/tests/agent/service/nodesClosestLocalNode.test.ts @@ -0,0 +1,118 @@ +import type { Host, Port } from '@/network/types'; +import type { ClaimData } from '@/claims/types'; +import type { IdentityId, ProviderId } from '@/identities/types'; +import fs from 'fs'; +import path from 'path'; +import os from 'os'; +import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; +import PolykeyAgent from '@/PolykeyAgent'; +import GRPCServer from '@/grpc/GRPCServer'; +import GRPCClientAgent from '@/agent/GRPCClientAgent'; +import { AgentServiceService } from '@/proto/js/polykey/v1/agent_service_grpc_pb'; +import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; +import * as keysUtils from '@/keys/utils'; +import * as nodesUtils from '@/nodes/utils'; +import nodesChainDataGet from 
'@/agent/service/nodesChainDataGet'; +import * as testUtils from '../../utils'; +import * as testNodesUtils from '../../nodes/utils'; + +describe('nodesChainDataGet', () => { + const logger = new Logger('nodesChainDataGet test', LogLevel.WARN, [ + new StreamHandler(), + ]); + const password = 'helloworld'; + let dataDir: string; + let nodePath: string; + let grpcServer: GRPCServer; + let grpcClient: GRPCClientAgent; + let pkAgent: PolykeyAgent; + let mockedGenerateKeyPair: jest.SpyInstance; + let mockedGenerateDeterministicKeyPair: jest.SpyInstance; + beforeAll(async () => { + const globalKeyPair = await testUtils.setupGlobalKeypair(); + mockedGenerateKeyPair = jest + .spyOn(keysUtils, 'generateKeyPair') + .mockResolvedValueOnce(globalKeyPair); + mockedGenerateDeterministicKeyPair = jest + .spyOn(keysUtils, 'generateDeterministicKeyPair') + .mockResolvedValueOnce(globalKeyPair); + dataDir = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'polykey-test-'), + ); + nodePath = path.join(dataDir, 'keynode'); + pkAgent = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath, + keysConfig: { + rootKeyPairBits: 2048, + }, + seedNodes: {}, // Explicitly no seed nodes on startup + networkConfig: { + proxyHost: '127.0.0.1' as Host, + }, + logger, + }); + const agentService = { + nodesChainDataGet: nodesChainDataGet({ + sigchain: pkAgent.sigchain, + }), + }; + grpcServer = new GRPCServer({ logger }); + await grpcServer.start({ + services: [[AgentServiceService, agentService]], + host: '127.0.0.1' as Host, + port: 0 as Port, + }); + grpcClient = await GRPCClientAgent.createGRPCClientAgent({ + nodeId: pkAgent.keyManager.getNodeId(), + host: '127.0.0.1' as Host, + port: grpcServer.getPort(), + logger, + }); + }, global.defaultTimeout); + afterAll(async () => { + await grpcClient.destroy(); + await grpcServer.stop(); + await pkAgent.stop(); + await pkAgent.destroy(); + await fs.promises.rm(dataDir, { + force: true, + recursive: true, + }); + 
mockedGenerateKeyPair.mockRestore(); + mockedGenerateDeterministicKeyPair.mockRestore(); + }); + test('should get closest nodes', async () => { + const srcNodeIdEncoded = nodesUtils.encodeNodeId( + pkAgent.keyManager.getNodeId(), + ); + // Add 10 claims + for (let i = 1; i <= 5; i++) { + const node2 = nodesUtils.encodeNodeId( + testNodesUtils.generateRandomNodeId(), + ); + const nodeLink: ClaimData = { + type: 'node', + node1: srcNodeIdEncoded, + node2: node2, + }; + await pkAgent.sigchain.addClaim(nodeLink); + } + for (let i = 6; i <= 10; i++) { + const identityLink: ClaimData = { + type: 'identity', + node: srcNodeIdEncoded, + provider: ('ProviderId' + i.toString()) as ProviderId, + identity: ('IdentityId' + i.toString()) as IdentityId, + }; + await pkAgent.sigchain.addClaim(identityLink); + } + + const response = await grpcClient.nodesChainDataGet( + new utilsPB.EmptyMessage(), + ); + const chainIds: Array = []; + for (const [id] of response.toObject().chainDataMap) chainIds.push(id); + expect(chainIds).toHaveLength(10); + }); +}); diff --git a/tests/agent/service/nodesHolePunchMessage.test.ts b/tests/agent/service/nodesHolePunchMessage.test.ts new file mode 100644 index 000000000..4bef6d759 --- /dev/null +++ b/tests/agent/service/nodesHolePunchMessage.test.ts @@ -0,0 +1,103 @@ +import type { Host, Port } from '@/network/types'; +import fs from 'fs'; +import path from 'path'; +import os from 'os'; +import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; +import PolykeyAgent from '@/PolykeyAgent'; +import GRPCServer from '@/grpc/GRPCServer'; +import GRPCClientAgent from '@/agent/GRPCClientAgent'; +import { AgentServiceService } from '@/proto/js/polykey/v1/agent_service_grpc_pb'; +import * as nodesPB from '@/proto/js/polykey/v1/nodes/nodes_pb'; +import * as keysUtils from '@/keys/utils'; +import * as nodesUtils from '@/nodes/utils'; +import nodesHolePunchMessageSend from '@/agent/service/nodesHolePunchMessageSend'; +import * as networkUtils from 
'@/network/utils'; +import * as testUtils from '../../utils'; + +describe('nodesHolePunchMessage', () => { + const logger = new Logger('nodesHolePunchMessage test', LogLevel.WARN, [ + new StreamHandler(), + ]); + const password = 'helloworld'; + let dataDir: string; + let nodePath: string; + let grpcServer: GRPCServer; + let grpcClient: GRPCClientAgent; + let pkAgent: PolykeyAgent; + let mockedGenerateKeyPair: jest.SpyInstance; + let mockedGenerateDeterministicKeyPair: jest.SpyInstance; + beforeAll(async () => { + const globalKeyPair = await testUtils.setupGlobalKeypair(); + mockedGenerateKeyPair = jest + .spyOn(keysUtils, 'generateKeyPair') + .mockResolvedValueOnce(globalKeyPair); + mockedGenerateDeterministicKeyPair = jest + .spyOn(keysUtils, 'generateDeterministicKeyPair') + .mockResolvedValueOnce(globalKeyPair); + dataDir = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'polykey-test-'), + ); + nodePath = path.join(dataDir, 'keynode'); + pkAgent = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath, + keysConfig: { + rootKeyPairBits: 2048, + }, + seedNodes: {}, // Explicitly no seed nodes on startup + networkConfig: { + proxyHost: '127.0.0.1' as Host, + }, + logger, + }); + const agentService = { + nodesHolePunchMessageSend: nodesHolePunchMessageSend({ + keyManager: pkAgent.keyManager, + nodeConnectionManager: pkAgent.nodeConnectionManager, + nodeManager: pkAgent.nodeManager, + }), + }; + grpcServer = new GRPCServer({ logger }); + await grpcServer.start({ + services: [[AgentServiceService, agentService]], + host: '127.0.0.1' as Host, + port: 0 as Port, + }); + grpcClient = await GRPCClientAgent.createGRPCClientAgent({ + nodeId: pkAgent.keyManager.getNodeId(), + host: '127.0.0.1' as Host, + port: grpcServer.getPort(), + logger, + }); + }, global.defaultTimeout); + afterAll(async () => { + await grpcClient.destroy(); + await grpcServer.stop(); + await pkAgent.stop(); + await pkAgent.destroy(); + await fs.promises.rm(dataDir, { + force: true, + 
recursive: true, + }); + mockedGenerateKeyPair.mockRestore(); + mockedGenerateDeterministicKeyPair.mockRestore(); + }); + test('should get the chain data', async () => { + const nodeId = nodesUtils.encodeNodeId(pkAgent.keyManager.getNodeId()); + const proxyAddress = networkUtils.buildAddress( + pkAgent.proxy.getProxyHost(), + pkAgent.proxy.getProxyPort(), + ); + const signature = await pkAgent.keyManager.signWithRootKeyPair( + Buffer.from(proxyAddress), + ); + const relayMessage = new nodesPB.Relay(); + relayMessage + .setTargetId(nodeId) + .setSrcId(nodeId) + .setSignature(signature.toString()) + .setProxyAddress(proxyAddress); + await grpcClient.nodesHolePunchMessageSend(relayMessage); + // TODO: check if the ping was sent + }); +}); From dadfb34a2d4d7b8dfd1d171b3a73836cde193d8d Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Thu, 2 Jun 2022 17:04:54 +1000 Subject: [PATCH 31/39] fix: post re-base fixes DB and type changes using new transactions Linting Fixing timeouts --- src/PolykeyAgent.ts | 4 +- src/acl/ACL.ts | 2 +- src/client/service/nodesAdd.ts | 4 +- src/nodes/NodeConnectionManager.ts | 17 +- src/nodes/NodeGraph.ts | 126 +++--- src/nodes/NodeManager.ts | 24 +- src/nodes/errors.ts | 8 +- src/nodes/utils.ts | 41 +- tests/agent/service/nodesChainDataGet.test.ts | 2 + .../service/nodesClosestLocalNode.test.ts | 2 + .../service/nodesHolePunchMessage.test.ts | 2 + tests/client/service/agentLockAll.test.ts | 3 +- tests/gestalts/GestaltGraph.test.ts | 4 +- tests/network/Proxy.test.ts | 10 +- tests/nodes/NodeConnection.test.ts | 2 +- .../NodeConnectionManager.seednodes.test.ts | 158 +++---- tests/nodes/utils.test.ts | 30 +- tests/utils.ts | 1 - tests/vaults/VaultInternal.test.ts | 102 ++--- tests/vaults/VaultManager.test.ts | 412 +++++++++--------- 20 files changed, 481 insertions(+), 473 deletions(-) diff --git a/src/PolykeyAgent.ts b/src/PolykeyAgent.ts index 781530290..e3f033c71 100644 --- a/src/PolykeyAgent.ts +++ b/src/PolykeyAgent.ts @@ -690,6 +690,7 @@ 
class PolykeyAgent { this.logger.info(`Started ${this.constructor.name}`); } catch (e) { this.logger.warn(`Failed Starting ${this.constructor.name}`); + this.events.removeAllListeners(); await this.status?.beginStop({ pid: process.pid }); await this.sessionManager?.stop(); await this.notificationsManager?.stop(); @@ -706,7 +707,6 @@ class PolykeyAgent { await this.keyManager?.stop(); await this.schema?.stop(); await this.status?.stop({}); - this.events.removeAllListeners(); throw e; } } @@ -716,6 +716,7 @@ class PolykeyAgent { */ public async stop() { this.logger.info(`Stopping ${this.constructor.name}`); + this.events.removeAllListeners(); await this.status.beginStop({ pid: process.pid }); await this.sessionManager.stop(); await this.notificationsManager.stop(); @@ -736,7 +737,6 @@ class PolykeyAgent { await this.keyManager.stop(); await this.schema.stop(); await this.status.stop({}); - this.events.removeAllListeners(); this.logger.info(`Stopped ${this.constructor.name}`); } diff --git a/src/acl/ACL.ts b/src/acl/ACL.ts index 7d737e04c..ac83ade13 100644 --- a/src/acl/ACL.ts +++ b/src/acl/ACL.ts @@ -321,7 +321,7 @@ class ACL { true, ); if (permId == null) { - const permId = await this.generatePermId(); + const permId = this.generatePermId(); const permRef = { count: 1, object: { diff --git a/src/client/service/nodesAdd.ts b/src/client/service/nodesAdd.ts index 0884a0f0b..0d993c746 100644 --- a/src/client/service/nodesAdd.ts +++ b/src/client/service/nodesAdd.ts @@ -14,7 +14,7 @@ import * as utilsPB from '../../proto/js/polykey/v1/utils/utils_pb'; import * as clientUtils from '../utils'; /** - * Adds a node ID -> node address mapping into the buckets database. + * Adds a node ID -> node address mapping into the buckets' database. * This is an unrestricted add: no validity checks are made for the correctness * of the passed ID or host/port. 
*/ @@ -67,6 +67,8 @@ function nodesAdd({ host, port, } as NodeAddress, + true, + true, undefined, tran, ), diff --git a/src/nodes/NodeConnectionManager.ts b/src/nodes/NodeConnectionManager.ts index 34f5f4d04..484757f81 100644 --- a/src/nodes/NodeConnectionManager.ts +++ b/src/nodes/NodeConnectionManager.ts @@ -11,13 +11,10 @@ import type { NodeId, NodeIdString, SeedNodes, - NodeEntry, - NodeBucket, - NodeIdString, } from './types'; -import { withF } from '@matrixai/resources'; import type NodeManager from './NodeManager'; import type { AbortSignal } from 'node-abort-controller'; +import { withF } from '@matrixai/resources'; import Logger from '@matrixai/logger'; import { ready, StartStop } from '@matrixai/async-init/dist/StartStop'; import { IdInternal } from '@matrixai/id'; @@ -139,7 +136,6 @@ class NodeConnectionManager { * an acquire function with no parameters). * @param targetNodeId Id of target node to communicate with * @param timer Connection timeout timer - * @param address Optional address to connect to * @returns ResourceAcquire Resource API for use in with contexts */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) @@ -148,7 +144,10 @@ class NodeConnectionManager { timer?: Timer, ): Promise>> { return async () => { - const { connection, timer } = await this.getConnection(targetNodeId, timer); + const { connection, timer: timeToLiveTimer } = await this.getConnection( + targetNodeId, + timer, + ); // Acquire the read lock and the release function const [release] = await this.connectionLocks.lock([ targetNodeId.toString(), @@ -156,7 +155,7 @@ class NodeConnectionManager { 'write', ])(); // Resetting TTL timer - timer?.refresh(); + timeToLiveTimer?.refresh(); // Return tuple of [ResourceRelease, Resource] return [ async (e) => { @@ -224,7 +223,7 @@ class NodeConnectionManager { const [release, conn] = await acquire(); let caughtError; try { - return yield* await g(conn!); + return yield* g(conn!); } catch (e) { caughtError = e; throw e; @@ 
-604,7 +603,7 @@ class NodeConnectionManager { ); } else { try { - // FIXME: no tran neededawait this.nodeManager?.setNode(nodeId, nodeData.address); + await this.nodeManager?.setNode(nodeId, nodeData.address); } catch (e) { if (!(e instanceof nodesErrors.ErrorNodeGraphSameNodeId)) throw e; } diff --git a/src/nodes/NodeGraph.ts b/src/nodes/NodeGraph.ts index f4f7ae188..a5b8da332 100644 --- a/src/nodes/NodeGraph.ts +++ b/src/nodes/NodeGraph.ts @@ -198,7 +198,7 @@ class NodeGraph { }, this.nodeGraphBucketsDbPath, )) { - const { nodeId } = nodesUtils.parseBucketsDbKey(key as unknown as Buffer); + const { nodeId } = nodesUtils.parseBucketsDbKey(key as Array); yield [nodeId, nodeData]; } } @@ -224,11 +224,9 @@ class NodeGraph { const [bucketIndex, bucketKey] = this.bucketIndex(nodeId); const lastUpdatedPath = [...this.nodeGraphLastUpdatedDbPath, bucketKey]; - const bucketPath = [...this.nodeGraphBucketsDbPath, bucketKey]; - const nodeData = await tran.get([ - ...bucketPath, - nodesUtils.bucketDbKey(nodeId), - ]); + const nodeIdKey = nodesUtils.bucketDbKey(nodeId); + const bucketPath = [...this.nodeGraphBucketsDbPath, bucketKey, nodeIdKey]; + const nodeData = await tran.get(bucketPath); if (nodeData != null) { this.logger.debug( `Updating node ${nodesUtils.encodeNodeId( @@ -236,33 +234,27 @@ class NodeGraph { )} in bucket ${bucketIndex}`, ); // If the node already exists we want to remove the old `lastUpdated` - const lastUpdatedKey = nodesUtils.lastUpdatedBucketDbKey( - nodeData.lastUpdated, - nodeId, - ); - await tran.del([...lastUpdatedPath, lastUpdatedKey]); + const lastUpdatedKey = nodesUtils.lastUpdatedKey(nodeData.lastUpdated); + await tran.del([...lastUpdatedPath, lastUpdatedKey, nodeIdKey]); } else { this.logger.debug( `Adding node ${nodesUtils.encodeNodeId( nodeId, )} to bucket ${bucketIndex}`, ); - // It didn't exist so we want to increment the bucket count + // It didn't exist, so we want to increment the bucket count const count = await 
this.getBucketMetaProp(bucketIndex, 'count', tran); await this.setBucketMetaProp(bucketIndex, 'count', count + 1, tran); } const lastUpdated = getUnixtime(); - await tran.put([...bucketPath, nodesUtils.bucketDbKey(nodeId)], { + await tran.put(bucketPath, { address: nodeAddress, lastUpdated, }); - const newLastUpdatedKey = nodesUtils.lastUpdatedBucketDbKey( - lastUpdated, - nodeId, - ); + const newLastUpdatedKey = nodesUtils.lastUpdatedKey(lastUpdated); await tran.put( - [...lastUpdatedPath, newLastUpdatedKey], - nodesUtils.bucketDbKey(nodeId), + [...lastUpdatedPath, newLastUpdatedKey, nodeIdKey], + nodeIdKey, true, ); } @@ -281,8 +273,13 @@ class NodeGraph { const bucketKey = nodesUtils.bucketKey(bucketIndex); // Remove the oldest entry in the bucket const oldestNodeIds: Array = []; - for await (const [key] of tran.iterator({ limit }, [...this.nodeGraphLastUpdatedDbPath, bucketKey])) { - const { nodeId } = nodesUtils.parseLastUpdatedBucketDbKey(key as unknown as Buffer); + for await (const [key] of tran.iterator({ limit }, [ + ...this.nodeGraphLastUpdatedDbPath, + bucketKey, + ])) { + const { nodeId } = nodesUtils.parseLastUpdatedBucketDbKey( + key as Array, + ); oldestNodeIds.push(nodeId); } return oldestNodeIds; @@ -299,10 +296,8 @@ class NodeGraph { const [bucketIndex, bucketKey] = this.bucketIndex(nodeId); const bucketPath = [...this.nodeGraphBucketsDbPath, bucketKey]; const lastUpdatedPath = [...this.nodeGraphLastUpdatedDbPath, bucketKey]; - const nodeData = await tran.get([ - ...bucketPath, - nodesUtils.bucketDbKey(nodeId), - ]); + const nodeIdKey = nodesUtils.bucketDbKey(nodeId); + const nodeData = await tran.get([...bucketPath, nodeIdKey]); if (nodeData != null) { this.logger.debug( `Removing node ${nodesUtils.encodeNodeId( @@ -311,12 +306,9 @@ class NodeGraph { ); const count = await this.getBucketMetaProp(bucketIndex, 'count', tran); await this.setBucketMetaProp(bucketIndex, 'count', count - 1, tran); - await tran.del([...bucketPath, 
nodesUtils.bucketDbKey(nodeId)]); - const lastUpdatedKey = nodesUtils.lastUpdatedBucketDbKey( - nodeData.lastUpdated, - nodeId, - ); - await tran.del([...lastUpdatedPath, lastUpdatedKey]); + await tran.del([...bucketPath, nodeIdKey]); + const lastUpdatedKey = nodesUtils.lastUpdatedKey(nodeData.lastUpdated); + await tran.del([...lastUpdatedPath, lastUpdatedKey, nodeIdKey]); } } @@ -354,7 +346,7 @@ class NodeGraph { }, [...this.nodeGraphBucketsDbPath, bucketKey], )) { - const nodeId = nodesUtils.parseBucketDbKey(key as unknown as Buffer); + const nodeId = nodesUtils.parseBucketDbKey(key[0] as Buffer); bucket.push([nodeId, nodeData]); } if (sort === 'distance') { @@ -429,7 +421,7 @@ class NodeGraph { this.nodeGraphBucketsDbPath, )) { const { bucketIndex: bucketIndex_, nodeId } = - nodesUtils.parseBucketsDbKey(key as unknown as Buffer); + nodesUtils.parseBucketsDbKey(key as Array); if (bucketIndex == null) { // First entry of the first bucket bucketIndex = bucketIndex_; @@ -475,8 +467,8 @@ class NodeGraph { this.nodeGraphLastUpdatedDbPath, )) { const { bucketIndex: bucketIndex_, nodeId } = - nodesUtils.parseLastUpdatedBucketsDbKey(key as unknown as Buffer); - bucketsDbIterator.seek(nodesUtils.bucketsDbKey(bucketIndex_, nodeId)); + nodesUtils.parseLastUpdatedBucketsDbKey(key as Array); + bucketsDbIterator.seek([key[0], key[2]]); // @ts-ignore // eslint-disable-next-line const iteratorResult = await bucketsDbIterator.next(); @@ -518,8 +510,10 @@ class NodeGraph { ); } + const logger = this.logger.getChild('resetBuckets'); // Setup new space const spaceNew = this.space === '0' ? 
'1' : '0'; + logger.debug('new space: ' + spaceNew); const nodeGraphMetaDbPathNew = [...this.nodeGraphDbPath, 'meta' + spaceNew]; const nodeGraphBucketsDbPathNew = [ ...this.nodeGraphDbPath, @@ -540,10 +534,16 @@ class NodeGraph { this.nodeGraphBucketsDbPath, )) { // The key is a combined bucket key and node ID - const { nodeId } = nodesUtils.parseBucketsDbKey(key as unknown as Buffer); + const { bucketIndex: bucketIndexOld, nodeId } = + nodesUtils.parseBucketsDbKey(key as Array); + const nodeIdEncoded = nodesUtils.encodeNodeId(nodeId); + const nodeIdKey = nodesUtils.bucketDbKey(nodeId); // If the new own node ID is one of the existing node IDs, it is just dropped // We only map to the new bucket if it isn't one of the existing node IDs if (nodeId.equals(nodeIdOwn)) { + logger.debug( + `nodeId ${nodeIdEncoded} from bucket ${bucketIndexOld} was identical to new NodeId and was dropped.`, + ); continue; } const bucketIndexNew = nodesUtils.bucketIndex(nodeIdOwn, nodeId); @@ -555,7 +555,7 @@ class NodeGraph { if (countNew < this.nodeBucketLimit) { await tran.put([...metaPathNew, 'count'], countNew + 1); } else { - let oldestIndexKey: Buffer | undefined = undefined; + let oldestIndexKey: Array | undefined = undefined; let oldestNodeId: NodeId | undefined = undefined; for await (const [key] of tran.iterator( { @@ -563,28 +563,29 @@ class NodeGraph { }, indexPathNew, )) { - oldestIndexKey = key as unknown as Buffer; + oldestIndexKey = key as Array; ({ nodeId: oldestNodeId } = nodesUtils.parseLastUpdatedBucketDbKey( - key as unknown as Buffer, + key as Array, )); } await tran.del([ ...bucketPathNew, nodesUtils.bucketDbKey(oldestNodeId!), ]); - await tran.del([...indexPathNew, oldestIndexKey!]); + await tran.del([...indexPathNew, ...oldestIndexKey!]); } + if (bucketIndexOld !== bucketIndexNew) { + logger.debug( + `nodeId ${nodeIdEncoded} moved ${bucketIndexOld}=>${bucketIndexNew}`, + ); + } else { + logger.debug(`nodeId ${nodeIdEncoded} unchanged ${bucketIndexOld}`); + } + 
await tran.put([...bucketPathNew, nodeIdKey], nodeData); + const lastUpdatedKey = nodesUtils.lastUpdatedKey(nodeData.lastUpdated); await tran.put( - [...bucketPathNew, nodesUtils.bucketDbKey(nodeId)], - nodeData, - ); - const lastUpdatedKey = nodesUtils.lastUpdatedBucketDbKey( - nodeData.lastUpdated, - nodeId, - ); - await tran.put( - [...indexPathNew, lastUpdatedKey], - nodesUtils.bucketDbKey(nodeId), + [...indexPathNew, lastUpdatedKey, nodeIdKey], + nodeIdKey, true, ); } @@ -678,6 +679,8 @@ class NodeGraph { * current node has less than k nodes in all of its buckets, in which case it * returns all nodes it has knowledge of) */ + // FIXME: this is still operating on assumptions from old code. + // I can't get the gt/lt to work on the iterator. @ready(new nodesErrors.ErrorNodeGraphNotRunning()) public async getClosestNodes( nodeId: NodeId, @@ -716,55 +719,44 @@ class NodeGraph { // We can just use `!(lexpack bucketId)` to start from // Less than `!(bucketId 101)!` gets us buckets 100 and lower // greater than `!(bucketId 99)!` gets up buckets 100 and greater - const prefix = Buffer.from([33]); // Code for `!` prefix if (nodeIds.length < limit) { // Just before target bucket - const bucketId = Buffer.from(nodesUtils.bucketKey(startingBucket)); - const endKeyLower = Buffer.concat([prefix, bucketId, prefix]); + const bucketIdKey = Buffer.from(nodesUtils.bucketKey(startingBucket)); const remainingLimit = limit - nodeIds.length; // Iterate over lower buckets - tran.iterator( - { - lt: endKeyLower, - limit: remainingLimit, - valueAsBuffer: false, - }, - this.nodeGraphBucketsDbPath, - ); for await (const [key, nodeData] of tran.iterator( { - lt: endKeyLower, + lt: [bucketIdKey, ''], limit: remainingLimit, valueAsBuffer: false, }, this.nodeGraphBucketsDbPath, )) { - const info = nodesUtils.parseBucketsDbKey(key as unknown as Buffer); + const info = nodesUtils.parseBucketsDbKey(key as Array); nodeIds.push([info.nodeId, nodeData]); } } if (nodeIds.length < limit) { // Just 
after target bucket const bucketId = Buffer.from(nodesUtils.bucketKey(startingBucket + 1)); - const startKeyUpper = Buffer.concat([prefix, bucketId, prefix]); const remainingLimit = limit - nodeIds.length; // Iterate over ids further away tran.iterator( { - gt: startKeyUpper, + gt: [bucketId, ''], limit: remainingLimit, }, this.nodeGraphBucketsDbPath, ); for await (const [key, nodeData] of tran.iterator( { - gt: startKeyUpper, + gt: [bucketId, ''], limit: remainingLimit, valueAsBuffer: false, }, this.nodeGraphBucketsDbPath, )) { - const info = nodesUtils.parseBucketsDbKey(key as unknown as Buffer); + const info = nodesUtils.parseBucketsDbKey(key as Array); nodeIds.push([info.nodeId, nodeData]); } } diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index 2838c8ea1..23832dbb7 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -116,10 +116,8 @@ class NodeManager { const targetHost = await networkUtils.resolveHost(targetAddress.host); return await this.nodeConnectionManager.pingNode( nodeId, - { - host: targetHost, - port: targetAddress.port, - }, + targetHost, + targetAddress.port, timer, ); } @@ -383,7 +381,7 @@ class NodeManager { */ public async getBucket( bucketIndex: number, - tran: DBTransaction, + tran?: DBTransaction, ): Promise { return await this.nodeGraph.getBucket( bucketIndex, @@ -402,7 +400,7 @@ class NodeManager { * @param block - Flag for if the operation should block or utilize the async queue * @param force - Flag for if we want to add the node without authenticating or if the bucket is full. * This will drop the oldest node in favor of the new. 
- * @param timeout Connection timeout timeout + * @param timeout Connection timeout * @param tran */ @ready(new nodesErrors.ErrorNodeManagerNotRunning()) @@ -412,8 +410,14 @@ class NodeManager { block: boolean = true, force: boolean = false, timeout?: number, - tran: DBTransaction, + tran?: DBTransaction, ): Promise { + if (tran == null) { + return this.db.withTransactionF(async (tran) => + this.setNode(nodeId, nodeAddress, block, force, timeout, tran), + ); + } + // When adding a node we need to handle 3 cases // 1. The node already exists. We need to update it's last updated field // 2. The node doesn't exist and bucket has room. @@ -486,7 +490,7 @@ class NodeManager { nodeAddress: NodeAddress, timeout?: number, ) { - const oldestNodeIds = await this.nodeGraph.getOldestNode(bucketIndex, 3, tran); + const oldestNodeIds = await this.nodeGraph.getOldestNode(bucketIndex, 3); // We want to concurrently ping the nodes const pingPromises = oldestNodeIds.map((nodeId) => { const doPing = async (): Promise<{ @@ -540,8 +544,8 @@ class NodeManager { * To be called on key renewal. Re-orders all nodes in all buckets with respect * to the new node ID. 
*/ - public async resetBuckets(tran?: DBTransaction): Promise { - return await this.nodeGraph.resetBuckets(this.keyManager.getNodeId(tran)); + public async resetBuckets(): Promise { + return await this.nodeGraph.resetBuckets(this.keyManager.getNodeId()); } /** diff --git a/src/nodes/errors.ts b/src/nodes/errors.ts index ad5b31c90..b35a58f70 100644 --- a/src/nodes/errors.ts +++ b/src/nodes/errors.ts @@ -2,17 +2,17 @@ import { ErrorPolykey, sysexits } from '../errors'; class ErrorNodes extends ErrorPolykey {} -class ErrorNodeAborted extends ErrorNodes { - description = 'Operation was aborted'; +class ErrorNodeAborted extends ErrorNodes { + static description = 'Operation was aborted'; exitCode = sysexits.USAGE; } -class ErrorNodeManagerNotRunning extends ErrorNodes { +class ErrorNodeManagerNotRunning extends ErrorNodes { static description = 'NodeManager is not running'; exitCode = sysexits.USAGE; } -class ErrorQueueNotRunning extends ErrorNodes { +class ErrorQueueNotRunning extends ErrorNodes { static description = 'queue is not running'; exitCode = sysexits.USAGE; } diff --git a/src/nodes/utils.ts b/src/nodes/utils.ts index c61a6cd58..449fc407b 100644 --- a/src/nodes/utils.ts +++ b/src/nodes/utils.ts @@ -6,7 +6,7 @@ import type { } from './types'; import { IdInternal } from '@matrixai/id'; import lexi from 'lexicographic-integer'; -import { bytes2BigInt, bufferSplit } from '../utils'; +import { bytes2BigInt } from '../utils'; import * as keysUtils from '../keys/utils'; // FIXME: @@ -135,22 +135,21 @@ function lastUpdatedBucketDbKey(lastUpdated: number, nodeId: NodeId): Buffer { ]); } +function lastUpdatedKey(lastUpdated: number): Buffer { + return Buffer.from(lexi.pack(lastUpdated, 'hex')); +} + /** * Parse the NodeGraph buckets sublevel key * The keys look like `!!` * It is assumed that the `!` is the sublevel prefix. 
*/ -function parseBucketsDbKey(keyBuffer: Buffer): { +function parseBucketsDbKey(keyBufferArray: Array): { bucketIndex: NodeBucketIndex; bucketKey: string; nodeId: NodeId; } { - const [, bucketKeyBuffer, nodeIdBuffer] = bufferSplit( - keyBuffer, - prefixBuffer, - 3, - true, - ); + const [bucketKeyBuffer, nodeIdBuffer] = keyBufferArray; if (bucketKeyBuffer == null || nodeIdBuffer == null) { throw new TypeError('Buffer is not an NodeGraph buckets key'); } @@ -178,19 +177,14 @@ function parseBucketDbKey(keyBuffer: Buffer): NodeId { * The keys look like `!!-` * It is assumed that the `!` is the sublevel prefix. */ -function parseLastUpdatedBucketsDbKey(keyBuffer: Buffer): { +function parseLastUpdatedBucketsDbKey(keyBufferArray: Array): { bucketIndex: NodeBucketIndex; bucketKey: string; lastUpdated: number; nodeId: NodeId; } { - const [, bucketKeyBuffer, lastUpdatedBuffer] = bufferSplit( - keyBuffer, - prefixBuffer, - 3, - true, - ); - if (bucketKeyBuffer == null || lastUpdatedBuffer == null) { + const [bucketKeyBuffer, ...lastUpdatedBufferArray] = keyBufferArray; + if (bucketKeyBuffer == null || lastUpdatedBufferArray == null) { throw new TypeError('Buffer is not an NodeGraph index key'); } const bucketKey = bucketKeyBuffer.toString(); @@ -198,8 +192,9 @@ function parseLastUpdatedBucketsDbKey(keyBuffer: Buffer): { if (bucketIndex == null) { throw new TypeError('Buffer is not an NodeGraph index key'); } - const { lastUpdated, nodeId } = - parseLastUpdatedBucketDbKey(lastUpdatedBuffer); + const { lastUpdated, nodeId } = parseLastUpdatedBucketDbKey( + lastUpdatedBufferArray, + ); return { bucketIndex, bucketKey, @@ -213,16 +208,11 @@ function parseLastUpdatedBucketsDbKey(keyBuffer: Buffer): { * The keys look like `-` * It is assumed that the `!` is the sublevel prefix. 
*/ -function parseLastUpdatedBucketDbKey(keyBuffer: Buffer): { +function parseLastUpdatedBucketDbKey(keyBufferArray: Array): { lastUpdated: number; nodeId: NodeId; } { - const [lastUpdatedBuffer, nodeIdBuffer] = bufferSplit( - keyBuffer, - Buffer.from('-'), - 2, - true, - ); + const [lastUpdatedBuffer, nodeIdBuffer] = keyBufferArray; if (lastUpdatedBuffer == null || nodeIdBuffer == null) { throw new TypeError('Buffer is not an NodeGraph index bucket key'); } @@ -332,6 +322,7 @@ export { bucketDbKey, lastUpdatedBucketsDbKey, lastUpdatedBucketDbKey, + lastUpdatedKey, parseBucketsDbKey, parseBucketDbKey, parseLastUpdatedBucketsDbKey, diff --git a/tests/agent/service/nodesChainDataGet.test.ts b/tests/agent/service/nodesChainDataGet.test.ts index 8bc388763..306d9cd06 100644 --- a/tests/agent/service/nodesChainDataGet.test.ts +++ b/tests/agent/service/nodesChainDataGet.test.ts @@ -55,6 +55,8 @@ describe('nodesClosestLocalNode', () => { const agentService = { nodesClosestLocalNodesGet: nodesClosestLocalNodesGet({ nodeGraph: pkAgent.nodeGraph, + db: pkAgent.db, + logger, }), }; grpcServer = new GRPCServer({ logger }); diff --git a/tests/agent/service/nodesClosestLocalNode.test.ts b/tests/agent/service/nodesClosestLocalNode.test.ts index 5453d8e5a..4e080443a 100644 --- a/tests/agent/service/nodesClosestLocalNode.test.ts +++ b/tests/agent/service/nodesClosestLocalNode.test.ts @@ -55,6 +55,8 @@ describe('nodesChainDataGet', () => { const agentService = { nodesChainDataGet: nodesChainDataGet({ sigchain: pkAgent.sigchain, + db: pkAgent.db, + logger, }), }; grpcServer = new GRPCServer({ logger }); diff --git a/tests/agent/service/nodesHolePunchMessage.test.ts b/tests/agent/service/nodesHolePunchMessage.test.ts index 4bef6d759..70615948c 100644 --- a/tests/agent/service/nodesHolePunchMessage.test.ts +++ b/tests/agent/service/nodesHolePunchMessage.test.ts @@ -55,6 +55,8 @@ describe('nodesHolePunchMessage', () => { keyManager: pkAgent.keyManager, nodeConnectionManager: 
pkAgent.nodeConnectionManager, nodeManager: pkAgent.nodeManager, + db: pkAgent.db, + logger, }), }; grpcServer = new GRPCServer({ logger }); diff --git a/tests/client/service/agentLockAll.test.ts b/tests/client/service/agentLockAll.test.ts index a024cc05c..fe56a0d7d 100644 --- a/tests/client/service/agentLockAll.test.ts +++ b/tests/client/service/agentLockAll.test.ts @@ -14,6 +14,7 @@ import { ClientServiceService } from '@/proto/js/polykey/v1/client_service_grpc_ import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as keysUtils from '@/keys/utils'; import * as clientUtils from '@/client/utils/utils'; +import { timerStart } from '@/utils/index'; import * as testUtils from '../../utils'; describe('agentLockall', () => { @@ -89,7 +90,7 @@ describe('agentLockall', () => { nodeId: keyManager.getNodeId(), host: '127.0.0.1' as Host, port: grpcServer.getPort(), - timeout: 5000, + timer: timerStart(5000), logger, }); }); diff --git a/tests/gestalts/GestaltGraph.test.ts b/tests/gestalts/GestaltGraph.test.ts index 0953a2b4a..e24a08e00 100644 --- a/tests/gestalts/GestaltGraph.test.ts +++ b/tests/gestalts/GestaltGraph.test.ts @@ -1248,8 +1248,8 @@ describe('GestaltGraph', () => { // its just that node 1 is eliminated nodePerms = await acl.getNodePerms(); expect(Object.keys(nodePerms)).toHaveLength(1); - expect(nodePerms[0]).not.toHaveProperty(nodeIdABC.toString()); - expect(nodePerms[0]).toHaveProperty(nodeIdDEE.toString()); + expect(nodePerms[0][nodeIdABC.toString()]).toBeUndefined(); + expect(nodePerms[0][nodeIdDEE.toString()]).toBeDefined(); await gestaltGraph.unsetNode(nodeIdDEE); nodePerms = await acl.getNodePerms(); expect(Object.keys(nodePerms)).toHaveLength(0); diff --git a/tests/network/Proxy.test.ts b/tests/network/Proxy.test.ts index 7e8b12d46..5bab753c4 100644 --- a/tests/network/Proxy.test.ts +++ b/tests/network/Proxy.test.ts @@ -3034,10 +3034,7 @@ describe(Proxy.name, () => { }; await utpSocketBind(0, localHost); const utpSocketPort = 
utpSocket.address().port; - await proxy.openConnectionReverse( - localHost, - utpSocketPort as Port, - ); + await proxy.openConnectionReverse(localHost, utpSocketPort as Port); const utpConn = utpSocket.connect(proxyPort, proxyHost); const tlsSocket = tls.connect( { @@ -3068,10 +3065,7 @@ describe(Proxy.name, () => { await clientReadyP; await clientSecureConnectP; await serverConnP; - await proxy.closeConnectionReverse( - localHost, - utpSocketPort as Port, - ); + await proxy.closeConnectionReverse(localHost, utpSocketPort as Port); expect(proxy.getConnectionReverseCount()).toBe(0); await clientCloseP; await serverConnEndP; diff --git a/tests/nodes/NodeConnection.test.ts b/tests/nodes/NodeConnection.test.ts index 725e6a684..dbd95397e 100644 --- a/tests/nodes/NodeConnection.test.ts +++ b/tests/nodes/NodeConnection.test.ts @@ -63,7 +63,7 @@ const mockedGenerateDeterministicKeyPair = jest.spyOn( 'generateDeterministicKeyPair', ); -describe('${NodeConnection.name} test', () => { +describe(`${NodeConnection.name} test`, () => { const logger = new Logger(`${NodeConnection.name} test`, LogLevel.WARN, [ new StreamHandler(), ]); diff --git a/tests/nodes/NodeConnectionManager.seednodes.test.ts b/tests/nodes/NodeConnectionManager.seednodes.test.ts index b63a4ae54..e6d91f399 100644 --- a/tests/nodes/NodeConnectionManager.seednodes.test.ts +++ b/tests/nodes/NodeConnectionManager.seednodes.test.ts @@ -424,84 +424,88 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { await queue?.stop(); } }); - test('should expand the network when nodes enter', async () => { - // Using a single seed node we need to check that each entering node adds itself to the seed node. - // Also need to check that the new nodes can be seen in the network. 
- let node1: PolykeyAgent | undefined; - let node2: PolykeyAgent | undefined; - const seedNodes: SeedNodes = {}; - seedNodes[nodesUtils.encodeNodeId(remoteNodeId1)] = { - host: remoteNode1.proxy.getProxyHost(), - port: remoteNode1.proxy.getProxyPort(), - }; - seedNodes[nodesUtils.encodeNodeId(remoteNodeId2)] = { - host: remoteNode2.proxy.getProxyHost(), - port: remoteNode2.proxy.getProxyPort(), - }; - try { - logger.setLevel(LogLevel.WARN); - node1 = await PolykeyAgent.createPolykeyAgent({ - nodePath: path.join(dataDir, 'node1'), - password: 'password', - networkConfig: { - proxyHost: localHost, - agentHost: localHost, - clientHost: localHost, - forwardHost: localHost, - }, - seedNodes, - logger, - }); - node2 = await PolykeyAgent.createPolykeyAgent({ - nodePath: path.join(dataDir, 'node2'), - password: 'password', - networkConfig: { - proxyHost: localHost, - agentHost: localHost, - clientHost: localHost, - forwardHost: localHost, - }, - seedNodes, - logger, - }); + test( + 'should expand the network when nodes enter', + async () => { + // Using a single seed node we need to check that each entering node adds itself to the seed node. + // Also need to check that the new nodes can be seen in the network. 
+ let node1: PolykeyAgent | undefined; + let node2: PolykeyAgent | undefined; + const seedNodes: SeedNodes = {}; + seedNodes[nodesUtils.encodeNodeId(remoteNodeId1)] = { + host: remoteNode1.proxy.getProxyHost(), + port: remoteNode1.proxy.getProxyPort(), + }; + seedNodes[nodesUtils.encodeNodeId(remoteNodeId2)] = { + host: remoteNode2.proxy.getProxyHost(), + port: remoteNode2.proxy.getProxyPort(), + }; + try { + logger.setLevel(LogLevel.WARN); + node1 = await PolykeyAgent.createPolykeyAgent({ + nodePath: path.join(dataDir, 'node1'), + password: 'password', + networkConfig: { + proxyHost: localHost, + agentHost: localHost, + clientHost: localHost, + forwardHost: localHost, + }, + seedNodes, + logger, + }); + node2 = await PolykeyAgent.createPolykeyAgent({ + nodePath: path.join(dataDir, 'node2'), + password: 'password', + networkConfig: { + proxyHost: localHost, + agentHost: localHost, + clientHost: localHost, + forwardHost: localHost, + }, + seedNodes, + logger, + }); - await node1.queue.drained(); - await node1.nodeManager.refreshBucketQueueDrained(); - await node2.queue.drained(); - await node2.nodeManager.refreshBucketQueueDrained(); + await node1.queue.drained(); + await node1.nodeManager.refreshBucketQueueDrained(); + await node2.queue.drained(); + await node2.nodeManager.refreshBucketQueueDrained(); - const getAllNodes = async (node: PolykeyAgent) => { - const nodes: Array = []; - for await (const [nodeId] of node.nodeGraph.getNodes()) { - nodes.push(nodesUtils.encodeNodeId(nodeId)); - } - return nodes; - }; - const rNode1Nodes = await getAllNodes(remoteNode1); - const rNode2Nodes = await getAllNodes(remoteNode2); - const node1Nodes = await getAllNodes(node1); - const node2Nodes = await getAllNodes(node2); + const getAllNodes = async (node: PolykeyAgent) => { + const nodes: Array = []; + for await (const [nodeId] of node.nodeGraph.getNodes()) { + nodes.push(nodesUtils.encodeNodeId(nodeId)); + } + return nodes; + }; + const rNode1Nodes = await 
getAllNodes(remoteNode1); + const rNode2Nodes = await getAllNodes(remoteNode2); + const node1Nodes = await getAllNodes(node1); + const node2Nodes = await getAllNodes(node2); - const nodeIdR1 = nodesUtils.encodeNodeId(remoteNodeId1); - const nodeIdR2 = nodesUtils.encodeNodeId(remoteNodeId2); - const nodeId1 = nodesUtils.encodeNodeId(node1.keyManager.getNodeId()); - const nodeId2 = nodesUtils.encodeNodeId(node2.keyManager.getNodeId()); - expect(rNode1Nodes).toContain(nodeId1); - expect(rNode1Nodes).toContain(nodeId2); - expect(rNode2Nodes).toContain(nodeId1); - expect(rNode2Nodes).toContain(nodeId2); - expect(node1Nodes).toContain(nodeIdR1); - expect(node1Nodes).toContain(nodeIdR2); - expect(node1Nodes).toContain(nodeId2); - expect(node2Nodes).toContain(nodeIdR1); - expect(node2Nodes).toContain(nodeIdR2); - expect(node2Nodes).toContain(nodeId1); - } finally { - logger.setLevel(LogLevel.WARN); - await node1?.stop(); - await node1?.destroy(); - await node2?.stop(); - await node2?.destroy(); - } - }); + const nodeIdR1 = nodesUtils.encodeNodeId(remoteNodeId1); + const nodeIdR2 = nodesUtils.encodeNodeId(remoteNodeId2); + const nodeId1 = nodesUtils.encodeNodeId(node1.keyManager.getNodeId()); + const nodeId2 = nodesUtils.encodeNodeId(node2.keyManager.getNodeId()); + expect(rNode1Nodes).toContain(nodeId1); + expect(rNode1Nodes).toContain(nodeId2); + expect(rNode2Nodes).toContain(nodeId1); + expect(rNode2Nodes).toContain(nodeId2); + expect(node1Nodes).toContain(nodeIdR1); + expect(node1Nodes).toContain(nodeIdR2); + expect(node1Nodes).toContain(nodeId2); + expect(node2Nodes).toContain(nodeIdR1); + expect(node2Nodes).toContain(nodeIdR2); + expect(node2Nodes).toContain(nodeId1); + } finally { + logger.setLevel(LogLevel.WARN); + await node1?.stop(); + await node1?.destroy(); + await node2?.stop(); + await node2?.destroy(); + } + }, + global.defaultTimeout * 2, + ); }); diff --git a/tests/nodes/utils.test.ts b/tests/nodes/utils.test.ts index c87a82f26..0d962f963 100644 --- 
a/tests/nodes/utils.test.ts +++ b/tests/nodes/utils.test.ts @@ -94,7 +94,7 @@ describe('nodes/utils', () => { } }); test('parse NodeGraph buckets db key', async () => { - const bucketsDb = await db.level('buckets'); + const bucketsDbPath = ['buckets']; const data: Array<{ bucketIndex: number; bucketKey: string; @@ -111,16 +111,19 @@ describe('nodes/utils', () => { nodeId, key: Buffer.concat([Buffer.from(bucketKey), nodeId]), }); - const bucketDomain = ['buckets', bucketKey]; - await db.put(bucketDomain, nodesUtils.bucketDbKey(nodeId), null); + await db.put( + ['buckets', bucketKey, nodesUtils.bucketDbKey(nodeId)], + null, + ); } // LevelDB will store keys in lexicographic order // Use the key property as a concatenated buffer of the bucket key and node ID data.sort((a, b) => Buffer.compare(a.key, b.key)); let i = 0; - for await (const key of bucketsDb.createKeyStream()) { + + for await (const [key] of db.iterator({}, bucketsDbPath)) { const { bucketIndex, bucketKey, nodeId } = nodesUtils.parseBucketsDbKey( - key as Buffer, + key as Array, ); expect(bucketIndex).toBe(data[i].bucketIndex); expect(bucketKey).toBe(data[i].bucketKey); @@ -129,7 +132,7 @@ describe('nodes/utils', () => { } }); test('parse NodeGraph lastUpdated buckets db key', async () => { - const lastUpdatedDb = await db.level('lastUpdated'); + const lastUpdatedDbPath = ['lastUpdated']; const data: Array<{ bucketIndex: number; bucketKey: string; @@ -142,28 +145,25 @@ describe('nodes/utils', () => { const bucketKey = lexi.pack(bucketIndex, 'hex'); const lastUpdated = utils.getUnixtime(); const nodeId = testNodesUtils.generateRandomNodeId(); - const lastUpdatedKey = nodesUtils.lastUpdatedBucketDbKey( - lastUpdated, - nodeId, - ); + const nodeIdKey = nodesUtils.bucketDbKey(nodeId); + const lastUpdatedKey = nodesUtils.lastUpdatedKey(lastUpdated); data.push({ bucketIndex, bucketKey, lastUpdated, nodeId, - key: Buffer.concat([Buffer.from(bucketKey), lastUpdatedKey]), + key: 
Buffer.concat([Buffer.from(bucketKey), lastUpdatedKey, nodeIdKey]), }); - const lastUpdatedDomain = ['lastUpdated', bucketKey]; - await db.put(lastUpdatedDomain, lastUpdatedKey, null); + await db.put(['lastUpdated', bucketKey, lastUpdatedKey, nodeIdKey], null); } // LevelDB will store keys in lexicographic order // Use the key property as a concatenated buffer of // the bucket key and last updated and node ID data.sort((a, b) => Buffer.compare(a.key, b.key)); let i = 0; - for await (const key of lastUpdatedDb.createKeyStream()) { + for await (const [key] of db.iterator({}, lastUpdatedDbPath)) { const { bucketIndex, bucketKey, lastUpdated, nodeId } = - nodesUtils.parseLastUpdatedBucketsDbKey(key as Buffer); + nodesUtils.parseLastUpdatedBucketsDbKey(key as Array); expect(bucketIndex).toBe(data[i].bucketIndex); expect(bucketKey).toBe(data[i].bucketKey); expect(lastUpdated).toBe(data[i].lastUpdated); diff --git a/tests/utils.ts b/tests/utils.ts index 84c67c90e..3ac9a7499 100644 --- a/tests/utils.ts +++ b/tests/utils.ts @@ -14,7 +14,6 @@ import * as keysUtils from '@/keys/utils'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as grpcErrors from '@/grpc/errors'; import { sleep } from '@/utils'; -import * as errors from '@/errors'; import config from '@/config'; /** diff --git a/tests/vaults/VaultInternal.test.ts b/tests/vaults/VaultInternal.test.ts index ab817a538..d95ae1c2c 100644 --- a/tests/vaults/VaultInternal.test.ts +++ b/tests/vaults/VaultInternal.test.ts @@ -678,60 +678,64 @@ describe('VaultInternal', () => { expect(log).toHaveLength(2); expect(log[0].commitId).toStrictEqual(commit); }); - test('garbage collection', async () => { - await vault.writeF(async (efs) => { - await efs.writeFile(secret1.name, secret1.content); - }); - await vault.writeF(async (efs) => { - await efs.writeFile(secret2.name, secret2.content); - }); - await vault.writeF(async (efs) => { - await efs.writeFile(secret3.name, secret3.content); - }); - // @ts-ignore: 
kidnap efs - const vaultEfs = vault.efs; - // @ts-ignore: kidnap efs - const vaultEfsData = vault.efsVault; - const quickCommit = async (ref: string, secret: string) => { - await vaultEfsData.writeFile(secret, secret); - await git.add({ - fs: vaultEfs, - dir: vault.vaultDataDir, - gitdir: vault.vaultGitDir, - filepath: secret, + test( + 'garbage collection', + async () => { + await vault.writeF(async (efs) => { + await efs.writeFile(secret1.name, secret1.content); }); - return await git.commit({ - fs: vaultEfs, - dir: vault.vaultDataDir, - gitdir: vault.vaultGitDir, - author: { - name: 'test', - email: 'test', - }, - message: 'test', - ref: ref, + await vault.writeF(async (efs) => { + await efs.writeFile(secret2.name, secret2.content); }); - }; - const log = await vault.log(); - let num = 5; - const refs: string[] = []; - for (const logElement of log) { - refs.push(await quickCommit(logElement.commitId, `secret-${num++}`)); - } - // @ts-ignore - await vault.garbageCollectGitObjects(); - - for (const ref of refs) { - await expect( - git.checkout({ + await vault.writeF(async (efs) => { + await efs.writeFile(secret3.name, secret3.content); + }); + // @ts-ignore: kidnap efs + const vaultEfs = vault.efs; + // @ts-ignore: kidnap efs + const vaultEfsData = vault.efsVault; + const quickCommit = async (ref: string, secret: string) => { + await vaultEfsData.writeFile(secret, secret); + await git.add({ fs: vaultEfs, dir: vault.vaultDataDir, gitdir: vault.vaultGitDir, - ref, - }), - ).rejects.toThrow(git.Errors.CommitNotFetchedError); - } - }); + filepath: secret, + }); + return await git.commit({ + fs: vaultEfs, + dir: vault.vaultDataDir, + gitdir: vault.vaultGitDir, + author: { + name: 'test', + email: 'test', + }, + message: 'test', + ref: ref, + }); + }; + const log = await vault.log(); + let num = 5; + const refs: string[] = []; + for (const logElement of log) { + refs.push(await quickCommit(logElement.commitId, `secret-${num++}`)); + } + // @ts-ignore + await 
vault.garbageCollectGitObjects(); + + for (const ref of refs) { + await expect( + git.checkout({ + fs: vaultEfs, + dir: vault.vaultDataDir, + gitdir: vault.vaultGitDir, + ref, + }), + ).rejects.toThrow(git.Errors.CommitNotFetchedError); + } + }, + global.defaultTimeout * 2, + ); // Locking tests const waitDelay = 200; test('writeF respects read and write locking', async () => { diff --git a/tests/vaults/VaultManager.test.ts b/tests/vaults/VaultManager.test.ts index 4cf5d50f9..e57495cb9 100644 --- a/tests/vaults/VaultManager.test.ts +++ b/tests/vaults/VaultManager.test.ts @@ -185,7 +185,7 @@ describe('VaultManager', () => { await vaultManager?.destroy(); } }, - global.defaultTimeout * 2, + global.defaultTimeout * 4, ); test('can rename a vault', async () => { const vaultManager = await VaultManager.createVaultManager({ @@ -278,49 +278,53 @@ describe('VaultManager', () => { await vaultManager?.destroy(); } }); - test('able to read and load existing metadata', async () => { - const vaultManager = await VaultManager.createVaultManager({ - vaultsPath, - keyManager: dummyKeyManager, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager: {} as NodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, - db, - logger: logger.getChild(VaultManager.name), - }); - try { - const vaultNames = [ - 'Vault1', - 'Vault2', - 'Vault3', - 'Vault4', - 'Vault5', - 'Vault6', - 'Vault7', - 'Vault8', - 'Vault9', - 'Vault10', - ]; - for (const vaultName of vaultNames) { - await vaultManager.createVault(vaultName as VaultName); - } - const vaults = await vaultManager.listVaults(); - const vaultId = vaults.get('Vault1' as VaultName) as VaultId; - expect(vaultId).not.toBeUndefined(); - await vaultManager.stop(); - await vaultManager.start(); - const restartedVaultNames: Array = []; - const vaultList = await vaultManager.listVaults(); - vaultList.forEach((_, vaultName) => { - restartedVaultNames.push(vaultName); + test( + 'able to read and load existing 
metadata', + async () => { + const vaultManager = await VaultManager.createVaultManager({ + vaultsPath, + keyManager: dummyKeyManager, + gestaltGraph: {} as GestaltGraph, + nodeConnectionManager: {} as NodeConnectionManager, + acl: {} as ACL, + notificationsManager: {} as NotificationsManager, + db, + logger: logger.getChild(VaultManager.name), }); - expect(restartedVaultNames.sort()).toEqual(vaultNames.sort()); - } finally { - await vaultManager?.stop(); - await vaultManager?.destroy(); - } - }); + try { + const vaultNames = [ + 'Vault1', + 'Vault2', + 'Vault3', + 'Vault4', + 'Vault5', + 'Vault6', + 'Vault7', + 'Vault8', + 'Vault9', + 'Vault10', + ]; + for (const vaultName of vaultNames) { + await vaultManager.createVault(vaultName as VaultName); + } + const vaults = await vaultManager.listVaults(); + const vaultId = vaults.get('Vault1' as VaultName) as VaultId; + expect(vaultId).not.toBeUndefined(); + await vaultManager.stop(); + await vaultManager.start(); + const restartedVaultNames: Array = []; + const vaultList = await vaultManager.listVaults(); + vaultList.forEach((_, vaultName) => { + restartedVaultNames.push(vaultName); + }); + expect(restartedVaultNames.sort()).toEqual(vaultNames.sort()); + } finally { + await vaultManager?.stop(); + await vaultManager?.destroy(); + } + }, + global.defaultTimeout * 2, + ); test('cannot concurrently create vaults with the same name', async () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, @@ -884,91 +888,95 @@ describe('VaultManager', () => { await vaultManager?.destroy(); } }); - test('can pull a cloned vault', async () => { - const vaultManager = await VaultManager.createVaultManager({ - vaultsPath, - keyManager: dummyKeyManager, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, - db, - logger: logger.getChild(VaultManager.name), - }); - try { - // Creating some state at the remote - await 
remoteKeynode1.vaultManager.withVaults( - [remoteVaultId], - async (vault) => { - await vault.writeF(async (efs) => { - await efs.writeFile('secret-1', 'secret1'); - }); - }, - ); - - // Setting permissions - await remoteKeynode1.gestaltGraph.setNode({ - id: localNodeIdEncoded, - chain: {}, + test( + 'can pull a cloned vault', + async () => { + const vaultManager = await VaultManager.createVaultManager({ + vaultsPath, + keyManager: dummyKeyManager, + gestaltGraph: {} as GestaltGraph, + nodeConnectionManager, + acl: {} as ACL, + notificationsManager: {} as NotificationsManager, + db, + logger: logger.getChild(VaultManager.name), }); - await remoteKeynode1.gestaltGraph.setGestaltActionByNode( - localNodeId, - 'scan', - ); - await remoteKeynode1.acl.setVaultAction( - remoteVaultId, - localNodeId, - 'clone', - ); - await remoteKeynode1.acl.setVaultAction( - remoteVaultId, - localNodeId, - 'pull', - ); + try { + // Creating some state at the remote + await remoteKeynode1.vaultManager.withVaults( + [remoteVaultId], + async (vault) => { + await vault.writeF(async (efs) => { + await efs.writeFile('secret-1', 'secret1'); + }); + }, + ); - await vaultManager.cloneVault(remoteKeynode1Id, vaultName); - const vaultId = await vaultManager.getVaultId(vaultName); - if (vaultId === undefined) fail('VaultId is not found.'); - await vaultManager.withVaults([vaultId], async (vaultClone) => { - return await vaultClone.readF(async (efs) => { - const file = await efs.readFile('secret-1', { encoding: 'utf8' }); - const secretsList = await efs.readdir('.'); - expect(file).toBe('secret1'); - expect(secretsList).toContain('secret-1'); - expect(secretsList).not.toContain('secret-2'); + // Setting permissions + await remoteKeynode1.gestaltGraph.setNode({ + id: localNodeIdEncoded, + chain: {}, }); - }); + await remoteKeynode1.gestaltGraph.setGestaltActionByNode( + localNodeId, + 'scan', + ); + await remoteKeynode1.acl.setVaultAction( + remoteVaultId, + localNodeId, + 'clone', + ); + await 
remoteKeynode1.acl.setVaultAction( + remoteVaultId, + localNodeId, + 'pull', + ); - // Creating new history - await remoteKeynode1.vaultManager.withVaults( - [remoteVaultId], - async (vault) => { - await vault.writeF(async (efs) => { - await efs.writeFile('secret-2', 'secret2'); + await vaultManager.cloneVault(remoteKeynode1Id, vaultName); + const vaultId = await vaultManager.getVaultId(vaultName); + if (vaultId === undefined) fail('VaultId is not found.'); + await vaultManager.withVaults([vaultId], async (vaultClone) => { + return await vaultClone.readF(async (efs) => { + const file = await efs.readFile('secret-1', { encoding: 'utf8' }); + const secretsList = await efs.readdir('.'); + expect(file).toBe('secret1'); + expect(secretsList).toContain('secret-1'); + expect(secretsList).not.toContain('secret-2'); }); - }, - ); + }); - // Pulling vault - await vaultManager.pullVault({ - vaultId: vaultId, - }); + // Creating new history + await remoteKeynode1.vaultManager.withVaults( + [remoteVaultId], + async (vault) => { + await vault.writeF(async (efs) => { + await efs.writeFile('secret-2', 'secret2'); + }); + }, + ); - // Should have new data - await vaultManager.withVaults([vaultId], async (vaultClone) => { - return await vaultClone.readF(async (efs) => { - const file = await efs.readFile('secret-1', { encoding: 'utf8' }); - const secretsList = await efs.readdir('.'); - expect(file).toBe('secret1'); - expect(secretsList).toContain('secret-1'); - expect(secretsList).toContain('secret-2'); + // Pulling vault + await vaultManager.pullVault({ + vaultId: vaultId, }); - }); - } finally { - await vaultManager?.stop(); - await vaultManager?.destroy(); - } - }); + + // Should have new data + await vaultManager.withVaults([vaultId], async (vaultClone) => { + return await vaultClone.readF(async (efs) => { + const file = await efs.readFile('secret-1', { encoding: 'utf8' }); + const secretsList = await efs.readdir('.'); + expect(file).toBe('secret1'); + 
expect(secretsList).toContain('secret-1'); + expect(secretsList).toContain('secret-2'); + }); + }); + } finally { + await vaultManager?.stop(); + await vaultManager?.destroy(); + } + }, + global.defaultTimeout * 2, + ); test( 'manage pulling from different remotes', async () => { @@ -1105,78 +1113,82 @@ describe('VaultManager', () => { }, global.failedConnectionTimeout, ); - test('able to recover metadata after complex operations', async () => { - const vaultManager = await VaultManager.createVaultManager({ - vaultsPath, - keyManager: dummyKeyManager, - gestaltGraph: {} as GestaltGraph, - nodeConnectionManager, - acl: {} as ACL, - notificationsManager: {} as NotificationsManager, - db, - logger: logger.getChild(VaultManager.name), - }); - try { - const vaultNames = ['Vault1', 'Vault2', 'Vault3', 'Vault4', 'Vault5']; - const alteredVaultNames = [ - 'Vault1', - 'Vault2', - 'Vault3', - 'Vault6', - 'Vault10', - ]; - for (const vaultName of vaultNames) { - await vaultManager.createVault(vaultName as VaultName); - } - const v5 = await vaultManager.getVaultId('Vault5' as VaultName); - expect(v5).not.toBeUndefined(); - await vaultManager.destroyVault(v5!); - const v4 = await vaultManager.getVaultId('Vault4' as VaultName); - expect(v4).toBeTruthy(); - await vaultManager.renameVault(v4!, 'Vault10' as VaultName); - const v6 = await vaultManager.createVault('Vault6' as VaultName); - - await vaultManager.withVaults([v6], async (vault6) => { - await vault6.writeF(async (efs) => { - await efs.writeFile('reloaded', 'reload'); - }); + test( + 'able to recover metadata after complex operations', + async () => { + const vaultManager = await VaultManager.createVaultManager({ + vaultsPath, + keyManager: dummyKeyManager, + gestaltGraph: {} as GestaltGraph, + nodeConnectionManager, + acl: {} as ACL, + notificationsManager: {} as NotificationsManager, + db, + logger: logger.getChild(VaultManager.name), }); - - const vn: Array = []; - (await vaultManager.listVaults()).forEach((_, 
vaultName) => - vn.push(vaultName), - ); - expect(vn.sort()).toEqual(alteredVaultNames.sort()); - await vaultManager.stop(); - await vaultManager.start(); - await vaultManager.createVault('Vault7' as VaultName); - - const v10 = await vaultManager.getVaultId('Vault10' as VaultName); - expect(v10).not.toBeUndefined(); - alteredVaultNames.push('Vault7'); - expect((await vaultManager.listVaults()).size).toEqual( - alteredVaultNames.length, - ); - const vnAltered: Array = []; - (await vaultManager.listVaults()).forEach((_, vaultName) => - vnAltered.push(vaultName), - ); - expect(vnAltered.sort()).toEqual(alteredVaultNames.sort()); - const file = await vaultManager.withVaults( - [v6], - async (reloadedVault) => { - return await reloadedVault.readF(async (efs) => { - return await efs.readFile('reloaded', { encoding: 'utf8' }); + try { + const vaultNames = ['Vault1', 'Vault2', 'Vault3', 'Vault4', 'Vault5']; + const alteredVaultNames = [ + 'Vault1', + 'Vault2', + 'Vault3', + 'Vault6', + 'Vault10', + ]; + for (const vaultName of vaultNames) { + await vaultManager.createVault(vaultName as VaultName); + } + const v5 = await vaultManager.getVaultId('Vault5' as VaultName); + expect(v5).not.toBeUndefined(); + await vaultManager.destroyVault(v5!); + const v4 = await vaultManager.getVaultId('Vault4' as VaultName); + expect(v4).toBeTruthy(); + await vaultManager.renameVault(v4!, 'Vault10' as VaultName); + const v6 = await vaultManager.createVault('Vault6' as VaultName); + + await vaultManager.withVaults([v6], async (vault6) => { + await vault6.writeF(async (efs) => { + await efs.writeFile('reloaded', 'reload'); }); - }, - ); + }); - expect(file).toBe('reload'); - } finally { - await vaultManager?.stop(); - await vaultManager?.destroy(); - } - }); + const vn: Array = []; + (await vaultManager.listVaults()).forEach((_, vaultName) => + vn.push(vaultName), + ); + expect(vn.sort()).toEqual(alteredVaultNames.sort()); + await vaultManager.stop(); + await vaultManager.start(); + await 
vaultManager.createVault('Vault7' as VaultName); + + const v10 = await vaultManager.getVaultId('Vault10' as VaultName); + expect(v10).not.toBeUndefined(); + alteredVaultNames.push('Vault7'); + expect((await vaultManager.listVaults()).size).toEqual( + alteredVaultNames.length, + ); + const vnAltered: Array = []; + (await vaultManager.listVaults()).forEach((_, vaultName) => + vnAltered.push(vaultName), + ); + expect(vnAltered.sort()).toEqual(alteredVaultNames.sort()); + const file = await vaultManager.withVaults( + [v6], + async (reloadedVault) => { + return await reloadedVault.readF(async (efs) => { + return await efs.readFile('reloaded', { encoding: 'utf8' }); + }); + }, + ); + + expect(file).toBe('reload'); + } finally { + await vaultManager?.stop(); + await vaultManager?.destroy(); + } + }, + global.defaultTimeout * 2, + ); test('throw when trying to commit to a cloned vault', async () => { const vaultManager = await VaultManager.createVaultManager({ vaultsPath, @@ -1337,7 +1349,7 @@ describe('VaultManager', () => { }); await sleep(200); expect(pullVaultMock).not.toHaveBeenCalled(); - releaseWrite(); + await releaseWrite(); await pullP; expect(pullVaultMock).toHaveBeenCalled(); pullVaultMock.mockClear(); @@ -1363,16 +1375,16 @@ describe('VaultManager', () => { }); await sleep(200); expect(gitPullMock).not.toHaveBeenCalled(); - releaseVaultWrite(); - await pullP2; - expect(gitPullMock).toHaveBeenCalled(); - } finally { - pullVaultMock.mockRestore(); - gitPullMock.mockRestore(); - await vaultManager?.stop(); - await vaultManager?.destroy(); - } - }, + await releaseVaultWrite(); + await pullP2; + expect(gitPullMock).toHaveBeenCalled(); + } finally { + pullVaultMock.mockRestore(); + gitPullMock.mockRestore(); + await vaultManager?.stop(); + await vaultManager?.destroy(); + } + }, global.failedConnectionTimeout, ); }); From a0a42f53d0adf3cc63d05403cfe9d5711c59456f Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Tue, 7 Jun 2022 11:23:00 +1000 Subject: [PATCH 32/39] 
syntax: style fixes Exports moved to end of the file as part of the `export {}` block Cleaned up TODOs and FIXMEs Fixed index and other incorrect imports Type fixes in utils --- src/client/service/nodesGetAll.ts | 15 +++++++-------- src/nodes/NodeConnectionManager.ts | 7 +------ src/nodes/NodeGraph.ts | 2 -- src/nodes/NodeManager.ts | 6 +++--- src/nodes/Queue.ts | 6 +++--- src/nodes/types.ts | 17 ----------------- src/nodes/utils.ts | 15 +++++++-------- src/utils/utils.ts | 5 +++-- src/vaults/VaultInternal.ts | 3 ++- 9 files changed, 26 insertions(+), 50 deletions(-) diff --git a/src/client/service/nodesGetAll.ts b/src/client/service/nodesGetAll.ts index bc01e84e0..8c021a248 100644 --- a/src/client/service/nodesGetAll.ts +++ b/src/client/service/nodesGetAll.ts @@ -1,8 +1,9 @@ import type * as grpc from '@grpc/grpc-js'; import type { Authenticate } from '../types'; -import type { KeyManager } from '../../keys'; +import type KeyManager from '../../keys/KeyManager'; import type { NodeId } from '../../nodes/types'; import type * as utilsPB from '../../proto/js/polykey/v1/utils/utils_pb'; +import type NodeGraph from '../../nodes/NodeGraph'; import { IdInternal } from '@matrixai/id'; import { utils as nodesUtils } from '../../nodes'; import { utils as grpcUtils } from '../../grpc'; @@ -12,11 +13,11 @@ import * as nodesPB from '../../proto/js/polykey/v1/nodes/nodes_pb'; * Retrieves all nodes from all buckets in the NodeGraph. 
*/ function nodesGetAll({ - // NodeGraph, + nodeGraph, keyManager, authenticate, }: { - // NodeGraph: NodeGraph; + nodeGraph: NodeGraph; keyManager: KeyManager; authenticate: Authenticate; }) { @@ -28,10 +29,8 @@ function nodesGetAll({ const response = new nodesPB.NodeBuckets(); const metadata = await authenticate(call.metadata); call.sendMetadata(metadata); - // FIXME: - // const buckets = await nodeGraph.getAllBuckets(); - const buckets: any = []; - for (const b of buckets) { + const buckets = nodeGraph.getBuckets(); + for await (const b of buckets) { let index; for (const id of Object.keys(b)) { const encodedId = nodesUtils.encodeNodeId( @@ -48,7 +47,7 @@ function nodesGetAll({ ); } // Need to either add node to an existing bucket, or create a new - // bucket (if doesn't exist) + // bucket (if it doesn't exist) const bucket = response.getBucketsMap().get(index); if (bucket) { bucket.getNodeTableMap().set(encodedId, address); diff --git a/src/nodes/NodeConnectionManager.ts b/src/nodes/NodeConnectionManager.ts index 484757f81..8efe369e9 100644 --- a/src/nodes/NodeConnectionManager.ts +++ b/src/nodes/NodeConnectionManager.ts @@ -112,7 +112,7 @@ class NodeConnectionManager { this.nodeManager = nodeManager; for (const nodeIdEncoded in this.seedNodes) { const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded)!; - await this.nodeGraph.setNode(nodeId, this.seedNodes[nodeIdEncoded]); // FIXME: also fine implicit transactions + await this.nodeGraph.setNode(nodeId, this.seedNodes[nodeIdEncoded]); } this.logger.info(`Started ${this.constructor.name}`); } @@ -264,7 +264,6 @@ class NodeConnectionManager { )}`, ); // Creating the connection and set in map - // FIXME: this is fine, just use the implicit tran. 
fix this when adding optional transactions const targetAddress = await this.findNode(targetNodeId); if (targetAddress == null) { throw new nodesErrors.ErrorNodeGraphNodeIdNotFound(); @@ -411,7 +410,6 @@ class NodeConnectionManager { return address; } - // FIXME: getClosestNodes was moved to NodeGraph? that needs to be updated. /** * Attempts to locate a target node in the network (using Kademlia). * Adds all discovered, active nodes to the current node's database (up to k @@ -438,7 +436,6 @@ class NodeConnectionManager { // Let foundTarget: boolean = false; let foundAddress: NodeAddress | undefined = undefined; // Get the closest alpha nodes to the target node (set as shortlist) - // FIXME: no tran const shortlist = await this.nodeGraph.getClosestNodes( targetNodeId, this.initialClosestNodes, @@ -473,7 +470,6 @@ class NodeConnectionManager { try { // Add the node to the database so that we can find its address in // call to getConnectionToNode - // FIXME: no tran await this.nodeGraph.setNode(nextNodeId, nextNodeAddress.address); await this.getConnection(nextNodeId, timer); } catch (e) { @@ -496,7 +492,6 @@ class NodeConnectionManager { continue; } if (nodeId.equals(targetNodeId)) { - // FIXME: no tran await this.nodeGraph.setNode(nodeId, nodeData.address); foundAddress = nodeData.address; // We have found the target node, so we can stop trying to look for it diff --git a/src/nodes/NodeGraph.ts b/src/nodes/NodeGraph.ts index a5b8da332..c9ebaf0f3 100644 --- a/src/nodes/NodeGraph.ts +++ b/src/nodes/NodeGraph.ts @@ -679,8 +679,6 @@ class NodeGraph { * current node has less than k nodes in all of its buckets, in which case it * returns all nodes it has knowledge of) */ - // FIXME: this is still operating on assumptions from old code. - // I can't get the gt/lt to work on the iterator. 
@ready(new nodesErrors.ErrorNodeGraphNotRunning()) public async getClosestNodes( nodeId: NodeId, diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index 23832dbb7..95251648c 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -14,7 +14,7 @@ import type { } from '../nodes/types'; import type { ClaimEncoded } from '../claims/types'; import type { Timer } from '../types'; -import type { PromiseType } from '../utils/utils'; +import type { PromiseDeconstructed } from '../utils/utils'; import type { AbortSignal } from 'node-abort-controller'; import Logger from '@matrixai/logger'; import { StartStop, ready } from '@matrixai/async-init/dist/StartStop'; @@ -47,8 +47,8 @@ class NodeManager { protected refreshBucketQueue: Set = new Set(); protected refreshBucketQueueRunning: boolean = false; protected refreshBucketQueueRunner: Promise; - protected refreshBucketQueuePlug_: PromiseType = promise(); - protected refreshBucketQueueDrained_: PromiseType = promise(); + protected refreshBucketQueuePlug_: PromiseDeconstructed = promise(); + protected refreshBucketQueueDrained_: PromiseDeconstructed = promise(); protected refreshBucketQueueAbortController: AbortController; constructor({ diff --git a/src/nodes/Queue.ts b/src/nodes/Queue.ts index 0f9c1485e..602efd5ae 100644 --- a/src/nodes/Queue.ts +++ b/src/nodes/Queue.ts @@ -1,4 +1,4 @@ -import type { PromiseType } from '../utils'; +import type { PromiseDeconstructed } from '../utils'; import Logger from '@matrixai/logger'; import { StartStop, ready } from '@matrixai/async-init/dist/StartStop'; import * as nodesErrors from './errors'; @@ -11,8 +11,8 @@ class Queue { protected end: boolean = false; protected queue: Array<() => Promise> = []; protected runner: Promise; - protected plug_: PromiseType = promise(); - protected drained_: PromiseType = promise(); + protected plug_: PromiseDeconstructed = promise(); + protected drained_: PromiseDeconstructed = promise(); constructor({ logger }: { logger?: 
Logger }) { this.logger = logger ?? new Logger(this.constructor.name); diff --git a/src/nodes/types.ts b/src/nodes/types.ts index 8e173b4f2..37775be9d 100644 --- a/src/nodes/types.ts +++ b/src/nodes/types.ts @@ -33,8 +33,6 @@ type NodeBucketMeta = { count: number; }; -// Type NodeBucketMetaProps = NonFunctionProperties; - // Just make the bucket entries also // bucketIndex anot as a key // but as the domain @@ -45,20 +43,8 @@ type NodeData = { lastUpdated: number; }; -// Type NodeBucketEntry = { -// address: NodeAddress; -// lastUpdated: Date; -// }; - type SeedNodes = Record; -// FIXME: should have a proper name -type NodeEntry = { - id: NodeId; - address: NodeAddress; - distance: BigInt; -}; - /** * A claim made on a node. That is, can be either: * - a claim from a node -> node @@ -106,9 +92,6 @@ export type { NodeBucketMeta, NodeBucket, NodeData, - NodeEntry, - // NodeBucketEntry, - NodeGraphOp, NodeGraphSpace, }; diff --git a/src/nodes/utils.ts b/src/nodes/utils.ts index 449fc407b..0078ef784 100644 --- a/src/nodes/utils.ts +++ b/src/nodes/utils.ts @@ -6,12 +6,11 @@ import type { } from './types'; import { IdInternal } from '@matrixai/id'; import lexi from 'lexicographic-integer'; +import { utils as dbUtils } from '@matrixai/db'; import { bytes2BigInt } from '../utils'; import * as keysUtils from '../keys/utils'; -// FIXME: -const prefixBuffer = Buffer.from([33]); -// Const prefixBuffer = Buffer.from(dbUtils.prefix); +const sepBuffer = dbUtils.sep; /** * Encodes the NodeId as a `base32hex` string @@ -94,9 +93,9 @@ function bucketKey(bucketIndex: NodeBucketIndex): string { */ function bucketsDbKey(bucketIndex: NodeBucketIndex, nodeId: NodeId): Buffer { return Buffer.concat([ - prefixBuffer, + sepBuffer, Buffer.from(bucketKey(bucketIndex)), - prefixBuffer, + sepBuffer, bucketDbKey(nodeId), ]); } @@ -117,9 +116,9 @@ function lastUpdatedBucketsDbKey( nodeId: NodeId, ): Buffer { return Buffer.concat([ - prefixBuffer, + sepBuffer, Buffer.from(bucketKey(bucketIndex)), 
- prefixBuffer, + sepBuffer, lastUpdatedBucketDbKey(lastUpdated, nodeId), ]); } @@ -313,7 +312,7 @@ function generateRandomNodeIdForBucket( } export { - prefixBuffer, + sepBuffer, encodeNodeId, decodeNodeId, bucketIndex, diff --git a/src/utils/utils.ts b/src/utils/utils.ts index 168825b32..0a1519d19 100644 --- a/src/utils/utils.ts +++ b/src/utils/utils.ts @@ -170,7 +170,7 @@ function promisify< }; } -export type PromiseType = { +type PromiseDeconstructed = { p: Promise; resolveP: (value: T | PromiseLike) => void; rejectP: (reason?: any) => void; @@ -179,7 +179,7 @@ export type PromiseType = { /** * Deconstructed promise */ -function promise(): PromiseType { +function promise(): PromiseDeconstructed { let resolveP, rejectP; const p = new Promise((resolve, reject) => { resolveP = resolve; @@ -310,6 +310,7 @@ function debounce

( }; } +export type { PromiseDeconstructed }; export { getDefaultNodePath, never, diff --git a/src/vaults/VaultInternal.ts b/src/vaults/VaultInternal.ts index ae2adf6cf..b5e32da06 100644 --- a/src/vaults/VaultInternal.ts +++ b/src/vaults/VaultInternal.ts @@ -35,7 +35,7 @@ import * as validationUtils from '../validation/utils'; import * as vaultsPB from '../proto/js/polykey/v1/vaults/vaults_pb'; import { never } from '../utils/utils'; -export type RemoteInfo = { +type RemoteInfo = { remoteNode: NodeIdEncoded; remoteVault: VaultIdEncoded; }; @@ -1098,3 +1098,4 @@ class VaultInternal { } export default VaultInternal; +export type { RemoteInfo }; From 7860714dc943c3f5a082f749a025d03008aec174 Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Tue, 7 Jun 2022 13:24:00 +1000 Subject: [PATCH 33/39] build: removing `node-abort-controller` polyfill Abort controller functionality is included in node now. --- package-lock.json | 5 ----- package.json | 1 - src/nodes/NodeConnectionManager.ts | 1 - src/nodes/NodeManager.ts | 4 +--- 4 files changed, 1 insertion(+), 10 deletions(-) diff --git a/package-lock.json b/package-lock.json index 5ad06be76..ed112e333 100644 --- a/package-lock.json +++ b/package-lock.json @@ -16045,11 +16045,6 @@ } } }, - "node-abort-controller": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-3.0.1.tgz", - "integrity": "sha512-/ujIVxthRs+7q6hsdjHMaj8hRG9NuWmwrz+JdRwZ14jdFoKSkm+vDsCbF9PLpnSqjaWQJuTmVtcWHNLr+vrOFw==" - }, "node-fetch": { "version": "2.6.7", "requires": { diff --git a/package.json b/package.json index 3759aef1d..83a5806a2 100644 --- a/package.json +++ b/package.json @@ -100,7 +100,6 @@ "jose": "^4.3.6", "lexicographic-integer": "^1.1.0", "multiformats": "^9.4.8", - "node-abort-controller": "^3.0.1", "node-forge": "^0.10.0", "pako": "^1.0.11", "prompts": "^2.4.1", diff --git a/src/nodes/NodeConnectionManager.ts b/src/nodes/NodeConnectionManager.ts index 8efe369e9..093fe22d6 100644 
--- a/src/nodes/NodeConnectionManager.ts +++ b/src/nodes/NodeConnectionManager.ts @@ -13,7 +13,6 @@ import type { SeedNodes, } from './types'; import type NodeManager from './NodeManager'; -import type { AbortSignal } from 'node-abort-controller'; import { withF } from '@matrixai/resources'; import Logger from '@matrixai/logger'; import { ready, StartStop } from '@matrixai/async-init/dist/StartStop'; diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index 95251648c..ffd9aa18f 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -15,10 +15,8 @@ import type { import type { ClaimEncoded } from '../claims/types'; import type { Timer } from '../types'; import type { PromiseDeconstructed } from '../utils/utils'; -import type { AbortSignal } from 'node-abort-controller'; import Logger from '@matrixai/logger'; import { StartStop, ready } from '@matrixai/async-init/dist/StartStop'; -import { AbortController } from 'node-abort-controller'; import * as nodesErrors from './errors'; import * as nodesUtils from './utils'; import * as networkUtils from '../network/utils'; @@ -394,7 +392,7 @@ class NodeManager { /** * Adds a node to the node graph. 
This assumes that you have already authenticated the node * Updates the node if the node already exists - * This operation is blocking by default - set `block` to false to make it non-blocking + * This operation is blocking by default - set `block` to false to make it non-blocking * @param nodeId - Id of the node we wish to add * @param nodeAddress - Expected address of the node we want to add * @param block - Flag for if the operation should block or utilize the async queue From fb46b13ea890732b70b9ef218ba6e5d10d570fc5 Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Tue, 7 Jun 2022 15:09:46 +1000 Subject: [PATCH 34/39] build: removing unneeded files --- test-iterator.ts | 31 -- test-lexi.ts | 4 - test-nodegraph.ts | 107 ----- test-nodeidgen.ts | 44 --- test-order.ts | 98 ----- test-sorting.ts | 28 -- test-split.ts | 37 -- test-trie.ts | 29 -- tests/nodes/NodeGraph.test.ts.old | 624 ------------------------------ 9 files changed, 1002 deletions(-) delete mode 100644 test-iterator.ts delete mode 100644 test-lexi.ts delete mode 100644 test-nodegraph.ts delete mode 100644 test-nodeidgen.ts delete mode 100644 test-order.ts delete mode 100644 test-sorting.ts delete mode 100644 test-split.ts delete mode 100644 test-trie.ts delete mode 100644 tests/nodes/NodeGraph.test.ts.old diff --git a/test-iterator.ts b/test-iterator.ts deleted file mode 100644 index 82a21762c..000000000 --- a/test-iterator.ts +++ /dev/null @@ -1,31 +0,0 @@ - - -function getYouG () { - console.log('ALREADY EXECUTED'); - return abc(); -} - -async function *abc() { - console.log('START'); - yield 1; - yield 2; - yield 3; -} - -async function main () { - - // we would want that you don't iterate it - - const g = getYouG(); - - await g.next(); - - // console.log('SUP'); - - // for await (const r of abc()) { - // console.log(r); - // } - -} - -main(); diff --git a/test-lexi.ts b/test-lexi.ts deleted file mode 100644 index b48f9cea1..000000000 --- a/test-lexi.ts +++ /dev/null @@ -1,4 +0,0 @@ -import lexi 
from 'lexicographic-integer'; - - -console.log(lexi.pack(1646203779)); diff --git a/test-nodegraph.ts b/test-nodegraph.ts deleted file mode 100644 index 33bd58bb7..000000000 --- a/test-nodegraph.ts +++ /dev/null @@ -1,107 +0,0 @@ -import type { NodeId, NodeAddress } from './src/nodes/types'; -import { DB } from '@matrixai/db'; -import { IdInternal } from '@matrixai/id'; -import * as keysUtils from './src/keys/utils'; -import * as nodesUtils from './src/nodes/utils'; -import NodeGraph from './src/nodes/NodeGraph'; -import KeyManager from './src/keys/KeyManager'; - -function generateRandomNodeId(readable: boolean = false): NodeId { - if (readable) { - const random = keysUtils.getRandomBytesSync(16).toString('hex'); - return IdInternal.fromString(random); - } else { - const random = keysUtils.getRandomBytesSync(32); - return IdInternal.fromBuffer(random); - } -} - -async function main () { - - const db = await DB.createDB({ - dbPath: './tmp/db' - }); - - const keyManager = await KeyManager.createKeyManager({ - keysPath: './tmp/keys', - password: 'abc123', - // fresh: true - }); - - const nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyManager, - fresh: true - }); - - for (let i = 0; i < 10; i++) { - await nodeGraph.setNode( - generateRandomNodeId(), - { - host: '127.0.0.1', - port: 55555 - } as NodeAddress - ); - } - - for await (const [bucketIndex, bucket] of nodeGraph.getBuckets()) { - - // the bucket lengths are wrong - console.log( - 'BucketIndex', - bucketIndex, - 'Bucket Count', - bucket.length, - ); - - // console.log(bucket); - for (const [nodeId, nodeData] of bucket) { - // console.log('NODEID', nodeId); - // console.log('NODEDATA', nodeData); - // console.log(nodeData.address); - } - } - - for await (const [nodeId, nodeData] of nodeGraph.getNodes()) { - // console.log(nodeId, nodeData); - } - - const bucket = await nodeGraph.getBucket(255, 'lastUpdated'); - console.log(bucket.length); - - // console.log('OLD NODE ID', keyManager.getNodeId()); - // 
const newNodeId = generateRandomNodeId(); - // console.log('NEW NODE ID', newNodeId); - - // console.log('---------FIRST RESET--------'); - - // await nodeGraph.resetBuckets(newNodeId); - // for await (const [bucketIndex, bucket] of nodeGraph.getBuckets()) { - // console.log( - // 'BucketIndex', - // bucketIndex, - // 'Bucket Count', - // Object.keys(bucket).length - // ); - // } - - - // console.log('---------SECOND RESET--------'); - // const newNodeId2 = generateRandomNodeId(); - // await nodeGraph.resetBuckets(newNodeId2); - - // for await (const [bucketIndex, bucket] of nodeGraph.getBuckets()) { - // console.log( - // 'BucketIndex', - // bucketIndex, - // 'Bucket Count', - // Object.keys(bucket).length - // ); - // } - - await nodeGraph.stop(); - await keyManager.stop(); - await db.stop(); -} - -main(); diff --git a/test-nodeidgen.ts b/test-nodeidgen.ts deleted file mode 100644 index 2f79bddda..000000000 --- a/test-nodeidgen.ts +++ /dev/null @@ -1,44 +0,0 @@ -import type { NodeId } from './src/nodes/types'; -import { IdInternal } from '@matrixai/id'; -import * as keysUtils from './src/keys/utils'; -import * as nodesUtils from './src/nodes/utils'; - -function generateRandomNodeId(readable: boolean = false): NodeId { - if (readable) { - const random = keysUtils.getRandomBytesSync(16).toString('hex'); - return IdInternal.fromString(random); - } else { - const random = keysUtils.getRandomBytesSync(32); - return IdInternal.fromBuffer(random); - } -} - -async function main () { - - const firstNodeId = generateRandomNodeId(); - - - let lastBucket = 0; - let penultimateBucket = 0; - let lowerBuckets = 0; - - for (let i = 0; i < 1000; i++) { - const nodeId = generateRandomNodeId(); - const bucketIndex = nodesUtils.bucketIndex(firstNodeId, nodeId); - if (bucketIndex === 255) { - lastBucket++; - } else if (bucketIndex === 254) { - penultimateBucket++; - } else { - lowerBuckets++; - } - } - - console.log(lastBucket); - console.log(penultimateBucket); - 
console.log(lowerBuckets); - - -} - -main(); diff --git a/test-order.ts b/test-order.ts deleted file mode 100644 index f6046d6da..000000000 --- a/test-order.ts +++ /dev/null @@ -1,98 +0,0 @@ -import { DB } from '@matrixai/db'; -import lexi from 'lexicographic-integer'; -import { getUnixtime, hex2Bytes } from './src/utils'; - -async function main () { - - const db = await DB.createDB({ - dbPath: './tmp/orderdb', - fresh: true - }); - - await db.put([], 'node1', 'value'); - await db.put([], 'node2', 'value'); - await db.put([], 'node3', 'value'); - await db.put([], 'node4', 'value'); - await db.put([], 'node5', 'value'); - await db.put([], 'node6', 'value'); - await db.put([], 'node7', 'value'); - - const now = new Date; - const t1 = new Date(now.getTime() + 1000 * 1); - const t2 = new Date(now.getTime() + 1000 * 2); - const t3 = new Date(now.getTime() + 1000 * 3); - const t4 = new Date(now.getTime() + 1000 * 4); - const t5 = new Date(now.getTime() + 1000 * 5); - const t6 = new Date(now.getTime() + 1000 * 6); - const t7 = new Date(now.getTime() + 1000 * 7); - - // so unix time is only what we really need to know - // further precision is unlikely - // and hex-packed time is shorter keys - // so it is likely faster - // the only issue is that unpacking requires - // converting hex into bytes, then into strings - - // console.log(t1.getTime()); - // console.log(getUnixtime(t1)); - // console.log(lexi.pack(getUnixtime(t1), 'hex')); - // console.log(lexi.pack(t1.getTime(), 'hex')); - // console.log(t1.toISOString()); - - - // buckets0!BUCKETINDEX!NODEID - // buckets0!BUCKETINDEX!date - - // Duplicate times that are put here - // But differentiate by the node1, node2 - await db.put([], lexi.pack(getUnixtime(t6), 'hex') + '-node1', 'value'); - await db.put([], lexi.pack(getUnixtime(t6), 'hex') + '-node2', 'value'); - - await db.put([], lexi.pack(getUnixtime(t1), 'hex') + '-node3', 'value'); - await db.put([], lexi.pack(getUnixtime(t4), 'hex') + '-node4', 'value'); - await 
db.put([], lexi.pack(getUnixtime(t3), 'hex') + '-node5', 'value'); - await db.put([], lexi.pack(getUnixtime(t2), 'hex') + '-node6', 'value'); - await db.put([], lexi.pack(getUnixtime(t5), 'hex') + '-node7', 'value'); - - // await db.put([], t6.toISOString() + '-node1', 'value'); - // await db.put([], t6.toISOString() + '-node2', 'value'); - - // await db.put([], t1.toISOString() + '-node3', 'value'); - // await db.put([], t4.toISOString() + '-node4', 'value'); - // await db.put([], t3.toISOString() + '-node5', 'value'); - // await db.put([], t2.toISOString() + '-node6', 'value'); - // await db.put([], t5.toISOString() + '-node7', 'value'); - - // Why did this require `-node3` - - // this will awlays get one or the other - - // ok so we if we want to say get a time - // or order it by time - // we are goingto have to create read stream over the bucket right? - // yea so we would have another sublevel, or at least a sublevel formed by the bucket - // one that is the bucket index - // so that would be the correct way to do it - - for await (const o of db.db.createReadStream({ - gte: lexi.pack(getUnixtime(t1), 'hex'), - limit: 1, - // keys: true, - // values: true, - // lte: lexi.pack(getUnixtime(t6)) - })) { - - console.log(o.key.toString()); - - } - - await db.stop(); - - - // so it works - // now if you give it something liek - - -} - -main(); diff --git a/test-sorting.ts b/test-sorting.ts deleted file mode 100644 index 1692fa83f..000000000 --- a/test-sorting.ts +++ /dev/null @@ -1,28 +0,0 @@ -import * as testNodesUtils from './tests/nodes/utils'; - -const arr = [ - { a: 'abc', b: 3}, - { a: 'abc', b: 1}, - { a: 'abc', b: 0}, -]; - -arr.sort((a, b): number => { - if (a.b > b.b) { - return 1; - } else if (a.b < b.b) { - return -1; - } else { - return 0; - } -}); - -console.log(arr); - -const arr2 = [3, 1, 0]; - -arr2.sort(); - -console.log(arr2); - - -console.log(testNodesUtils.generateRandomNodeId()); diff --git a/test-split.ts b/test-split.ts deleted file mode 
100644 index ee06d75d6..000000000 --- a/test-split.ts +++ /dev/null @@ -1,37 +0,0 @@ - -function bufferSplit(input: Buffer, delimiter?: Buffer): Array { - const output: Array = []; - let delimiterIndex = 0; - let chunkIndex = 0; - if (delimiter != null) { - while (true) { - const i = input.indexOf( - delimiter, - delimiterIndex - ); - if (i > -1) { - output.push(input.subarray(chunkIndex, i)); - delimiterIndex = i + delimiter.byteLength; - chunkIndex = i + delimiter.byteLength; - } else { - output.push(input.subarray(chunkIndex)); - break; - } - } - } else { - for (let i = 0; i < input.byteLength; i++) { - output.push(input.subarray(i, i + 1)); - } - } - return output; -} - - -const b = Buffer.from('!a!!b!'); - -console.log(bufferSplit(b, Buffer.from('!!'))); -console.log(bufferSplit(b)); - -const s = '!a!!b!'; - -console.log(s.split('!!')); diff --git a/test-trie.ts b/test-trie.ts deleted file mode 100644 index a17c4165d..000000000 --- a/test-trie.ts +++ /dev/null @@ -1,29 +0,0 @@ -import * as utils from './src/utils'; -import * as nodesUtils from './src/nodes/utils'; - -// 110 -const ownNodeId = Buffer.from([6]); - -const i = 2; - -const maxDistance = utils.bigInt2Bytes(BigInt(2 ** i)); -const minDistance = utils.bigInt2Bytes(BigInt(2 ** (i - 1))); - -console.log('max distance', maxDistance, utils.bytes2Bits(maxDistance)); -console.log('min distance', minDistance, utils.bytes2Bits(minDistance)); - -// ownNodeId XOR maxdistance = GTE node id -const gte = ownNodeId.map((byte, i) => byte ^ maxDistance[i]); - -// ownNodeId XOR mindistance = LT node id -const lt = ownNodeId.map((byte, i) => byte ^ minDistance[i]); - -console.log('Lowest Distance Node (inc)', gte, utils.bytes2Bits(gte)); -console.log('Greatest Distance Node (exc)', lt, utils.bytes2Bits(lt)); - -// function nodeDistance(nodeId1: Buffer, nodeId2: Buffer): bigint { -// const distance = nodeId1.map((byte, i) => byte ^ nodeId2[i]); -// return utils.bytes2BigInt(distance); -// } - -// 
console.log(nodeDistance(ownNodeId, Buffer.from([0]))); diff --git a/tests/nodes/NodeGraph.test.ts.old b/tests/nodes/NodeGraph.test.ts.old deleted file mode 100644 index 1960c02d3..000000000 --- a/tests/nodes/NodeGraph.test.ts.old +++ /dev/null @@ -1,624 +0,0 @@ -import type { Host, Port } from '@/network/types'; -import type { NodeAddress, NodeData, NodeId } from '@/nodes/types'; -import os from 'os'; -import path from 'path'; -import fs from 'fs'; -import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; -import { DB } from '@matrixai/db'; -import { IdInternal } from '@matrixai/id'; -import NodeConnectionManager from '@/nodes/NodeConnectionManager'; -import NodeGraph from '@/nodes/NodeGraph'; -import * as nodesErrors from '@/nodes/errors'; -import KeyManager from '@/keys/KeyManager'; -import * as keysUtils from '@/keys/utils'; -import ForwardProxy from '@/network/ForwardProxy'; -import ReverseProxy from '@/network/ReverseProxy'; -import * as nodesUtils from '@/nodes/utils'; -import Sigchain from '@/sigchain/Sigchain'; -import * as nodesTestUtils from './utils'; - -describe(`${NodeGraph.name} test`, () => { - const password = 'password'; - let nodeGraph: NodeGraph; - let nodeId: NodeId; - - const nodeId1 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 5, - ]); - const dummyNode = nodesUtils.decodeNodeId( - 'vi3et1hrpv2m2lrplcm7cu913kr45v51cak54vm68anlbvuf83ra0', - )!; - - const logger = new Logger(`${NodeGraph.name} test`, LogLevel.ERROR, [ - new StreamHandler(), - ]); - let fwdProxy: ForwardProxy; - let revProxy: ReverseProxy; - let dataDir: string; - let keyManager: KeyManager; - let db: DB; - let nodeConnectionManager: NodeConnectionManager; - let sigchain: Sigchain; - - const hostGen = (i: number) => `${i}.${i}.${i}.${i}` as Host; - - const mockedGenerateDeterministicKeyPair = jest.spyOn( - keysUtils, - 'generateDeterministicKeyPair', - ); - - beforeEach(async () => { - 
mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { - return keysUtils.generateKeyPair(bits); - }); - - dataDir = await fs.promises.mkdtemp( - path.join(os.tmpdir(), 'polykey-test-'), - ); - const keysPath = `${dataDir}/keys`; - keyManager = await KeyManager.createKeyManager({ - password, - keysPath, - logger, - }); - fwdProxy = new ForwardProxy({ - authToken: 'auth', - logger: logger, - }); - - revProxy = new ReverseProxy({ - logger: logger, - }); - - await fwdProxy.start({ - tlsConfig: { - keyPrivatePem: keyManager.getRootKeyPairPem().privateKey, - certChainPem: await keyManager.getRootCertChainPem(), - }, - }); - const dbPath = `${dataDir}/db`; - db = await DB.createDB({ - dbPath, - logger, - crypto: { - key: keyManager.dbKey, - ops: { - encrypt: keysUtils.encryptWithKey, - decrypt: keysUtils.decryptWithKey, - }, - }, - }); - sigchain = await Sigchain.createSigchain({ - keyManager: keyManager, - db: db, - logger: logger, - }); - nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyManager, - logger, - }); - nodeConnectionManager = new NodeConnectionManager({ - keyManager: keyManager, - nodeGraph: nodeGraph, - fwdProxy: fwdProxy, - revProxy: revProxy, - logger: logger, - }); - await nodeConnectionManager.start(); - // Retrieve the NodeGraph reference from NodeManager - nodeId = keyManager.getNodeId(); - }); - - afterEach(async () => { - await db.stop(); - await sigchain.stop(); - await nodeConnectionManager.stop(); - await nodeGraph.stop(); - await keyManager.stop(); - await fwdProxy.stop(); - await fs.promises.rm(dataDir, { - force: true, - recursive: true, - }); - }); - - test('NodeGraph readiness', async () => { - const nodeGraph2 = await NodeGraph.createNodeGraph({ - db, - keyManager, - logger, - }); - // @ts-ignore - await expect(nodeGraph2.destroy()).rejects.toThrow( - nodesErrors.ErrorNodeGraphRunning, - ); - // Should be a noop - await nodeGraph2.start(); - await nodeGraph2.stop(); - await nodeGraph2.destroy(); - await expect(async () 
=> { - await nodeGraph2.start(); - }).rejects.toThrow(nodesErrors.ErrorNodeGraphDestroyed); - await expect(async () => { - await nodeGraph2.getBucket(0); - }).rejects.toThrow(nodesErrors.ErrorNodeGraphNotRunning); - await expect(async () => { - await nodeGraph2.getBucket(0); - }).rejects.toThrow(nodesErrors.ErrorNodeGraphNotRunning); - }); - test('knows node (true and false case)', async () => { - // Known node - const nodeAddress1: NodeAddress = { - host: '127.0.0.1' as Host, - port: 11111 as Port, - }; - await nodeGraph.setNode(nodeId1, nodeAddress1); - expect(await nodeGraph.knowsNode(nodeId1)).toBeTruthy(); - - // Unknown node - expect(await nodeGraph.knowsNode(dummyNode)).toBeFalsy(); - }); - test('finds correct node address', async () => { - // New node added - const newNode2Id = nodeId1; - const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - // Get node address - const foundAddress = await nodeGraph.getNode(newNode2Id); - expect(foundAddress).toEqual({ host: '227.1.1.1', port: 4567 }); - }); - test('unable to find node address', async () => { - // New node added - const newNode2Id = nodeId1; - const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - // Get node address (of non-existent node) - const foundAddress = await nodeGraph.getNode(dummyNode); - expect(foundAddress).toBeUndefined(); - }); - test('adds a single node into a bucket', async () => { - // New node added - const newNode2Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 1); - const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - // Check new node is in retrieved bucket from database - // bucketIndex = 1 as "NODEID1" XOR "NODEID2" = 3 - const bucket = await nodeGraph.getBucket(1); - expect(bucket).toBeDefined(); - expect(bucket![newNode2Id]).toEqual({ - 
address: { host: '227.1.1.1', port: 4567 }, - lastUpdated: expect.any(Date), - }); - }); - test('adds multiple nodes into the same bucket', async () => { - // Add 3 new nodes into bucket 4 - const bucketIndex = 4; - const newNode1Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 0, - ); - const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; - await nodeGraph.setNode(newNode1Id, newNode1Address); - - const newNode2Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 1, - ); - const newNode2Address = { host: '5.5.5.5', port: 5555 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - const newNode3Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 2, - ); - const newNode3Address = { host: '6.6.6.6', port: 6666 } as NodeAddress; - await nodeGraph.setNode(newNode3Id, newNode3Address); - // Based on XOR values, all 3 nodes should appear in bucket 4 - const bucket = await nodeGraph.getBucket(4); - expect(bucket).toBeDefined(); - if (!bucket) fail('bucket should be defined, letting TS know'); - expect(bucket[newNode1Id]).toEqual({ - address: { host: '4.4.4.4', port: 4444 }, - lastUpdated: expect.any(Date), - }); - expect(bucket[newNode2Id]).toEqual({ - address: { host: '5.5.5.5', port: 5555 }, - lastUpdated: expect.any(Date), - }); - expect(bucket[newNode3Id]).toEqual({ - address: { host: '6.6.6.6', port: 6666 }, - lastUpdated: expect.any(Date), - }); - }); - test('adds a single node into different buckets', async () => { - // New node for bucket 3 - const newNode1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 3); - const newNode1Address = { host: '1.1.1.1', port: 1111 } as NodeAddress; - await nodeGraph.setNode(newNode1Id, newNode1Address); - // New node for bucket 255 (the highest possible bucket) - const newNode2Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 255); - const newNode2Address = { host: '2.2.2.2', port: 2222 } as NodeAddress; - await 
nodeGraph.setNode(newNode2Id, newNode2Address); - - const bucket3 = await nodeGraph.getBucket(3); - const bucket351 = await nodeGraph.getBucket(255); - if (bucket3 && bucket351) { - expect(bucket3[newNode1Id]).toEqual({ - address: { host: '1.1.1.1', port: 1111 }, - lastUpdated: expect.any(Date), - }); - expect(bucket351[newNode2Id]).toEqual({ - address: { host: '2.2.2.2', port: 2222 }, - lastUpdated: expect.any(Date), - }); - } else { - // Should be unreachable - fail('Bucket undefined'); - } - }); - test('deletes a single node (and removes bucket)', async () => { - // New node for bucket 2 - const newNode1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 2); - const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; - await nodeGraph.setNode(newNode1Id, newNode1Address); - - // Check the bucket is there first - const bucket = await nodeGraph.getBucket(2); - if (bucket) { - expect(bucket[newNode1Id]).toEqual({ - address: { host: '4.4.4.4', port: 4444 }, - lastUpdated: expect.any(Date), - }); - } else { - // Should be unreachable - fail('Bucket undefined'); - } - - // Delete the node - await nodeGraph.unsetNode(newNode1Id); - // Check bucket no longer exists - const newBucket = await nodeGraph.getBucket(2); - expect(newBucket).toBeUndefined(); - }); - test('deletes a single node (and retains remainder of bucket)', async () => { - // Add 3 new nodes into bucket 4 - const bucketIndex = 4; - const newNode1Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 0, - ); - const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; - await nodeGraph.setNode(newNode1Id, newNode1Address); - - const newNode2Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 1, - ); - const newNode2Address = { host: '5.5.5.5', port: 5555 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - const newNode3Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 2, - ); - const 
newNode3Address = { host: '6.6.6.6', port: 6666 } as NodeAddress; - await nodeGraph.setNode(newNode3Id, newNode3Address); - // Based on XOR values, all 3 nodes should appear in bucket 4 - const bucket = await nodeGraph.getBucket(bucketIndex); - if (bucket) { - expect(bucket[newNode1Id]).toEqual({ - address: { host: '4.4.4.4', port: 4444 }, - lastUpdated: expect.any(Date), - }); - expect(bucket[newNode2Id]).toEqual({ - address: { host: '5.5.5.5', port: 5555 }, - lastUpdated: expect.any(Date), - }); - expect(bucket[newNode3Id]).toEqual({ - address: { host: '6.6.6.6', port: 6666 }, - lastUpdated: expect.any(Date), - }); - } else { - // Should be unreachable - fail('Bucket undefined'); - } - - // Delete the node - await nodeGraph.unsetNode(newNode1Id); - // Check node no longer exists in the bucket - const newBucket = await nodeGraph.getBucket(bucketIndex); - if (newBucket) { - expect(newBucket[newNode1Id]).toBeUndefined(); - expect(bucket[newNode2Id]).toEqual({ - address: { host: '5.5.5.5', port: 5555 }, - lastUpdated: expect.any(Date), - }); - expect(bucket[newNode3Id]).toEqual({ - address: { host: '6.6.6.6', port: 6666 }, - lastUpdated: expect.any(Date), - }); - } else { - // Should be unreachable - fail('New bucket undefined'); - } - }); - test('enforces k-bucket size, removing least active node when a new node is discovered', async () => { - // Add k nodes to the database (importantly, they all go into the same bucket) - const bucketIndex = 59; - // Keep a record of the first node ID that we added - const firstNodeId = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - ); - for (let i = 1; i <= nodeGraph.maxNodesPerBucket; i++) { - // Add the current node ID - const nodeAddress = { - host: hostGen(i), - port: i as Port, - }; - await nodeGraph.setNode( - nodesTestUtils.generateNodeIdForBucket(nodeId, bucketIndex, i), - nodeAddress, - ); - // Increment the current node ID - } - // All of these nodes are in bucket 59 - const originalBucket = await 
nodeGraph.getBucket(bucketIndex); - if (originalBucket) { - expect(Object.keys(originalBucket).length).toBe( - nodeGraph.maxNodesPerBucket, - ); - } else { - // Should be unreachable - fail('Bucket undefined'); - } - - // Attempt to add a new node into this full bucket (increment the last node - // ID that was added) - const newNodeId = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - nodeGraph.maxNodesPerBucket + 1, - ); - const newNodeAddress = { host: '0.0.0.1' as Host, port: 1234 as Port }; - await nodeGraph.setNode(newNodeId, newNodeAddress); - - const finalBucket = await nodeGraph.getBucket(bucketIndex); - if (finalBucket) { - // We should still have a full bucket (but no more) - expect(Object.keys(finalBucket).length).toEqual( - nodeGraph.maxNodesPerBucket, - ); - // Ensure that this new node is in the bucket - expect(finalBucket[newNodeId]).toEqual({ - address: newNodeAddress, - lastUpdated: expect.any(Date), - }); - // NODEID1 should have been removed from this bucket (as this was the least active) - // The first node added should have been removed from this bucket (as this - // was the least active, purely because it was inserted first) - expect(finalBucket[firstNodeId]).toBeUndefined(); - } else { - // Should be unreachable - fail('Bucket undefined'); - } - }); - test('enforces k-bucket size, retaining all nodes if adding a pre-existing node', async () => { - // Add k nodes to the database (importantly, they all go into the same bucket) - const bucketIndex = 59; - const currNodeId = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - ); - // Keep a record of the first node ID that we added - // const firstNodeId = currNodeId; - let increment = 1; - for (let i = 1; i <= nodeGraph.maxNodesPerBucket; i++) { - // Add the current node ID - const nodeAddress = { - host: hostGen(i), - port: i as Port, - }; - await nodeGraph.setNode( - nodesTestUtils.generateNodeIdForBucket(nodeId, bucketIndex, increment), - nodeAddress, - ); - 
// Increment the current node ID - skip for the last one to keep currNodeId - // as the last added node ID - if (i !== nodeGraph.maxNodesPerBucket) { - increment++; - } - } - // All of these nodes are in bucket 59 - const originalBucket = await nodeGraph.getBucket(bucketIndex); - if (originalBucket) { - expect(Object.keys(originalBucket).length).toBe( - nodeGraph.maxNodesPerBucket, - ); - } else { - // Should be unreachable - fail('Bucket undefined'); - } - - // If we tried to re-add the first node, it would simply remove the original - // first node, as this is the "least active" - // We instead want to check that we don't mistakenly delete a node if we're - // updating an existing one - // So, re-add the last node - const newLastAddress: NodeAddress = { - host: '30.30.30.30' as Host, - port: 30 as Port, - }; - await nodeGraph.setNode(currNodeId, newLastAddress); - - const finalBucket = await nodeGraph.getBucket(bucketIndex); - if (finalBucket) { - // We should still have a full bucket - expect(Object.keys(finalBucket).length).toEqual( - nodeGraph.maxNodesPerBucket, - ); - // Ensure that this new node is in the bucket - expect(finalBucket[currNodeId]).toEqual({ - address: newLastAddress, - lastUpdated: expect.any(Date), - }); - } else { - // Should be unreachable - fail('Bucket undefined'); - } - }); - test('retrieves all buckets (in expected lexicographic order)', async () => { - // Bucket 0 is expected to never have any nodes (as nodeId XOR 0 = nodeId) - // Bucket 1 (minimum): - - const node1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 1); - const node1Address = { host: '1.1.1.1', port: 1111 } as NodeAddress; - await nodeGraph.setNode(node1Id, node1Address); - - // Bucket 4 (multiple nodes in 1 bucket): - const node41Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 4); - const node41Address = { host: '41.41.41.41', port: 4141 } as NodeAddress; - await nodeGraph.setNode(node41Id, node41Address); - const node42Id = 
nodesTestUtils.generateNodeIdForBucket(nodeId, 4, 1); - const node42Address = { host: '42.42.42.42', port: 4242 } as NodeAddress; - await nodeGraph.setNode(node42Id, node42Address); - - // Bucket 10 (lexicographic ordering - should appear after 2): - const node10Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 10); - const node10Address = { host: '10.10.10.10', port: 1010 } as NodeAddress; - await nodeGraph.setNode(node10Id, node10Address); - - // Bucket 255 (maximum): - const node255Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 255); - const node255Address = { - host: '255.255.255.255', - port: 255, - } as NodeAddress; - await nodeGraph.setNode(node255Id, node255Address); - - const buckets = await nodeGraph.getAllBuckets(); - expect(buckets.length).toBe(4); - // Buckets should be returned in lexicographic ordering (using hex keys to - // ensure the bucket indexes are in numberical order) - expect(buckets).toEqual([ - { - [node1Id]: { - address: { host: '1.1.1.1', port: 1111 }, - lastUpdated: expect.any(String), - }, - }, - { - [node41Id]: { - address: { host: '41.41.41.41', port: 4141 }, - lastUpdated: expect.any(String), - }, - [node42Id]: { - address: { host: '42.42.42.42', port: 4242 }, - lastUpdated: expect.any(String), - }, - }, - { - [node10Id]: { - address: { host: '10.10.10.10', port: 1010 }, - lastUpdated: expect.any(String), - }, - }, - { - [node255Id]: { - address: { host: '255.255.255.255', port: 255 }, - lastUpdated: expect.any(String), - }, - }, - ]); - }); - test( - 'refreshes buckets', - async () => { - const initialNodes: Record = {}; - // Generate and add some nodes - for (let i = 1; i < 255; i += 20) { - const newNodeId = nodesTestUtils.generateNodeIdForBucket( - keyManager.getNodeId(), - i, - ); - const nodeAddress = { - host: hostGen(i), - port: i as Port, - }; - await nodeGraph.setNode(newNodeId, nodeAddress); - initialNodes[newNodeId] = { - id: newNodeId, - address: nodeAddress, - distance: nodesUtils.calculateDistance( - 
keyManager.getNodeId(), - newNodeId, - ), - }; - } - - // Renew the keypair - await keyManager.renewRootKeyPair('newPassword'); - // Reset the test's node ID state - nodeId = keyManager.getNodeId(); - // Refresh the buckets - await nodeGraph.refreshBuckets(); - - // Get all the new buckets, and expect that each node is in the correct bucket - const newBuckets = await nodeGraph.getAllBuckets(); - let nodeCount = 0; - for (const b of newBuckets) { - for (const n of Object.keys(b)) { - const nodeId = IdInternal.fromString(n); - // Check that it was a node in the original DB - expect(initialNodes[nodeId]).toBeDefined(); - // Check it's in the correct bucket - const expectedIndex = nodesUtils.calculateBucketIndex( - keyManager.getNodeId(), - nodeId, - ); - const expectedBucket = await nodeGraph.getBucket(expectedIndex); - expect(expectedBucket).toBeDefined(); - expect(expectedBucket![nodeId]).toBeDefined(); - // Check it has the correct address - expect(b[nodeId].address).toEqual(initialNodes[nodeId].address); - nodeCount++; - } - } - // We had less than k (20) nodes, so we expect that all nodes will be re-added - // If we had more than k nodes, we may lose some of them (because the nodes - // may be re-added to newly full buckets) - expect(Object.keys(initialNodes).length).toEqual(nodeCount); - }, - global.defaultTimeout * 4, - ); - test('updates node', async () => { - // New node added - const node1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 2); - const node1Address = { host: '1.1.1.1', port: 1 } as NodeAddress; - await nodeGraph.setNode(node1Id, node1Address); - - // Check new node is in retrieved bucket from database - const bucket = await nodeGraph.getBucket(2); - const time1 = bucket![node1Id].lastUpdated; - - // Update node and check that time is later - const newNode1Address = { host: '2.2.2.2', port: 2 } as NodeAddress; - await nodeGraph.updateNode(node1Id, newNode1Address); - - const bucket2 = await nodeGraph.getBucket(2); - const time2 = 
bucket2![node1Id].lastUpdated; - expect(bucket2![node1Id].address).toEqual(newNode1Address); - expect(time1 < time2).toBeTruthy(); - }); -}); From 891fb8cde6a7a5e22253ad74beacd2b738b93154 Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Tue, 7 Jun 2022 16:59:35 +1000 Subject: [PATCH 35/39] fix: `NodeConnectionManager.syncNodeGraph` now pings nodes Need to ensure validity of nodes by pinging them before adding them to the node graph. #322 --- src/bin/nodes/CommandAdd.ts | 12 +- src/bin/utils/options.ts | 11 + src/client/service/nodesAdd.ts | 22 +- src/nodes/NodeConnectionManager.ts | 64 +++-- src/nodes/NodeManager.ts | 6 + src/nodes/errors.ts | 6 + src/proto/js/google/protobuf/any_pb.js | 1 + src/proto/js/google/protobuf/descriptor_pb.js | 1 + src/proto/js/google/protobuf/duration_pb.js | 1 + src/proto/js/google/protobuf/empty_pb.js | 1 + src/proto/js/google/protobuf/field_mask_pb.js | 1 + src/proto/js/google/protobuf/struct_pb.js | 1 + src/proto/js/google/protobuf/timestamp_pb.js | 1 + src/proto/js/google/protobuf/wrappers_pb.js | 1 + src/proto/js/polykey/v1/agent/agent_pb.js | 1 + src/proto/js/polykey/v1/agent_service_pb.js | 1 + .../js/polykey/v1/client_service_grpc_pb.d.ts | 20 +- .../js/polykey/v1/client_service_grpc_pb.js | 17 +- src/proto/js/polykey/v1/client_service_pb.js | 1 + .../js/polykey/v1/gestalts/gestalts_pb.js | 1 + .../js/polykey/v1/identities/identities_pb.js | 1 + src/proto/js/polykey/v1/keys/keys_pb.js | 1 + src/proto/js/polykey/v1/nodes/nodes_pb.d.ts | 32 +++ src/proto/js/polykey/v1/nodes/nodes_pb.js | 264 ++++++++++++++++++ .../v1/notifications/notifications_pb.js | 1 + .../polykey/v1/permissions/permissions_pb.js | 1 + src/proto/js/polykey/v1/secrets/secrets_pb.js | 1 + .../js/polykey/v1/sessions/sessions_pb.js | 1 + src/proto/js/polykey/v1/test_service_pb.js | 1 + src/proto/js/polykey/v1/utils/utils_pb.js | 1 + src/proto/js/polykey/v1/vaults/vaults_pb.js | 1 + .../schemas/polykey/v1/client_service.proto | 2 +- 
.../schemas/polykey/v1/nodes/nodes.proto | 7 + tests/bin/nodes/add.test.ts | 98 ++++++- tests/bin/nodes/find.test.ts | 60 ++-- tests/client/service/nodesAdd.test.ts | 8 +- .../NodeConnectionManager.general.test.ts | 35 ++- .../NodeConnectionManager.seednodes.test.ts | 38 ++- 38 files changed, 639 insertions(+), 84 deletions(-) diff --git a/src/bin/nodes/CommandAdd.ts b/src/bin/nodes/CommandAdd.ts index fdf49f48e..49ea3105a 100644 --- a/src/bin/nodes/CommandAdd.ts +++ b/src/bin/nodes/CommandAdd.ts @@ -18,6 +18,8 @@ class CommandAdd extends CommandPolykey { this.addOption(binOptions.nodeId); this.addOption(binOptions.clientHost); this.addOption(binOptions.clientPort); + this.addOption(binOptions.forceNodeAdd); + this.addOption(binOptions.noPing); this.action(async (nodeId: NodeId, host: Host, port: Port, options) => { const { default: PolykeyClient } = await import('../../PolykeyClient'); const nodesUtils = await import('../../nodes/utils'); @@ -46,13 +48,15 @@ class CommandAdd extends CommandPolykey { port: clientOptions.clientPort, logger: this.logger.getChild(PolykeyClient.name), }); - const nodeAddressMessage = new nodesPB.NodeAddress(); - nodeAddressMessage.setNodeId(nodesUtils.encodeNodeId(nodeId)); - nodeAddressMessage.setAddress( + const nodeAddMessage = new nodesPB.NodeAdd(); + nodeAddMessage.setNodeId(nodesUtils.encodeNodeId(nodeId)); + nodeAddMessage.setAddress( new nodesPB.Address().setHost(host).setPort(port), ); + nodeAddMessage.setForce(options.force); + nodeAddMessage.setPing(options.ping); await binUtils.retryAuthentication( - (auth) => pkClient.grpcClient.nodesAdd(nodeAddressMessage, auth), + (auth) => pkClient.grpcClient.nodesAdd(nodeAddMessage, auth), meta, ); } finally { diff --git a/src/bin/utils/options.ts b/src/bin/utils/options.ts index bed18d65a..f2da17b8c 100644 --- a/src/bin/utils/options.ts +++ b/src/bin/utils/options.ts @@ -154,6 +154,15 @@ const pullVault = new commander.Option( 'Name or Id of the vault to pull from', ); +const 
forceNodeAdd = new commander.Option( + '--force', + 'Force adding node to nodeGraph', +).default(false); + +const noPing = new commander.Option('--no-ping', 'Skip ping step').default( + true, +); + export { nodePath, format, @@ -176,4 +185,6 @@ export { network, workers, pullVault, + forceNodeAdd, + noPing, }; diff --git a/src/client/service/nodesAdd.ts b/src/client/service/nodesAdd.ts index 0d993c746..3b6043219 100644 --- a/src/client/service/nodesAdd.ts +++ b/src/client/service/nodesAdd.ts @@ -6,6 +6,7 @@ import type { NodeId, NodeAddress } from '../../nodes/types'; import type { Host, Hostname, Port } from '../../network/types'; import type * as nodesPB from '../../proto/js/polykey/v1/nodes/nodes_pb'; import type Logger from '@matrixai/logger'; +import * as nodeErrors from '../../nodes/errors'; import * as grpcUtils from '../../grpc/utils'; import { validateSync } from '../../validation'; import * as validationUtils from '../../validation/utils'; @@ -30,12 +31,13 @@ function nodesAdd({ logger: Logger; }) { return async ( - call: grpc.ServerUnaryCall, + call: grpc.ServerUnaryCall, callback: grpc.sendUnaryData, ): Promise => { try { const response = new utilsPB.EmptyMessage(); const metadata = await authenticate(call.metadata); + const request = call.request; call.sendMetadata(metadata); const { nodeId, @@ -55,11 +57,21 @@ function nodesAdd({ ); }, { - nodeId: call.request.getNodeId(), - host: call.request.getAddress()?.getHost(), - port: call.request.getAddress()?.getPort(), + nodeId: request.getNodeId(), + host: request.getAddress()?.getHost(), + port: request.getAddress()?.getPort(), }, ); + // Pinging to authenticate the node + if ( + request.getPing() && + !(await nodeManager.pingNode(nodeId, { host, port })) + ) { + throw new nodeErrors.ErrorNodePingFailed( + 'Failed to authenticate target node', + ); + } + await db.withTransactionF(async (tran) => nodeManager.setNode( nodeId, @@ -68,7 +80,7 @@ function nodesAdd({ port, } as NodeAddress, true, - true, + 
request.getForce(), undefined, tran, ), diff --git a/src/nodes/NodeConnectionManager.ts b/src/nodes/NodeConnectionManager.ts index 093fe22d6..f39f333d8 100644 --- a/src/nodes/NodeConnectionManager.ts +++ b/src/nodes/NodeConnectionManager.ts @@ -111,7 +111,12 @@ class NodeConnectionManager { this.nodeManager = nodeManager; for (const nodeIdEncoded in this.seedNodes) { const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded)!; - await this.nodeGraph.setNode(nodeId, this.seedNodes[nodeIdEncoded]); + await this.nodeManager.setNode( + nodeId, + this.seedNodes[nodeIdEncoded], + true, + true, + ); } this.logger.info(`Started ${this.constructor.name}`); } @@ -431,10 +436,14 @@ class NodeConnectionManager { timer?: Timer, options: { signal?: AbortSignal } = {}, ): Promise { + const localNodeId = this.keyManager.getNodeId(); const { signal } = { ...options }; // Let foundTarget: boolean = false; let foundAddress: NodeAddress | undefined = undefined; // Get the closest alpha nodes to the target node (set as shortlist) + // FIXME? this is an array. Shouldn't it be a set? 
+ // It's possible for this to grow faster than we can consume it, + // doubly so if we allow duplicates const shortlist = await this.nodeGraph.getClosestNodes( targetNodeId, this.initialClosestNodes, @@ -466,13 +475,15 @@ class NodeConnectionManager { } // Connect to the node (check if pre-existing connection exists, otherwise // create a new one) - try { - // Add the node to the database so that we can find its address in - // call to getConnectionToNode - await this.nodeGraph.setNode(nextNodeId, nextNodeAddress.address); - await this.getConnection(nextNodeId, timer); - } catch (e) { - // If we can't connect to the node, then skip it + if ( + await this.pingNode( + nextNodeId, + nextNodeAddress.address.host, + nextNodeAddress.address.port, + ) + ) { + await this.nodeManager!.setNode(nextNodeId, nextNodeAddress.address); + } else { continue; } contacted[nextNodeId] = true; @@ -486,12 +497,19 @@ class NodeConnectionManager { // them to the shortlist for (const [nodeId, nodeData] of foundClosest) { if (signal?.aborted) throw new nodesErrors.ErrorNodeAborted(); - // Ignore a`ny nodes that have been contacted - if (contacted[nodeId]) { + // Ignore any nodes that have been contacted or our own node + if (contacted[nodeId] || localNodeId.equals(nodeId)) { continue; } - if (nodeId.equals(targetNodeId)) { - await this.nodeGraph.setNode(nodeId, nodeData.address); + if ( + nodeId.equals(targetNodeId) && + (await this.pingNode( + nodeId, + nodeData.address.host, + nodeData.address.port, + )) + ) { + await this.nodeManager!.setNode(nodeId, nodeData.address); foundAddress = nodeData.address; // We have found the target node, so we can stop trying to look for it // in the shortlist @@ -555,7 +573,9 @@ class NodeConnectionManager { host: address.getHost() as Host | Hostname, port: address.getPort() as Port, }, - lastUpdated: 0, // FIXME? 
+ // Not really needed + // But if it's needed then we need to add the information to the proto definition + lastUpdated: 0, }, ]); } @@ -589,15 +609,20 @@ class NodeConnectionManager { this.keyManager.getNodeId(), timer, ); - // FIXME: we need to ping a node before setting it for (const [nodeId, nodeData] of nodes) { + const pingAndAddNode = async () => { + const port = nodeData.address.port; + const host = await networkUtils.resolveHost(nodeData.address.host); + if (await this.pingNode(nodeId, host, port)) { + await this.nodeManager!.setNode(nodeId, nodeData.address, true); + } + }; + if (!block) { - this.queue.push(() => - this.nodeManager!.setNode(nodeId, nodeData.address), - ); + this.queue.push(pingAndAddNode); } else { try { - await this.nodeManager?.setNode(nodeId, nodeData.address); + await pingAndAddNode(); } catch (e) { if (!(e instanceof nodesErrors.ErrorNodeGraphSameNodeId)) throw e; } @@ -703,10 +728,11 @@ class NodeConnectionManager { @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) public async pingNode( nodeId: NodeId, - host: Host, + host: Host | Hostname, port: Port, timer?: Timer, ): Promise { + host = await networkUtils.resolveHost(host); // If we can create a connection then we have punched though the NAT, // authenticated and confirmed the nodeId matches const proxyAddress = networkUtils.buildAddress( diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index ffd9aa18f..bb264de4f 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -410,6 +410,12 @@ class NodeManager { timeout?: number, tran?: DBTransaction, ): Promise { + // We don't want to add our own node + if (nodeId.equals(this.keyManager.getNodeId())) { + this.logger.debug('Is own NodeId, skipping'); + return; + } + if (tran == null) { return this.db.withTransactionF(async (tran) => this.setNode(nodeId, nodeAddress, block, force, timeout, tran), diff --git a/src/nodes/errors.ts b/src/nodes/errors.ts index b35a58f70..bc0185025 100644 --- 
a/src/nodes/errors.ts +++ b/src/nodes/errors.ts @@ -86,6 +86,11 @@ class ErrorNodeConnectionHostWildcard extends ErrorNodes { static description = 'An IP wildcard was provided for the target host'; exitCode = sysexits.USAGE; } +class ErrorNodePingFailed extends ErrorNodes { + static description = + 'Failed to ping the node when attempting to authenticate'; + exitCode = sysexits.NOHOST; +} export { ErrorNodes, @@ -106,4 +111,5 @@ export { ErrorNodeConnectionPublicKeyNotFound, ErrorNodeConnectionManagerNotRunning, ErrorNodeConnectionHostWildcard, + ErrorNodePingFailed, }; diff --git a/src/proto/js/google/protobuf/any_pb.js b/src/proto/js/google/protobuf/any_pb.js index 2154f2078..cec1761c8 100644 --- a/src/proto/js/google/protobuf/any_pb.js +++ b/src/proto/js/google/protobuf/any_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/google/protobuf/descriptor_pb.js b/src/proto/js/google/protobuf/descriptor_pb.js index 64e84878b..9c345b93d 100644 --- a/src/proto/js/google/protobuf/descriptor_pb.js +++ b/src/proto/js/google/protobuf/descriptor_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/google/protobuf/duration_pb.js b/src/proto/js/google/protobuf/duration_pb.js index 74166f0fd..1b5f0fd84 100644 --- a/src/proto/js/google/protobuf/duration_pb.js +++ b/src/proto/js/google/protobuf/duration_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. 
* @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/google/protobuf/empty_pb.js b/src/proto/js/google/protobuf/empty_pb.js index d85fa310a..bd5d8a4e1 100644 --- a/src/proto/js/google/protobuf/empty_pb.js +++ b/src/proto/js/google/protobuf/empty_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/google/protobuf/field_mask_pb.js b/src/proto/js/google/protobuf/field_mask_pb.js index 67860a3a2..34e581b04 100644 --- a/src/proto/js/google/protobuf/field_mask_pb.js +++ b/src/proto/js/google/protobuf/field_mask_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/google/protobuf/struct_pb.js b/src/proto/js/google/protobuf/struct_pb.js index bff1ed412..b16b8b2fa 100644 --- a/src/proto/js/google/protobuf/struct_pb.js +++ b/src/proto/js/google/protobuf/struct_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. 
* @public diff --git a/src/proto/js/google/protobuf/timestamp_pb.js b/src/proto/js/google/protobuf/timestamp_pb.js index 6881a1d93..a270c1c47 100644 --- a/src/proto/js/google/protobuf/timestamp_pb.js +++ b/src/proto/js/google/protobuf/timestamp_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/google/protobuf/wrappers_pb.js b/src/proto/js/google/protobuf/wrappers_pb.js index 9c89af542..458e1b436 100644 --- a/src/proto/js/google/protobuf/wrappers_pb.js +++ b/src/proto/js/google/protobuf/wrappers_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/polykey/v1/agent/agent_pb.js b/src/proto/js/polykey/v1/agent/agent_pb.js index 13a458c48..29361addf 100644 --- a/src/proto/js/polykey/v1/agent/agent_pb.js +++ b/src/proto/js/polykey/v1/agent/agent_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/polykey/v1/agent_service_pb.js b/src/proto/js/polykey/v1/agent_service_pb.js index ade0b70fa..9fa48c738 100644 --- a/src/proto/js/polykey/v1/agent_service_pb.js +++ b/src/proto/js/polykey/v1/agent_service_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. 
* @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/polykey/v1/client_service_grpc_pb.d.ts b/src/proto/js/polykey/v1/client_service_grpc_pb.d.ts index 067688187..b230f8df4 100644 --- a/src/proto/js/polykey/v1/client_service_grpc_pb.d.ts +++ b/src/proto/js/polykey/v1/client_service_grpc_pb.d.ts @@ -122,12 +122,12 @@ interface IClientServiceService_IAgentUnlock extends grpc.MethodDefinition; responseDeserialize: grpc.deserialize; } -interface IClientServiceService_INodesAdd extends grpc.MethodDefinition { +interface IClientServiceService_INodesAdd extends grpc.MethodDefinition { path: "/polykey.v1.ClientService/NodesAdd"; requestStream: false; responseStream: false; - requestSerialize: grpc.serialize; - requestDeserialize: grpc.deserialize; + requestSerialize: grpc.serialize; + requestDeserialize: grpc.deserialize; responseSerialize: grpc.serialize; responseDeserialize: grpc.deserialize; } @@ -679,7 +679,7 @@ export interface IClientServiceServer extends grpc.UntypedServiceImplementation agentStatus: grpc.handleUnaryCall; agentStop: grpc.handleUnaryCall; agentUnlock: grpc.handleUnaryCall; - nodesAdd: grpc.handleUnaryCall; + nodesAdd: grpc.handleUnaryCall; nodesPing: grpc.handleUnaryCall; nodesClaim: grpc.handleUnaryCall; nodesFind: grpc.handleUnaryCall; @@ -755,9 +755,9 @@ export interface IClientServiceClient { agentUnlock(request: polykey_v1_utils_utils_pb.EmptyMessage, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; agentUnlock(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; agentUnlock(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | 
null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; - nodesAdd(request: polykey_v1_nodes_nodes_pb.NodeAddress, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; - nodesAdd(request: polykey_v1_nodes_nodes_pb.NodeAddress, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; - nodesAdd(request: polykey_v1_nodes_nodes_pb.NodeAddress, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; + nodesAdd(request: polykey_v1_nodes_nodes_pb.NodeAdd, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; + nodesAdd(request: polykey_v1_nodes_nodes_pb.NodeAdd, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; + nodesAdd(request: polykey_v1_nodes_nodes_pb.NodeAdd, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; nodesPing(request: polykey_v1_nodes_nodes_pb.Node, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.StatusMessage) => void): grpc.ClientUnaryCall; nodesPing(request: polykey_v1_nodes_nodes_pb.Node, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.StatusMessage) => void): grpc.ClientUnaryCall; nodesPing(request: polykey_v1_nodes_nodes_pb.Node, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.StatusMessage) => void): grpc.ClientUnaryCall; @@ -943,9 +943,9 @@ export class ClientServiceClient extends grpc.Client 
implements IClientServiceCl public agentUnlock(request: polykey_v1_utils_utils_pb.EmptyMessage, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; public agentUnlock(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; public agentUnlock(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; - public nodesAdd(request: polykey_v1_nodes_nodes_pb.NodeAddress, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; - public nodesAdd(request: polykey_v1_nodes_nodes_pb.NodeAddress, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; - public nodesAdd(request: polykey_v1_nodes_nodes_pb.NodeAddress, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; + public nodesAdd(request: polykey_v1_nodes_nodes_pb.NodeAdd, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; + public nodesAdd(request: polykey_v1_nodes_nodes_pb.NodeAdd, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; + public nodesAdd(request: polykey_v1_nodes_nodes_pb.NodeAdd, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.EmptyMessage) => void): grpc.ClientUnaryCall; public nodesPing(request: 
polykey_v1_nodes_nodes_pb.Node, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.StatusMessage) => void): grpc.ClientUnaryCall; public nodesPing(request: polykey_v1_nodes_nodes_pb.Node, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.StatusMessage) => void): grpc.ClientUnaryCall; public nodesPing(request: polykey_v1_nodes_nodes_pb.Node, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_utils_utils_pb.StatusMessage) => void): grpc.ClientUnaryCall; diff --git a/src/proto/js/polykey/v1/client_service_grpc_pb.js b/src/proto/js/polykey/v1/client_service_grpc_pb.js index 642127423..e08b6512c 100644 --- a/src/proto/js/polykey/v1/client_service_grpc_pb.js +++ b/src/proto/js/polykey/v1/client_service_grpc_pb.js @@ -201,6 +201,17 @@ function deserialize_polykey_v1_nodes_Node(buffer_arg) { return polykey_v1_nodes_nodes_pb.Node.deserializeBinary(new Uint8Array(buffer_arg)); } +function serialize_polykey_v1_nodes_NodeAdd(arg) { + if (!(arg instanceof polykey_v1_nodes_nodes_pb.NodeAdd)) { + throw new Error('Expected argument of type polykey.v1.nodes.NodeAdd'); + } + return Buffer.from(arg.serializeBinary()); +} + +function deserialize_polykey_v1_nodes_NodeAdd(buffer_arg) { + return polykey_v1_nodes_nodes_pb.NodeAdd.deserializeBinary(new Uint8Array(buffer_arg)); +} + function serialize_polykey_v1_nodes_NodeAddress(arg) { if (!(arg instanceof polykey_v1_nodes_nodes_pb.NodeAddress)) { throw new Error('Expected argument of type polykey.v1.nodes.NodeAddress'); @@ -528,10 +539,10 @@ nodesAdd: { path: '/polykey.v1.ClientService/NodesAdd', requestStream: false, responseStream: false, - requestType: polykey_v1_nodes_nodes_pb.NodeAddress, + requestType: polykey_v1_nodes_nodes_pb.NodeAdd, responseType: polykey_v1_utils_utils_pb.EmptyMessage, - requestSerialize: serialize_polykey_v1_nodes_NodeAddress, - requestDeserialize: 
deserialize_polykey_v1_nodes_NodeAddress, + requestSerialize: serialize_polykey_v1_nodes_NodeAdd, + requestDeserialize: deserialize_polykey_v1_nodes_NodeAdd, responseSerialize: serialize_polykey_v1_utils_EmptyMessage, responseDeserialize: deserialize_polykey_v1_utils_EmptyMessage, }, diff --git a/src/proto/js/polykey/v1/client_service_pb.js b/src/proto/js/polykey/v1/client_service_pb.js index 4adc8bd6d..68a9ebcb8 100644 --- a/src/proto/js/polykey/v1/client_service_pb.js +++ b/src/proto/js/polykey/v1/client_service_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/polykey/v1/gestalts/gestalts_pb.js b/src/proto/js/polykey/v1/gestalts/gestalts_pb.js index 90435b3be..36b225293 100644 --- a/src/proto/js/polykey/v1/gestalts/gestalts_pb.js +++ b/src/proto/js/polykey/v1/gestalts/gestalts_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/polykey/v1/identities/identities_pb.js b/src/proto/js/polykey/v1/identities/identities_pb.js index cbfb21ed9..a52a535f4 100644 --- a/src/proto/js/polykey/v1/identities/identities_pb.js +++ b/src/proto/js/polykey/v1/identities/identities_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. 
* @public diff --git a/src/proto/js/polykey/v1/keys/keys_pb.js b/src/proto/js/polykey/v1/keys/keys_pb.js index dfbc1df0b..323ef8f16 100644 --- a/src/proto/js/polykey/v1/keys/keys_pb.js +++ b/src/proto/js/polykey/v1/keys/keys_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/polykey/v1/nodes/nodes_pb.d.ts b/src/proto/js/polykey/v1/nodes/nodes_pb.d.ts index 79d0fbd58..09fb028eb 100644 --- a/src/proto/js/polykey/v1/nodes/nodes_pb.d.ts +++ b/src/proto/js/polykey/v1/nodes/nodes_pb.d.ts @@ -98,6 +98,38 @@ export namespace Claim { } } +export class NodeAdd extends jspb.Message { + getNodeId(): string; + setNodeId(value: string): NodeAdd; + + hasAddress(): boolean; + clearAddress(): void; + getAddress(): Address | undefined; + setAddress(value?: Address): NodeAdd; + getForce(): boolean; + setForce(value: boolean): NodeAdd; + getPing(): boolean; + setPing(value: boolean): NodeAdd; + + serializeBinary(): Uint8Array; + toObject(includeInstance?: boolean): NodeAdd.AsObject; + static toObject(includeInstance: boolean, msg: NodeAdd): NodeAdd.AsObject; + static extensions: {[key: number]: jspb.ExtensionFieldInfo}; + static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; + static serializeBinaryToWriter(message: NodeAdd, writer: jspb.BinaryWriter): void; + static deserializeBinary(bytes: Uint8Array): NodeAdd; + static deserializeBinaryFromReader(message: NodeAdd, reader: jspb.BinaryReader): NodeAdd; +} + +export namespace NodeAdd { + export type AsObject = { + nodeId: string, + address?: Address.AsObject, + force: boolean, + ping: boolean, + } +} + export class NodeBuckets extends jspb.Message { getBucketsMap(): jspb.Map; diff --git a/src/proto/js/polykey/v1/nodes/nodes_pb.js 
b/src/proto/js/polykey/v1/nodes/nodes_pb.js index 8fe0c189f..6dd70cdc3 100644 --- a/src/proto/js/polykey/v1/nodes/nodes_pb.js +++ b/src/proto/js/polykey/v1/nodes/nodes_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public @@ -24,6 +25,7 @@ goog.exportSymbol('proto.polykey.v1.nodes.Claims', null, global); goog.exportSymbol('proto.polykey.v1.nodes.Connection', null, global); goog.exportSymbol('proto.polykey.v1.nodes.CrossSign', null, global); goog.exportSymbol('proto.polykey.v1.nodes.Node', null, global); +goog.exportSymbol('proto.polykey.v1.nodes.NodeAdd', null, global); goog.exportSymbol('proto.polykey.v1.nodes.NodeAddress', null, global); goog.exportSymbol('proto.polykey.v1.nodes.NodeBuckets', null, global); goog.exportSymbol('proto.polykey.v1.nodes.NodeTable', null, global); @@ -113,6 +115,27 @@ if (goog.DEBUG && !COMPILED) { */ proto.polykey.v1.nodes.Claim.displayName = 'proto.polykey.v1.nodes.Claim'; } +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.polykey.v1.nodes.NodeAdd = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, null, null); +}; +goog.inherits(proto.polykey.v1.nodes.NodeAdd, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.polykey.v1.nodes.NodeAdd.displayName = 'proto.polykey.v1.nodes.NodeAdd'; +} /** * Generated by JsPbCodeGenerator. 
* @param {Array=} opt_data Optional initial data array, typically from a @@ -978,6 +1001,247 @@ proto.polykey.v1.nodes.Claim.prototype.setForceInvite = function(value) { +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. + * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.polykey.v1.nodes.NodeAdd.prototype.toObject = function(opt_includeInstance) { + return proto.polykey.v1.nodes.NodeAdd.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.polykey.v1.nodes.NodeAdd} msg The msg instance to transform. + * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.polykey.v1.nodes.NodeAdd.toObject = function(includeInstance, msg) { + var f, obj = { + nodeId: jspb.Message.getFieldWithDefault(msg, 1, ""), + address: (f = msg.getAddress()) && proto.polykey.v1.nodes.Address.toObject(includeInstance, f), + force: jspb.Message.getBooleanFieldWithDefault(msg, 3, false), + ping: jspb.Message.getBooleanFieldWithDefault(msg, 4, false) + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. 
+ * @return {!proto.polykey.v1.nodes.NodeAdd} + */ +proto.polykey.v1.nodes.NodeAdd.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.polykey.v1.nodes.NodeAdd; + return proto.polykey.v1.nodes.NodeAdd.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.polykey.v1.nodes.NodeAdd} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. + * @return {!proto.polykey.v1.nodes.NodeAdd} + */ +proto.polykey.v1.nodes.NodeAdd.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = /** @type {string} */ (reader.readString()); + msg.setNodeId(value); + break; + case 2: + var value = new proto.polykey.v1.nodes.Address; + reader.readMessage(value,proto.polykey.v1.nodes.Address.deserializeBinaryFromReader); + msg.setAddress(value); + break; + case 3: + var value = /** @type {boolean} */ (reader.readBool()); + msg.setForce(value); + break; + case 4: + var value = /** @type {boolean} */ (reader.readBool()); + msg.setPing(value); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.polykey.v1.nodes.NodeAdd.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.polykey.v1.nodes.NodeAdd.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. 
+ * @param {!proto.polykey.v1.nodes.NodeAdd} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.polykey.v1.nodes.NodeAdd.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getNodeId(); + if (f.length > 0) { + writer.writeString( + 1, + f + ); + } + f = message.getAddress(); + if (f != null) { + writer.writeMessage( + 2, + f, + proto.polykey.v1.nodes.Address.serializeBinaryToWriter + ); + } + f = message.getForce(); + if (f) { + writer.writeBool( + 3, + f + ); + } + f = message.getPing(); + if (f) { + writer.writeBool( + 4, + f + ); + } +}; + + +/** + * optional string node_id = 1; + * @return {string} + */ +proto.polykey.v1.nodes.NodeAdd.prototype.getNodeId = function() { + return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 1, "")); +}; + + +/** + * @param {string} value + * @return {!proto.polykey.v1.nodes.NodeAdd} returns this + */ +proto.polykey.v1.nodes.NodeAdd.prototype.setNodeId = function(value) { + return jspb.Message.setProto3StringField(this, 1, value); +}; + + +/** + * optional Address address = 2; + * @return {?proto.polykey.v1.nodes.Address} + */ +proto.polykey.v1.nodes.NodeAdd.prototype.getAddress = function() { + return /** @type{?proto.polykey.v1.nodes.Address} */ ( + jspb.Message.getWrapperField(this, proto.polykey.v1.nodes.Address, 2)); +}; + + +/** + * @param {?proto.polykey.v1.nodes.Address|undefined} value + * @return {!proto.polykey.v1.nodes.NodeAdd} returns this +*/ +proto.polykey.v1.nodes.NodeAdd.prototype.setAddress = function(value) { + return jspb.Message.setWrapperField(this, 2, value); +}; + + +/** + * Clears the message field making it undefined. + * @return {!proto.polykey.v1.nodes.NodeAdd} returns this + */ +proto.polykey.v1.nodes.NodeAdd.prototype.clearAddress = function() { + return this.setAddress(undefined); +}; + + +/** + * Returns whether this field is set. 
+ * @return {boolean} + */ +proto.polykey.v1.nodes.NodeAdd.prototype.hasAddress = function() { + return jspb.Message.getField(this, 2) != null; +}; + + +/** + * optional bool force = 3; + * @return {boolean} + */ +proto.polykey.v1.nodes.NodeAdd.prototype.getForce = function() { + return /** @type {boolean} */ (jspb.Message.getBooleanFieldWithDefault(this, 3, false)); +}; + + +/** + * @param {boolean} value + * @return {!proto.polykey.v1.nodes.NodeAdd} returns this + */ +proto.polykey.v1.nodes.NodeAdd.prototype.setForce = function(value) { + return jspb.Message.setProto3BooleanField(this, 3, value); +}; + + +/** + * optional bool ping = 4; + * @return {boolean} + */ +proto.polykey.v1.nodes.NodeAdd.prototype.getPing = function() { + return /** @type {boolean} */ (jspb.Message.getBooleanFieldWithDefault(this, 4, false)); +}; + + +/** + * @param {boolean} value + * @return {!proto.polykey.v1.nodes.NodeAdd} returns this + */ +proto.polykey.v1.nodes.NodeAdd.prototype.setPing = function(value) { + return jspb.Message.setProto3BooleanField(this, 4, value); +}; + + + + + if (jspb.Message.GENERATE_TO_OBJECT) { /** * Creates an object representation of this proto. diff --git a/src/proto/js/polykey/v1/notifications/notifications_pb.js b/src/proto/js/polykey/v1/notifications/notifications_pb.js index f50f614f5..80794ae7f 100644 --- a/src/proto/js/polykey/v1/notifications/notifications_pb.js +++ b/src/proto/js/polykey/v1/notifications/notifications_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. 
* @public diff --git a/src/proto/js/polykey/v1/permissions/permissions_pb.js b/src/proto/js/polykey/v1/permissions/permissions_pb.js index 53e129985..1b55e4f47 100644 --- a/src/proto/js/polykey/v1/permissions/permissions_pb.js +++ b/src/proto/js/polykey/v1/permissions/permissions_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/polykey/v1/secrets/secrets_pb.js b/src/proto/js/polykey/v1/secrets/secrets_pb.js index 5008028d8..28d2e02ae 100644 --- a/src/proto/js/polykey/v1/secrets/secrets_pb.js +++ b/src/proto/js/polykey/v1/secrets/secrets_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/polykey/v1/sessions/sessions_pb.js b/src/proto/js/polykey/v1/sessions/sessions_pb.js index c2d81541f..212d584bc 100644 --- a/src/proto/js/polykey/v1/sessions/sessions_pb.js +++ b/src/proto/js/polykey/v1/sessions/sessions_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/polykey/v1/test_service_pb.js b/src/proto/js/polykey/v1/test_service_pb.js index f5ab8f2de..56dd0245c 100644 --- a/src/proto/js/polykey/v1/test_service_pb.js +++ b/src/proto/js/polykey/v1/test_service_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. 
* @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/polykey/v1/utils/utils_pb.js b/src/proto/js/polykey/v1/utils/utils_pb.js index 852c0903d..39b5c869e 100644 --- a/src/proto/js/polykey/v1/utils/utils_pb.js +++ b/src/proto/js/polykey/v1/utils/utils_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. * @public diff --git a/src/proto/js/polykey/v1/vaults/vaults_pb.js b/src/proto/js/polykey/v1/vaults/vaults_pb.js index 6b793dc63..153565a46 100644 --- a/src/proto/js/polykey/v1/vaults/vaults_pb.js +++ b/src/proto/js/polykey/v1/vaults/vaults_pb.js @@ -2,6 +2,7 @@ /** * @fileoverview * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. * @suppress {messageConventions} JS Compiler reports an error if a variable or * field starts with 'MSG_' and isn't a translatable message. 
* @public diff --git a/src/proto/schemas/polykey/v1/client_service.proto b/src/proto/schemas/polykey/v1/client_service.proto index 81782f13b..9c90e0286 100644 --- a/src/proto/schemas/polykey/v1/client_service.proto +++ b/src/proto/schemas/polykey/v1/client_service.proto @@ -22,7 +22,7 @@ service ClientService { rpc AgentUnlock (polykey.v1.utils.EmptyMessage) returns (polykey.v1.utils.EmptyMessage); // Nodes - rpc NodesAdd(polykey.v1.nodes.NodeAddress) returns (polykey.v1.utils.EmptyMessage); + rpc NodesAdd(polykey.v1.nodes.NodeAdd) returns (polykey.v1.utils.EmptyMessage); rpc NodesPing(polykey.v1.nodes.Node) returns (polykey.v1.utils.StatusMessage); rpc NodesClaim(polykey.v1.nodes.Claim) returns (polykey.v1.utils.StatusMessage); rpc NodesFind(polykey.v1.nodes.Node) returns (polykey.v1.nodes.NodeAddress); diff --git a/src/proto/schemas/polykey/v1/nodes/nodes.proto b/src/proto/schemas/polykey/v1/nodes/nodes.proto index bd2b54f85..cd8b23785 100644 --- a/src/proto/schemas/polykey/v1/nodes/nodes.proto +++ b/src/proto/schemas/polykey/v1/nodes/nodes.proto @@ -25,6 +25,13 @@ message Claim { bool force_invite = 2; } +message NodeAdd { + string node_id = 1; + Address address = 2; + bool force = 3; + bool ping = 4; +} + // Bucket index -> a node bucket (from NodeGraph) message NodeBuckets { map buckets = 1; diff --git a/tests/bin/nodes/add.test.ts b/tests/bin/nodes/add.test.ts index 85b598786..b3bd7cc67 100644 --- a/tests/bin/nodes/add.test.ts +++ b/tests/bin/nodes/add.test.ts @@ -9,6 +9,7 @@ import { sysexits } from '@/utils'; import PolykeyAgent from '@/PolykeyAgent'; import * as nodesUtils from '@/nodes/utils'; import * as keysUtils from '@/keys/utils'; +import NodeManager from '@/nodes/NodeManager'; import * as testBinUtils from '../utils'; import * as testUtils from '../../utils'; import * as testNodesUtils from '../../nodes/utils'; @@ -20,12 +21,13 @@ describe('add', () => { const invalidNodeId = IdInternal.fromString('INVALIDID'); const validHost = '0.0.0.0'; const 
invalidHost = 'INVALIDHOST'; - const port = '55555'; + const port = 55555; let dataDir: string; let nodePath: string; let pkAgent: PolykeyAgent; let mockedGenerateKeyPair: jest.SpyInstance; let mockedGenerateDeterministicKeyPair: jest.SpyInstance; + let mockedPingNode: jest.SpyInstance; beforeAll(async () => { const globalKeyPair = await testUtils.setupGlobalKeypair(); mockedGenerateKeyPair = jest @@ -38,6 +40,7 @@ describe('add', () => { path.join(os.tmpdir(), 'polykey-test-'), ); nodePath = path.join(dataDir, 'polykey'); + mockedPingNode = jest.spyOn(NodeManager.prototype, 'pingNode'); // Cannot use the shared global agent since we can't 'un-add' a node pkAgent = await PolykeyAgent.createPolykeyAgent({ password, @@ -60,10 +63,22 @@ describe('add', () => { }); mockedGenerateKeyPair.mockRestore(); mockedGenerateDeterministicKeyPair.mockRestore(); + mockedPingNode.mockRestore(); + }); + beforeEach(async () => { + await pkAgent.nodeGraph.stop(); + await pkAgent.nodeGraph.start({ fresh: true }); + mockedPingNode.mockImplementation(() => true); }); test('adds a node', async () => { const { exitCode } = await testBinUtils.pkStdio( - ['nodes', 'add', nodesUtils.encodeNodeId(validNodeId), validHost, port], + [ + 'nodes', + 'add', + nodesUtils.encodeNodeId(validNodeId), + validHost, + `${port}`, + ], { PK_NODE_PATH: nodePath, PK_PASSWORD: password, @@ -81,11 +96,17 @@ describe('add', () => { dataDir, ); expect(stdout).toContain(validHost); - expect(stdout).toContain(port); + expect(stdout).toContain(`${port}`); }); test('fails to add a node (invalid node ID)', async () => { const { exitCode } = await testBinUtils.pkStdio( - ['nodes', 'add', nodesUtils.encodeNodeId(invalidNodeId), validHost, port], + [ + 'nodes', + 'add', + nodesUtils.encodeNodeId(invalidNodeId), + validHost, + `${port}`, + ], { PK_NODE_PATH: nodePath, PK_PASSWORD: password, @@ -96,7 +117,13 @@ describe('add', () => { }); test('fails to add a node (invalid IP address)', async () => { const { exitCode } = 
await testBinUtils.pkStdio( - ['nodes', 'add', nodesUtils.encodeNodeId(validNodeId), invalidHost, port], + [ + 'nodes', + 'add', + nodesUtils.encodeNodeId(validNodeId), + invalidHost, + `${port}`, + ], { PK_NODE_PATH: nodePath, PK_PASSWORD: password, @@ -105,4 +132,65 @@ describe('add', () => { ); expect(exitCode).toBe(sysexits.USAGE); }); + test('adds a node with --force flag', async () => { + const { exitCode } = await testBinUtils.pkStdio( + [ + 'nodes', + 'add', + '--force', + nodesUtils.encodeNodeId(validNodeId), + validHost, + `${port}`, + ], + { + PK_NODE_PATH: nodePath, + PK_PASSWORD: password, + }, + dataDir, + ); + expect(exitCode).toBe(0); + // Checking if node was added. + const node = await pkAgent.nodeGraph.getNode(validNodeId); + expect(node?.address).toEqual({ host: validHost, port: port }); + }); + test('fails to add node when ping fails', async () => { + mockedPingNode.mockImplementation(() => false); + const { exitCode } = await testBinUtils.pkStdio( + [ + 'nodes', + 'add', + nodesUtils.encodeNodeId(validNodeId), + validHost, + `${port}`, + ], + { + PK_NODE_PATH: nodePath, + PK_PASSWORD: password, + }, + dataDir, + ); + expect(exitCode).toBe(sysexits.NOHOST); + }); + test('adds a node with --no-ping flag', async () => { + mockedPingNode.mockImplementation(() => false); + const { exitCode } = await testBinUtils.pkStdio( + [ + 'nodes', + 'add', + '--no-ping', + nodesUtils.encodeNodeId(validNodeId), + validHost, + `${port}`, + ], + { + PK_NODE_PATH: nodePath, + PK_PASSWORD: password, + }, + dataDir, + ); + expect(exitCode).toBe(0); + // Checking if node was added. 
+ const node = await pkAgent.nodeGraph.getNode(validNodeId); + expect(node?.address).toEqual({ host: validHost, port: port }); + }); }); diff --git a/tests/bin/nodes/find.test.ts b/tests/bin/nodes/find.test.ts index 56bffd263..b60804c64 100644 --- a/tests/bin/nodes/find.test.ts +++ b/tests/bin/nodes/find.test.ts @@ -158,31 +158,37 @@ describe('find', () => { port: remoteOfflinePort, }); }); - test('fails to find an unknown node', async () => { - const unknownNodeId = nodesUtils.decodeNodeId( - 'vrcacp9vsb4ht25hds6s4lpp2abfaso0mptcfnh499n35vfcn2gkg', - ); - const { exitCode, stdout } = await testBinUtils.pkStdio( - [ - 'nodes', - 'find', - nodesUtils.encodeNodeId(unknownNodeId!), - '--format', - 'json', - ], - { - PK_NODE_PATH: nodePath, - PK_PASSWORD: password, - }, - dataDir, - ); - expect(exitCode).toBe(sysexits.GENERAL); - expect(JSON.parse(stdout)).toEqual({ - success: false, - message: `Failed to find node ${nodesUtils.encodeNodeId(unknownNodeId!)}`, - id: nodesUtils.encodeNodeId(unknownNodeId!), - host: '', - port: 0, - }); - }); + test( + 'fails to find an unknown node', + async () => { + const unknownNodeId = nodesUtils.decodeNodeId( + 'vrcacp9vsb4ht25hds6s4lpp2abfaso0mptcfnh499n35vfcn2gkg', + ); + const { exitCode, stdout } = await testBinUtils.pkStdio( + [ + 'nodes', + 'find', + nodesUtils.encodeNodeId(unknownNodeId!), + '--format', + 'json', + ], + { + PK_NODE_PATH: nodePath, + PK_PASSWORD: password, + }, + dataDir, + ); + expect(exitCode).toBe(sysexits.GENERAL); + expect(JSON.parse(stdout)).toEqual({ + success: false, + message: `Failed to find node ${nodesUtils.encodeNodeId( + unknownNodeId!, + )}`, + id: nodesUtils.encodeNodeId(unknownNodeId!), + host: '', + port: 0, + }); + }, + global.failedConnectionTimeout, + ); }); diff --git a/tests/client/service/nodesAdd.test.ts b/tests/client/service/nodesAdd.test.ts index f2c4969a0..f00e62566 100644 --- a/tests/client/service/nodesAdd.test.ts +++ b/tests/client/service/nodesAdd.test.ts @@ -163,13 +163,15 @@ 
describe('nodesAdd', () => { const addressMessage = new nodesPB.Address(); addressMessage.setHost('127.0.0.1'); addressMessage.setPort(11111); - const request = new nodesPB.NodeAddress(); + const request = new nodesPB.NodeAdd(); request.setNodeId('vrsc24a1er424epq77dtoveo93meij0pc8ig4uvs9jbeld78n9nl0'); request.setAddress(addressMessage); const response = await grpcClient.nodesAdd( request, clientUtils.encodeAuthFromPassword(password), ); + request.setPing(false); + request.setForce(false); expect(response).toBeInstanceOf(utilsPB.EmptyMessage); const result = await nodeGraph.getNode( nodesUtils.decodeNodeId( @@ -184,7 +186,9 @@ describe('nodesAdd', () => { const addressMessage = new nodesPB.Address(); addressMessage.setHost(''); addressMessage.setPort(11111); - const request = new nodesPB.NodeAddress(); + const request = new nodesPB.NodeAdd(); + request.setPing(false); + request.setForce(false); request.setNodeId('vrsc24a1er424epq77dtoveo93meij0pc8ig4uvs9jbeld78n9nl0'); request.setAddress(addressMessage); await expectRemoteError( diff --git a/tests/nodes/NodeConnectionManager.general.test.ts b/tests/nodes/NodeConnectionManager.general.test.ts index f0fe65d4e..17035b4dd 100644 --- a/tests/nodes/NodeConnectionManager.general.test.ts +++ b/tests/nodes/NodeConnectionManager.general.test.ts @@ -141,7 +141,10 @@ describe(`${NodeConnectionManager.name} general test`, () => { password, nodePath: path.join(dataDir2, 'remoteNode1'), networkConfig: { - proxyHost: '127.0.0.1' as Host, + proxyHost: localHost, + agentHost: localHost, + clientHost: localHost, + forwardHost: localHost, }, logger: logger.getChild('remoteNode1'), }); @@ -150,7 +153,10 @@ describe(`${NodeConnectionManager.name} general test`, () => { password, nodePath: path.join(dataDir2, 'remoteNode2'), networkConfig: { - proxyHost: '127.0.0.1' as Host, + proxyHost: localHost, + agentHost: localHost, + clientHost: localHost, + forwardHost: localHost, }, logger: logger.getChild('remoteNode2'), }); @@ -246,7 +252,7 
@@ describe(`${NodeConnectionManager.name} general test`, () => { // Case 1: node already exists in the local node graph (no contact required) const nodeId = nodeId1; const nodeAddress: NodeAddress = { - host: '127.0.0.1' as Host, + host: localHost, port: 11111 as Port, }; await nodeGraph.setNode(nodeId, nodeAddress); @@ -261,6 +267,11 @@ describe(`${NodeConnectionManager.name} general test`, () => { test( 'finds node (contacts remote node)', async () => { + const mockedPingNode = jest.spyOn( + NodeConnectionManager.prototype, + 'pingNode', + ); + mockedPingNode.mockImplementation(async () => true); // NodeConnectionManager under test const nodeConnectionManager = new NodeConnectionManager({ keyManager, @@ -274,14 +285,17 @@ describe(`${NodeConnectionManager.name} general test`, () => { // Case 2: node can be found on the remote node const nodeId = nodeId1; const nodeAddress: NodeAddress = { - host: '127.0.0.1' as Host, + host: localHost, port: 11111 as Port, }; const server = await PolykeyAgent.createPolykeyAgent({ nodePath: path.join(dataDir, 'node2'), password, networkConfig: { - proxyHost: '127.0.0.1' as Host, + proxyHost: localHost, + agentHost: localHost, + clientHost: localHost, + forwardHost: localHost, }, logger: nodeConnectionManagerLogger, }); @@ -296,6 +310,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { await server.stop(); } finally { await nodeConnectionManager.stop(); + mockedPingNode.mockRestore(); } }, global.polykeyStartupTimeout, @@ -319,7 +334,10 @@ describe(`${NodeConnectionManager.name} general test`, () => { nodePath: path.join(dataDir, 'node3'), password, networkConfig: { - proxyHost: '127.0.0.1' as Host, + proxyHost: localHost, + agentHost: localHost, + clientHost: localHost, + forwardHost: localHost, }, logger: nodeConnectionManagerLogger, }); @@ -355,7 +373,10 @@ describe(`${NodeConnectionManager.name} general test`, () => { logger: logger.getChild('serverPKAgent'), nodePath: path.join(dataDir, 'serverPKAgent'), 
networkConfig: { - proxyHost: '127.0.0.1' as Host, + proxyHost: localHost, + agentHost: localHost, + clientHost: localHost, + forwardHost: localHost, }, }); nodeConnectionManager = new NodeConnectionManager({ diff --git a/tests/nodes/NodeConnectionManager.seednodes.test.ts b/tests/nodes/NodeConnectionManager.seednodes.test.ts index e6d91f399..63ba90e9d 100644 --- a/tests/nodes/NodeConnectionManager.seednodes.test.ts +++ b/tests/nodes/NodeConnectionManager.seednodes.test.ts @@ -193,6 +193,7 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { // Seed nodes test('starting should add seed nodes to the node graph', async () => { let nodeConnectionManager: NodeConnectionManager | undefined; + let nodeManager: NodeManager | undefined; try { nodeConnectionManager = new NodeConnectionManager({ keyManager, @@ -204,7 +205,17 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { seedNodes: dummySeedNodes, logger: logger, }); - await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); + nodeManager = new NodeManager({ + db, + keyManager, + logger, + nodeConnectionManager, + nodeGraph, + queue: {} as Queue, + sigchain: {} as Sigchain, + }); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); const seedNodes = nodeConnectionManager.getSeedNodes(); expect(seedNodes).toContainEqual(nodeId1); expect(seedNodes).toContainEqual(nodeId2); @@ -216,6 +227,7 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { } finally { // Clean up await nodeConnectionManager?.stop(); + await nodeManager?.stop(); } }); test('should get seed nodes', async () => { @@ -250,6 +262,11 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { 'refreshBucket', ); mockedRefreshBucket.mockImplementation(async () => {}); + const mockedPingNode = jest.spyOn( + NodeConnectionManager.prototype, + 'pingNode', + ); + mockedPingNode.mockImplementation(async () => true); try { const seedNodes: SeedNodes = {}; 
seedNodes[nodesUtils.encodeNodeId(remoteNodeId1)] = { @@ -295,6 +312,7 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { expect(await nodeGraph.getNode(dummyNodeId)).toBeUndefined(); } finally { mockedRefreshBucket.mockRestore(); + mockedPingNode.mockRestore(); await nodeManager?.stop(); await nodeConnectionManager?.stop(); await queue?.stop(); @@ -309,6 +327,11 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { 'refreshBucket', ); mockedRefreshBucket.mockImplementation(async () => {}); + const mockedPingNode = jest.spyOn( + NodeConnectionManager.prototype, + 'pingNode', + ); + mockedPingNode.mockImplementation(async () => true); try { const seedNodes: SeedNodes = {}; seedNodes[nodesUtils.encodeNodeId(remoteNodeId1)] = { @@ -353,6 +376,7 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { expect(mockedRefreshBucket).toHaveBeenCalled(); } finally { mockedRefreshBucket.mockRestore(); + mockedPingNode.mockRestore(); await nodeManager?.stop(); await nodeConnectionManager?.stop(); await queue?.stop(); @@ -367,6 +391,11 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { 'refreshBucket', ); mockedRefreshBucket.mockImplementation(async () => {}); + const mockedPingNode = jest.spyOn( + NodeConnectionManager.prototype, + 'pingNode', + ); + mockedPingNode.mockImplementation(async () => true); try { const seedNodes: SeedNodes = {}; seedNodes[nodesUtils.encodeNodeId(remoteNodeId1)] = { @@ -419,6 +448,7 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { expect(await nodeGraph.getNode(nodeId2)).toBeDefined(); } finally { mockedRefreshBucket.mockRestore(); + mockedPingNode.mockRestore(); await nodeConnectionManager?.stop(); await nodeManager?.stop(); await queue?.stop(); @@ -440,6 +470,11 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { host: remoteNode2.proxy.getProxyHost(), port: remoteNode2.proxy.getProxyPort(), }; + const mockedPingNode = jest.spyOn( + 
NodeConnectionManager.prototype, + 'pingNode', + ); + mockedPingNode.mockImplementation(async () => true); try { logger.setLevel(LogLevel.WARN); node1 = await PolykeyAgent.createPolykeyAgent({ @@ -499,6 +534,7 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { expect(node2Nodes).toContain(nodeIdR2); expect(node2Nodes).toContain(nodeId1); } finally { + mockedPingNode.mockRestore(); logger.setLevel(LogLevel.WARN); await node1?.stop(); await node1?.destroy(); From 376292a3d42e2fcddd4fbde6fe45c070c03e98f8 Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Thu, 9 Jun 2022 13:58:58 +1000 Subject: [PATCH 36/39] fix: small fixes to nodes utils parsing functions --- src/nodes/NodeGraph.ts | 31 ++++++++++++++----------------- src/nodes/utils.ts | 41 ++++++++++++++++++++--------------------- 2 files changed, 34 insertions(+), 38 deletions(-) diff --git a/src/nodes/NodeGraph.ts b/src/nodes/NodeGraph.ts index c9ebaf0f3..6bd6b2f2d 100644 --- a/src/nodes/NodeGraph.ts +++ b/src/nodes/NodeGraph.ts @@ -1,4 +1,4 @@ -import type { DB, DBTransaction, LevelPath } from '@matrixai/db'; +import type { DB, DBTransaction, KeyPath, LevelPath } from '@matrixai/db'; import type { NodeId, NodeAddress, @@ -191,14 +191,14 @@ class NodeGraph { }); } - for await (const [key, nodeData] of tran.iterator( + for await (const [keyPath, nodeData] of tran.iterator( { reverse: order !== 'asc', valueAsBuffer: false, }, this.nodeGraphBucketsDbPath, )) { - const { nodeId } = nodesUtils.parseBucketsDbKey(key as Array); + const { nodeId } = nodesUtils.parseBucketsDbKey(keyPath); yield [nodeId, nodeData]; } } @@ -273,13 +273,11 @@ class NodeGraph { const bucketKey = nodesUtils.bucketKey(bucketIndex); // Remove the oldest entry in the bucket const oldestNodeIds: Array = []; - for await (const [key] of tran.iterator({ limit }, [ + for await (const [keyPath] of tran.iterator({ limit }, [ ...this.nodeGraphLastUpdatedDbPath, bucketKey, ])) { - const { nodeId } = nodesUtils.parseLastUpdatedBucketDbKey( 
- key as Array, - ); + const { nodeId } = nodesUtils.parseLastUpdatedBucketDbKey(keyPath); oldestNodeIds.push(nodeId); } return oldestNodeIds; @@ -421,7 +419,7 @@ class NodeGraph { this.nodeGraphBucketsDbPath, )) { const { bucketIndex: bucketIndex_, nodeId } = - nodesUtils.parseBucketsDbKey(key as Array); + nodesUtils.parseBucketsDbKey(key); if (bucketIndex == null) { // First entry of the first bucket bucketIndex = bucketIndex_; @@ -467,7 +465,7 @@ class NodeGraph { this.nodeGraphLastUpdatedDbPath, )) { const { bucketIndex: bucketIndex_, nodeId } = - nodesUtils.parseLastUpdatedBucketsDbKey(key as Array); + nodesUtils.parseLastUpdatedBucketsDbKey(key); bucketsDbIterator.seek([key[0], key[2]]); // @ts-ignore // eslint-disable-next-line @@ -535,7 +533,7 @@ class NodeGraph { )) { // The key is a combined bucket key and node ID const { bucketIndex: bucketIndexOld, nodeId } = - nodesUtils.parseBucketsDbKey(key as Array); + nodesUtils.parseBucketsDbKey(key); const nodeIdEncoded = nodesUtils.encodeNodeId(nodeId); const nodeIdKey = nodesUtils.bucketDbKey(nodeId); // If the new own node ID is one of the existing node IDs, it is just dropped @@ -555,7 +553,7 @@ class NodeGraph { if (countNew < this.nodeBucketLimit) { await tran.put([...metaPathNew, 'count'], countNew + 1); } else { - let oldestIndexKey: Array | undefined = undefined; + let oldestIndexKey: KeyPath | undefined = undefined; let oldestNodeId: NodeId | undefined = undefined; for await (const [key] of tran.iterator( { @@ -563,10 +561,9 @@ class NodeGraph { }, indexPathNew, )) { - oldestIndexKey = key as Array; - ({ nodeId: oldestNodeId } = nodesUtils.parseLastUpdatedBucketDbKey( - key as Array, - )); + oldestIndexKey = key; + ({ nodeId: oldestNodeId } = + nodesUtils.parseLastUpdatedBucketDbKey(key)); } await tran.del([ ...bucketPathNew, @@ -730,7 +727,7 @@ class NodeGraph { }, this.nodeGraphBucketsDbPath, )) { - const info = nodesUtils.parseBucketsDbKey(key as Array); + const info = 
nodesUtils.parseBucketsDbKey(key); nodeIds.push([info.nodeId, nodeData]); } } @@ -754,7 +751,7 @@ class NodeGraph { }, this.nodeGraphBucketsDbPath, )) { - const info = nodesUtils.parseBucketsDbKey(key as Array); + const info = nodesUtils.parseBucketsDbKey(key); nodeIds.push([info.nodeId, nodeData]); } } diff --git a/src/nodes/utils.ts b/src/nodes/utils.ts index 0078ef784..1fe3c799d 100644 --- a/src/nodes/utils.ts +++ b/src/nodes/utils.ts @@ -1,9 +1,10 @@ import type { - NodeId, - NodeIdEncoded, NodeBucket, NodeBucketIndex, + NodeId, + NodeIdEncoded, } from './types'; +import type { KeyPath } from '@matrixai/db'; import { IdInternal } from '@matrixai/id'; import lexi from 'lexicographic-integer'; import { utils as dbUtils } from '@matrixai/db'; @@ -143,18 +144,18 @@ function lastUpdatedKey(lastUpdated: number): Buffer { * The keys look like `!!` * It is assumed that the `!` is the sublevel prefix. */ -function parseBucketsDbKey(keyBufferArray: Array): { +function parseBucketsDbKey(keyPath: KeyPath): { bucketIndex: NodeBucketIndex; bucketKey: string; nodeId: NodeId; } { - const [bucketKeyBuffer, nodeIdBuffer] = keyBufferArray; - if (bucketKeyBuffer == null || nodeIdBuffer == null) { + const [bucketKeyPath, nodeIdKey] = keyPath; + if (bucketKeyPath == null || nodeIdKey == null) { throw new TypeError('Buffer is not an NodeGraph buckets key'); } - const bucketKey = bucketKeyBuffer.toString(); + const bucketKey = bucketKeyPath.toString(); const bucketIndex = lexi.unpack(bucketKey); - const nodeId = IdInternal.fromBuffer(nodeIdBuffer); + const nodeId = IdInternal.fromBuffer(Buffer.from(nodeIdKey)); return { bucketIndex, bucketKey, @@ -167,8 +168,7 @@ function parseBucketsDbKey(keyBufferArray: Array): { * The keys look like `` */ function parseBucketDbKey(keyBuffer: Buffer): NodeId { - const nodeId = IdInternal.fromBuffer(keyBuffer); - return nodeId; + return IdInternal.fromBuffer(keyBuffer); } /** @@ -176,24 +176,23 @@ function parseBucketDbKey(keyBuffer: Buffer): NodeId 
{ * The keys look like `!!-` * It is assumed that the `!` is the sublevel prefix. */ -function parseLastUpdatedBucketsDbKey(keyBufferArray: Array): { +function parseLastUpdatedBucketsDbKey(keyPath: KeyPath): { bucketIndex: NodeBucketIndex; bucketKey: string; lastUpdated: number; nodeId: NodeId; } { - const [bucketKeyBuffer, ...lastUpdatedBufferArray] = keyBufferArray; - if (bucketKeyBuffer == null || lastUpdatedBufferArray == null) { + const [bucketLevel, ...lastUpdatedKeyPath] = keyPath; + if (bucketLevel == null || lastUpdatedKeyPath == null) { throw new TypeError('Buffer is not an NodeGraph index key'); } - const bucketKey = bucketKeyBuffer.toString(); + const bucketKey = bucketLevel.toString(); const bucketIndex = lexi.unpack(bucketKey); if (bucketIndex == null) { throw new TypeError('Buffer is not an NodeGraph index key'); } - const { lastUpdated, nodeId } = parseLastUpdatedBucketDbKey( - lastUpdatedBufferArray, - ); + const { lastUpdated, nodeId } = + parseLastUpdatedBucketDbKey(lastUpdatedKeyPath); return { bucketIndex, bucketKey, @@ -207,19 +206,19 @@ function parseLastUpdatedBucketsDbKey(keyBufferArray: Array): { * The keys look like `-` * It is assumed that the `!` is the sublevel prefix. 
*/ -function parseLastUpdatedBucketDbKey(keyBufferArray: Array): { +function parseLastUpdatedBucketDbKey(keyPath: KeyPath): { lastUpdated: number; nodeId: NodeId; } { - const [lastUpdatedBuffer, nodeIdBuffer] = keyBufferArray; - if (lastUpdatedBuffer == null || nodeIdBuffer == null) { + const [lastUpdatedLevel, nodeIdKey] = keyPath; + if (lastUpdatedLevel == null || nodeIdKey == null) { throw new TypeError('Buffer is not an NodeGraph index bucket key'); } - const lastUpdated = lexi.unpack(lastUpdatedBuffer.toString()); + const lastUpdated = lexi.unpack(lastUpdatedLevel.toString()); if (lastUpdated == null) { throw new TypeError('Buffer is not an NodeGraph index bucket key'); } - const nodeId = IdInternal.fromBuffer(nodeIdBuffer); + const nodeId = IdInternal.fromBuffer(Buffer.from(nodeIdKey)); return { lastUpdated, nodeId, From 22e149108efd15a7f25ac84b2fd3bf5e684077fd Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Thu, 9 Jun 2022 18:14:01 +1000 Subject: [PATCH 37/39] tests: expanding tests for root keypair changing Added some tests to check that a root keyPair change propagates properly. Also added tests for the change for existing and new node connections. 
#317 --- tests/PolykeyAgent.test.ts | 74 ++++++ tests/nodes/NodeConnection.test.ts | 362 +++++++++++++++++++++++++++++ 2 files changed, 436 insertions(+) diff --git a/tests/PolykeyAgent.test.ts b/tests/PolykeyAgent.test.ts index 9423050ab..7cb1f2fc7 100644 --- a/tests/PolykeyAgent.test.ts +++ b/tests/PolykeyAgent.test.ts @@ -1,4 +1,5 @@ import type { StateVersion } from '@/schema/types'; +import type { KeyManagerChangeData } from '@/keys/types'; import os from 'os'; import path from 'path'; import fs from 'fs'; @@ -9,6 +10,7 @@ import { Status } from '@/status'; import { Schema } from '@/schema'; import * as errors from '@/errors'; import config from '@/config'; +import { promise } from '@/utils/index'; import * as testUtils from './utils'; describe('PolykeyAgent', () => { @@ -175,4 +177,76 @@ describe('PolykeyAgent', () => { }), ).rejects.toThrow(errors.ErrorSchemaVersionTooOld); }); + test('renewRootKeyPair change event propagates', async () => { + const nodePath = `${dataDir}/polykey`; + let pkAgent: PolykeyAgent | undefined; + try { + pkAgent = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath, + logger, + }); + const prom = promise(); + pkAgent.events.on( + PolykeyAgent.eventSymbols.KeyManager, + async (data: KeyManagerChangeData) => { + prom.resolveP(data); + }, + ); + await pkAgent.keyManager.renewRootKeyPair(password); + + await expect(prom.p).resolves.toBeDefined(); + } finally { + await pkAgent?.stop(); + await pkAgent?.destroy(); + } + }); + test('resetRootKeyPair change event propagates', async () => { + const nodePath = `${dataDir}/polykey`; + let pkAgent: PolykeyAgent | undefined; + try { + pkAgent = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath, + logger, + }); + const prom = promise(); + pkAgent.events.on( + PolykeyAgent.eventSymbols.KeyManager, + async (data: KeyManagerChangeData) => { + prom.resolveP(data); + }, + ); + await pkAgent.keyManager.resetRootKeyPair(password); + + await 
expect(prom.p).resolves.toBeDefined(); + } finally { + await pkAgent?.stop(); + await pkAgent?.destroy(); + } + }); + test('resetRootCert change event propagates', async () => { + const nodePath = `${dataDir}/polykey`; + let pkAgent: PolykeyAgent | undefined; + try { + pkAgent = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath, + logger, + }); + const prom = promise(); + pkAgent.events.on( + PolykeyAgent.eventSymbols.KeyManager, + async (data: KeyManagerChangeData) => { + prom.resolveP(data); + }, + ); + await pkAgent.keyManager.resetRootCert(); + + await expect(prom.p).resolves.toBeDefined(); + } finally { + await pkAgent?.stop(); + await pkAgent?.destroy(); + } + }); }); diff --git a/tests/nodes/NodeConnection.test.ts b/tests/nodes/NodeConnection.test.ts index dbd95397e..b5bac69e5 100644 --- a/tests/nodes/NodeConnection.test.ts +++ b/tests/nodes/NodeConnection.test.ts @@ -174,6 +174,13 @@ describe(`${NodeConnection.name} test`, () => { }; } + const newTlsConfig = async (keyManager: KeyManager): Promise => { + return { + keyPrivatePem: keyManager.getRootKeyPairPem().privateKey, + certChainPem: await keyManager.getRootCertChainPem(), + }; + }; + beforeEach(async () => { // Server setup serverDataDir = await fs.promises.mkdtemp( @@ -852,4 +859,359 @@ describe(`${NodeConnection.name} test`, () => { }, global.defaultTimeout * 2, ); + + test('existing connection handles a resetRootKeyPair on sending side', async () => { + let conn: NodeConnection | undefined; + try { + conn = await NodeConnection.createNodeConnection({ + targetNodeId: targetNodeId, + targetHost: localHost, + targetPort: targetPort, + proxy: clientProxy, + keyManager: clientKeyManager, + nodeConnectionManager: dummyNodeConnectionManager, + destroyCallback, + logger: logger, + clientFactory: async (args) => + GRPCClientAgent.createGRPCClientAgent(args), + timer: timerStart(2000), + }); + const client = conn.getClient(); + await client.echo(new utilsPB.EchoMessage().setChallenge('hello!')); 
+ + // Simulate key change + await clientKeyManager.resetRootKeyPair(password); + clientProxy.setTLSConfig(await newTlsConfig(clientKeyManager)); + + // Try again + await client.echo(new utilsPB.EchoMessage().setChallenge('hello!')); + } finally { + await conn?.destroy(); + } + }); + test('existing connection handles a renewRootKeyPair on sending side', async () => { + let conn: NodeConnection | undefined; + try { + conn = await NodeConnection.createNodeConnection({ + targetNodeId: targetNodeId, + targetHost: localHost, + targetPort: targetPort, + proxy: clientProxy, + keyManager: clientKeyManager, + nodeConnectionManager: dummyNodeConnectionManager, + destroyCallback, + logger: logger, + clientFactory: async (args) => + GRPCClientAgent.createGRPCClientAgent(args), + timer: timerStart(2000), + }); + const client = conn.getClient(); + await client.echo(new utilsPB.EchoMessage().setChallenge('hello!')); + + // Simulate key change + await clientKeyManager.renewRootKeyPair(password); + clientProxy.setTLSConfig(await newTlsConfig(clientKeyManager)); + + // Try again + await client.echo(new utilsPB.EchoMessage().setChallenge('hello!')); + } finally { + await conn?.destroy(); + } + }); + test('existing connection handles a resetRootCert on sending side', async () => { + let conn: NodeConnection | undefined; + try { + conn = await NodeConnection.createNodeConnection({ + targetNodeId: targetNodeId, + targetHost: localHost, + targetPort: targetPort, + proxy: clientProxy, + keyManager: clientKeyManager, + nodeConnectionManager: dummyNodeConnectionManager, + destroyCallback, + logger: logger, + clientFactory: async (args) => + GRPCClientAgent.createGRPCClientAgent(args), + timer: timerStart(2000), + }); + const client = conn.getClient(); + await client.echo(new utilsPB.EchoMessage().setChallenge('hello!')); + + // Simulate key change + await clientKeyManager.resetRootCert(); + clientProxy.setTLSConfig(await newTlsConfig(clientKeyManager)); + + // Try again + await 
client.echo(new utilsPB.EchoMessage().setChallenge('hello!')); + } finally { + await conn?.destroy(); + } + }); + test('existing connection handles a resetRootKeyPair on receiving side', async () => { + let conn: NodeConnection | undefined; + try { + conn = await NodeConnection.createNodeConnection({ + targetNodeId: targetNodeId, + targetHost: localHost, + targetPort: targetPort, + proxy: clientProxy, + keyManager: clientKeyManager, + nodeConnectionManager: dummyNodeConnectionManager, + destroyCallback, + logger: logger, + clientFactory: async (args) => + GRPCClientAgent.createGRPCClientAgent(args), + timer: timerStart(2000), + }); + const client = conn.getClient(); + await client.echo(new utilsPB.EchoMessage().setChallenge('hello!')); + + // Simulate key change + await serverKeyManager.resetRootKeyPair(password); + serverProxy.setTLSConfig(await newTlsConfig(serverKeyManager)); + + // Try again + await client.echo(new utilsPB.EchoMessage().setChallenge('hello!')); + } finally { + await conn?.destroy(); + } + }); + test('existing connection handles a renewRootKeyPair on receiving side', async () => { + let conn: NodeConnection | undefined; + try { + conn = await NodeConnection.createNodeConnection({ + targetNodeId: targetNodeId, + targetHost: localHost, + targetPort: targetPort, + proxy: clientProxy, + keyManager: clientKeyManager, + nodeConnectionManager: dummyNodeConnectionManager, + destroyCallback, + logger: logger, + clientFactory: async (args) => + GRPCClientAgent.createGRPCClientAgent(args), + timer: timerStart(2000), + }); + const client = conn.getClient(); + await client.echo(new utilsPB.EchoMessage().setChallenge('hello!')); + + // Simulate key change + await serverKeyManager.renewRootKeyPair(password); + serverProxy.setTLSConfig(await newTlsConfig(serverKeyManager)); + + // Try again + await client.echo(new utilsPB.EchoMessage().setChallenge('hello!')); + } finally { + await conn?.destroy(); + } + }); + test('existing connection handles a resetRootCert 
on receiving side', async () => { + let conn: NodeConnection | undefined; + try { + conn = await NodeConnection.createNodeConnection({ + targetNodeId: targetNodeId, + targetHost: localHost, + targetPort: targetPort, + proxy: clientProxy, + keyManager: clientKeyManager, + nodeConnectionManager: dummyNodeConnectionManager, + destroyCallback, + logger: logger, + clientFactory: async (args) => + GRPCClientAgent.createGRPCClientAgent(args), + timer: timerStart(2000), + }); + const client = conn.getClient(); + await client.echo(new utilsPB.EchoMessage().setChallenge('hello!')); + + // Simulate key change + await serverKeyManager.resetRootCert(); + serverProxy.setTLSConfig(await newTlsConfig(serverKeyManager)); + + // Try again + await client.echo(new utilsPB.EchoMessage().setChallenge('hello!')); + } finally { + await conn?.destroy(); + } + }); + test('new connection handles a resetRootKeyPair on sending side', async () => { + let conn: NodeConnection | undefined; + try { + // Simulate key change + await clientKeyManager.resetRootKeyPair(password); + clientProxy.setTLSConfig(await newTlsConfig(clientKeyManager)); + + conn = await NodeConnection.createNodeConnection({ + targetNodeId: targetNodeId, + targetHost: localHost, + targetPort: targetPort, + proxy: clientProxy, + keyManager: clientKeyManager, + nodeConnectionManager: dummyNodeConnectionManager, + destroyCallback, + logger: logger, + clientFactory: async (args) => + GRPCClientAgent.createGRPCClientAgent(args), + timer: timerStart(2000), + }); + + const client = conn.getClient(); + await client.echo(new utilsPB.EchoMessage().setChallenge('hello!')); + } finally { + await conn?.destroy(); + } + }); + test('new connection handles a renewRootKeyPair on sending side', async () => { + let conn: NodeConnection | undefined; + try { + // Simulate key change + await clientKeyManager.renewRootKeyPair(password); + clientProxy.setTLSConfig(await newTlsConfig(clientKeyManager)); + + conn = await 
NodeConnection.createNodeConnection({ + targetNodeId: targetNodeId, + targetHost: localHost, + targetPort: targetPort, + proxy: clientProxy, + keyManager: clientKeyManager, + nodeConnectionManager: dummyNodeConnectionManager, + destroyCallback, + logger: logger, + clientFactory: async (args) => + GRPCClientAgent.createGRPCClientAgent(args), + timer: timerStart(2000), + }); + + const client = conn.getClient(); + await client.echo(new utilsPB.EchoMessage().setChallenge('hello!')); + } finally { + await conn?.destroy(); + } + }); + test('new connection handles a resetRootCert on sending side', async () => { + let conn: NodeConnection | undefined; + try { + // Simulate key change + await clientKeyManager.resetRootCert(); + clientProxy.setTLSConfig(await newTlsConfig(clientKeyManager)); + + conn = await NodeConnection.createNodeConnection({ + targetNodeId: targetNodeId, + targetHost: localHost, + targetPort: targetPort, + proxy: clientProxy, + keyManager: clientKeyManager, + nodeConnectionManager: dummyNodeConnectionManager, + destroyCallback, + logger: logger, + clientFactory: async (args) => + GRPCClientAgent.createGRPCClientAgent(args), + timer: timerStart(2000), + }); + + const client = conn.getClient(); + await client.echo(new utilsPB.EchoMessage().setChallenge('hello!')); + } finally { + await conn?.destroy(); + } + }); + test('new connection handles a resetRootKeyPair on receiving side', async () => { + // Simulate key change + await serverKeyManager.resetRootKeyPair(password); + serverProxy.setTLSConfig(await newTlsConfig(serverKeyManager)); + + const connProm = NodeConnection.createNodeConnection({ + targetNodeId: targetNodeId, + targetHost: localHost, + targetPort: targetPort, + proxy: clientProxy, + keyManager: clientKeyManager, + nodeConnectionManager: dummyNodeConnectionManager, + destroyCallback, + logger: logger, + clientFactory: async (args) => + GRPCClientAgent.createGRPCClientAgent(args), + timer: timerStart(2000), + }); + + await 
expect(connProm).rejects.toThrow( + nodesErrors.ErrorNodeConnectionTimeout, + ); + + // Connect with the new NodeId + let conn: NodeConnection | undefined; + try { + conn = await NodeConnection.createNodeConnection({ + targetNodeId: serverKeyManager.getNodeId(), + targetHost: localHost, + targetPort: targetPort, + proxy: clientProxy, + keyManager: clientKeyManager, + nodeConnectionManager: dummyNodeConnectionManager, + destroyCallback, + logger: logger, + clientFactory: async (args) => + GRPCClientAgent.createGRPCClientAgent(args), + timer: timerStart(2000), + }); + const client = conn.getClient(); + await client.echo(new utilsPB.EchoMessage().setChallenge('hello!')); + } finally { + await conn?.destroy(); + } + }); + test('new connection handles a renewRootKeyPair on receiving side', async () => { + let conn: NodeConnection | undefined; + try { + // Simulate key change + await serverKeyManager.renewRootKeyPair(password); + serverProxy.setTLSConfig(await newTlsConfig(serverKeyManager)); + + conn = await NodeConnection.createNodeConnection({ + targetNodeId: targetNodeId, + targetHost: localHost, + targetPort: targetPort, + proxy: clientProxy, + keyManager: clientKeyManager, + nodeConnectionManager: dummyNodeConnectionManager, + destroyCallback, + logger: logger, + clientFactory: async (args) => + GRPCClientAgent.createGRPCClientAgent(args), + timer: timerStart(2000), + }); + + const client = conn.getClient(); + await client.echo(new utilsPB.EchoMessage().setChallenge('hello!')); + } finally { + await conn?.destroy(); + } + }); + test('new connection handles a resetRootCert on receiving side', async () => { + let conn: NodeConnection | undefined; + try { + // Simulate key change + await serverKeyManager.resetRootCert(); + serverProxy.setTLSConfig(await newTlsConfig(serverKeyManager)); + + conn = await NodeConnection.createNodeConnection({ + targetNodeId: targetNodeId, + targetHost: localHost, + targetPort: targetPort, + proxy: clientProxy, + keyManager: 
clientKeyManager, + nodeConnectionManager: dummyNodeConnectionManager, + destroyCallback, + logger: logger, + clientFactory: async (args) => + GRPCClientAgent.createGRPCClientAgent(args), + timer: timerStart(2000), + }); + + const client = conn.getClient(); + await client.echo(new utilsPB.EchoMessage().setChallenge('hello!')); + } finally { + await conn?.destroy(); + } + }); }); From c52bf2c9ca4626f4901ca462a24ec89b84d3aae1 Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Fri, 10 Jun 2022 16:26:23 +1000 Subject: [PATCH 38/39] build: updating package-lock.json --- package-lock.json | 37 +++++++++++++++++++++++++++++-------- 1 file changed, 29 insertions(+), 8 deletions(-) diff --git a/package-lock.json b/package-lock.json index ed112e333..d0a5a8765 100644 --- a/package-lock.json +++ b/package-lock.json @@ -55,7 +55,7 @@ "@types/jest": "^27.0.2", "@types/nexpect": "^0.4.31", "@types/node": "^16.11.7", - "@types/node-forge": "^0.9.7", + "@types/node-forge": "^0.10.4", "@types/pako": "^1.0.2", "@types/prompts": "^2.0.13", "@types/readable-stream": "^2.3.11", @@ -2647,9 +2647,10 @@ "license": "MIT" }, "node_modules/@types/node-forge": { - "version": "0.9.10", + "version": "0.10.10", + "resolved": "https://registry.npmjs.org/@types/node-forge/-/node-forge-0.10.10.tgz", + "integrity": "sha512-iixn5bedlE9fm/5mN7fPpXraXlxCVrnNWHZekys8c5fknridLVWGnNRqlaWpenwaijIuB3bNI0lEOm+JD6hZUA==", "dev": true, - "license": "MIT", "dependencies": { "@types/node": "*" } @@ -3374,6 +3375,7 @@ }, "node_modules/balanced-match": { "version": "1.0.2", + "dev": true, "license": "MIT" }, "node_modules/base64-js": { @@ -3454,6 +3456,7 @@ }, "node_modules/brace-expansion": { "version": "1.1.11", + "dev": true, "license": "MIT", "dependencies": { "balanced-match": "^1.0.0", @@ -3774,6 +3777,7 @@ }, "node_modules/concat-map": { "version": "0.0.1", + "dev": true, "license": "MIT" }, "node_modules/console-control-strings": { @@ -5174,6 +5178,7 @@ }, "node_modules/fs.realpath": { "version": "1.0.0", + 
"dev": true, "license": "ISC" }, "node_modules/function-bind": { @@ -5309,6 +5314,7 @@ }, "node_modules/glob": { "version": "7.2.0", + "dev": true, "license": "ISC", "dependencies": { "fs.realpath": "^1.0.0", @@ -5645,6 +5651,7 @@ }, "node_modules/inflight": { "version": "1.0.6", + "dev": true, "license": "ISC", "dependencies": { "once": "^1.3.0", @@ -8103,6 +8110,7 @@ }, "node_modules/minimatch": { "version": "3.1.2", + "dev": true, "license": "ISC", "dependencies": { "brace-expansion": "^1.1.7" @@ -8628,6 +8636,7 @@ }, "node_modules/path-is-absolute": { "version": "1.0.1", + "dev": true, "license": "MIT", "engines": { "node": ">=0.10.0" @@ -9372,6 +9381,7 @@ }, "node_modules/rimraf": { "version": "3.0.2", + "dev": true, "license": "ISC", "dependencies": { "glob": "^7.1.3" @@ -12455,7 +12465,9 @@ "version": "16.11.35" }, "@types/node-forge": { - "version": "0.9.10", + "version": "0.10.10", + "resolved": "https://registry.npmjs.org/@types/node-forge/-/node-forge-0.10.10.tgz", + "integrity": "sha512-iixn5bedlE9fm/5mN7fPpXraXlxCVrnNWHZekys8c5fknridLVWGnNRqlaWpenwaijIuB3bNI0lEOm+JD6hZUA==", "dev": true, "requires": { "@types/node": "*" @@ -12918,7 +12930,8 @@ } }, "balanced-match": { - "version": "1.0.2" + "version": "1.0.2", + "dev": true }, "base64-js": { "version": "1.5.1" @@ -12964,6 +12977,7 @@ }, "brace-expansion": { "version": "1.1.11", + "dev": true, "requires": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -13156,7 +13170,8 @@ "version": "8.3.0" }, "concat-map": { - "version": "0.0.1" + "version": "0.0.1", + "dev": true }, "console-control-strings": { "version": "1.1.0", @@ -14086,7 +14101,8 @@ } }, "fs.realpath": { - "version": "1.0.0" + "version": "1.0.0", + "dev": true }, "function-bind": { "version": "1.1.1" @@ -14170,6 +14186,7 @@ }, "glob": { "version": "7.2.0", + "dev": true, "requires": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -14363,6 +14380,7 @@ }, "inflight": { "version": "1.0.6", + "dev": true, "requires": { "once": "^1.3.0", 
"wrappy": "1" @@ -15912,6 +15930,7 @@ }, "minimatch": { "version": "3.1.2", + "dev": true, "requires": { "brace-expansion": "^1.1.7" } @@ -16235,7 +16254,8 @@ "dev": true }, "path-is-absolute": { - "version": "1.0.1" + "version": "1.0.1", + "dev": true }, "path-key": { "version": "3.1.1" @@ -16694,6 +16714,7 @@ }, "rimraf": { "version": "3.0.2", + "dev": true, "requires": { "glob": "^7.1.3" } From 7253de836f80791e686c7a803bcfc843bdb7990b Mon Sep 17 00:00:00 2001 From: Brian Botha Date: Fri, 10 Jun 2022 17:23:17 +1000 Subject: [PATCH 39/39] fix: removed unneeded timer from NodeConnection test --- tests/nodes/NodeConnection.test.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/nodes/NodeConnection.test.ts b/tests/nodes/NodeConnection.test.ts index b5bac69e5..beeb841ed 100644 --- a/tests/nodes/NodeConnection.test.ts +++ b/tests/nodes/NodeConnection.test.ts @@ -1152,7 +1152,6 @@ describe(`${NodeConnection.name} test`, () => { logger: logger, clientFactory: async (args) => GRPCClientAgent.createGRPCClientAgent(args), - timer: timerStart(2000), }); const client = conn.getClient(); await client.echo(new utilsPB.EchoMessage().setChallenge('hello!'));