diff --git a/ORAM_SNAPSHOT/README.md b/ORAM_SNAPSHOT/README.md new file mode 100644 index 000000000..90fe023cd --- /dev/null +++ b/ORAM_SNAPSHOT/README.md @@ -0,0 +1,26 @@ +This is an enhanced implementation of Path ORAM. + +To run local Redis server: ./start_redis_serve.sh + +To run the tests: ./run_test_suite.sh + +To run main: ./makerun.sh + + +## How to do snapshot: +In generate_db_snapshot.go, change + +const ( + logCapacity = 10 // Logarithm base 2 of capacity (1024 buckets) + Z = 4 // Number of blocks per bucket + stashSize = 20 // Maximum number of blocks in stash +) + +Make sure to place the tracefile and define the path in `ORAM_SNAPSHOT/pathOram/tests/generate_db_snapshot.go` + +The generated snapshot in : dump.rdb and proxy_snapshot.json in the root of this directory + +## How to use snapshot: +Set correct value for arguments logCapacity, Z, stashSize, set the -snapshot flag, tracefile path. + +- Make sure that the generate rdb and executor oram code use the same Key with input `oblisqloram` diff --git a/ORAM_SNAPSHOT/cmd/oramExecutor/oramExecutor b/ORAM_SNAPSHOT/cmd/oramExecutor/oramExecutor new file mode 100755 index 000000000..8246cdbe7 Binary files /dev/null and b/ORAM_SNAPSHOT/cmd/oramExecutor/oramExecutor differ diff --git a/ORAM_SNAPSHOT/pathOram/api/executor.proto b/ORAM_SNAPSHOT/pathOram/api/executor.proto new file mode 100644 index 000000000..4ef554c4c --- /dev/null +++ b/ORAM_SNAPSHOT/pathOram/api/executor.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; + +option go_package = "Executor/service;executor"; + +import "google/protobuf/wrappers.proto"; + +message requestBatch { + int64 requestId = 1; + repeated string keys = 2; + repeated string values = 3; +} + +message respondBatch { + int64 requestId = 1; + repeated string keys =2; + repeated string values =3; +} + +service Executor{ + rpc executeBatch(requestBatch) returns (respondBatch); + rpc initDb(requestBatch) returns (google.protobuf.BoolValue); +} diff --git 
a/ORAM_SNAPSHOT/pathOram/api/executor/executor.pb.go b/ORAM_SNAPSHOT/pathOram/api/executor/executor.pb.go new file mode 100644 index 000000000..5df5aa95f --- /dev/null +++ b/ORAM_SNAPSHOT/pathOram/api/executor/executor.pb.go @@ -0,0 +1,258 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.12.4 +// source: executor.proto + +package executor + +import ( + wrappers "github.com/golang/protobuf/ptypes/wrappers" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type RequestBatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RequestId int64 `protobuf:"varint,1,opt,name=requestId,proto3" json:"requestId,omitempty"` + Keys []string `protobuf:"bytes,2,rep,name=keys,proto3" json:"keys,omitempty"` + Values []string `protobuf:"bytes,3,rep,name=values,proto3" json:"values,omitempty"` +} + +func (x *RequestBatch) Reset() { + *x = RequestBatch{} + if protoimpl.UnsafeEnabled { + mi := &file_executor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestBatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestBatch) ProtoMessage() {} + +func (x *RequestBatch) ProtoReflect() protoreflect.Message { + mi := &file_executor_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
RequestBatch.ProtoReflect.Descriptor instead. +func (*RequestBatch) Descriptor() ([]byte, []int) { + return file_executor_proto_rawDescGZIP(), []int{0} +} + +func (x *RequestBatch) GetRequestId() int64 { + if x != nil { + return x.RequestId + } + return 0 +} + +func (x *RequestBatch) GetKeys() []string { + if x != nil { + return x.Keys + } + return nil +} + +func (x *RequestBatch) GetValues() []string { + if x != nil { + return x.Values + } + return nil +} + +type RespondBatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RequestId int64 `protobuf:"varint,1,opt,name=requestId,proto3" json:"requestId,omitempty"` + Keys []string `protobuf:"bytes,2,rep,name=keys,proto3" json:"keys,omitempty"` + Values []string `protobuf:"bytes,3,rep,name=values,proto3" json:"values,omitempty"` +} + +func (x *RespondBatch) Reset() { + *x = RespondBatch{} + if protoimpl.UnsafeEnabled { + mi := &file_executor_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RespondBatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RespondBatch) ProtoMessage() {} + +func (x *RespondBatch) ProtoReflect() protoreflect.Message { + mi := &file_executor_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RespondBatch.ProtoReflect.Descriptor instead. 
+func (*RespondBatch) Descriptor() ([]byte, []int) { + return file_executor_proto_rawDescGZIP(), []int{1} +} + +func (x *RespondBatch) GetRequestId() int64 { + if x != nil { + return x.RequestId + } + return 0 +} + +func (x *RespondBatch) GetKeys() []string { + if x != nil { + return x.Keys + } + return nil +} + +func (x *RespondBatch) GetValues() []string { + if x != nil { + return x.Values + } + return nil +} + +var File_executor_proto protoreflect.FileDescriptor + +var file_executor_proto_rawDesc = []byte{ + 0x0a, 0x0e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x58, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, + 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x12, + 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x65, + 0x79, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x58, 0x0a, 0x0c, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x42, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x16, 0x0a, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x73, 0x32, 0x6d, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, + 0x12, 0x2c, 
0x0a, 0x0c, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, + 0x12, 0x0d, 0x2e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x1a, + 0x0d, 0x2e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x42, 0x61, 0x74, 0x63, 0x68, 0x12, 0x33, + 0x0a, 0x06, 0x69, 0x6e, 0x69, 0x74, 0x44, 0x62, 0x12, 0x0d, 0x2e, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x1a, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x42, 0x1b, 0x5a, 0x19, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3b, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_executor_proto_rawDescOnce sync.Once + file_executor_proto_rawDescData = file_executor_proto_rawDesc +) + +func file_executor_proto_rawDescGZIP() []byte { + file_executor_proto_rawDescOnce.Do(func() { + file_executor_proto_rawDescData = protoimpl.X.CompressGZIP(file_executor_proto_rawDescData) + }) + return file_executor_proto_rawDescData +} + +var file_executor_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_executor_proto_goTypes = []interface{}{ + (*RequestBatch)(nil), // 0: requestBatch + (*RespondBatch)(nil), // 1: respondBatch + (*wrappers.BoolValue)(nil), // 2: google.protobuf.BoolValue +} +var file_executor_proto_depIdxs = []int32{ + 0, // 0: Executor.executeBatch:input_type -> requestBatch + 0, // 1: Executor.initDb:input_type -> requestBatch + 1, // 2: Executor.executeBatch:output_type -> respondBatch + 2, // 3: Executor.initDb:output_type -> google.protobuf.BoolValue + 2, // [2:4] is the sub-list for method output_type + 0, // [0:2] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field 
type_name +} + +func init() { file_executor_proto_init() } +func file_executor_proto_init() { + if File_executor_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_executor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestBatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_executor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RespondBatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_executor_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_executor_proto_goTypes, + DependencyIndexes: file_executor_proto_depIdxs, + MessageInfos: file_executor_proto_msgTypes, + }.Build() + File_executor_proto = out.File + file_executor_proto_rawDesc = nil + file_executor_proto_goTypes = nil + file_executor_proto_depIdxs = nil +} diff --git a/ORAM_SNAPSHOT/pathOram/api/executor/executor_grpc.pb.go b/ORAM_SNAPSHOT/pathOram/api/executor/executor_grpc.pb.go new file mode 100644 index 000000000..24a619486 --- /dev/null +++ b/ORAM_SNAPSHOT/pathOram/api/executor/executor_grpc.pb.go @@ -0,0 +1,160 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v3.12.4 +// source: executor.proto + +package executor + +import ( + context "context" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + Executor_ExecuteBatch_FullMethodName = "/Executor/executeBatch" + Executor_InitDb_FullMethodName = "/Executor/initDb" +) + +// ExecutorClient is the client API for Executor service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ExecutorClient interface { + ExecuteBatch(ctx context.Context, in *RequestBatch, opts ...grpc.CallOption) (*RespondBatch, error) + InitDb(ctx context.Context, in *RequestBatch, opts ...grpc.CallOption) (*wrappers.BoolValue, error) +} + +type executorClient struct { + cc grpc.ClientConnInterface +} + +func NewExecutorClient(cc grpc.ClientConnInterface) ExecutorClient { + return &executorClient{cc} +} + +func (c *executorClient) ExecuteBatch(ctx context.Context, in *RequestBatch, opts ...grpc.CallOption) (*RespondBatch, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(RespondBatch) + err := c.cc.Invoke(ctx, Executor_ExecuteBatch_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *executorClient) InitDb(ctx context.Context, in *RequestBatch, opts ...grpc.CallOption) (*wrappers.BoolValue, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(wrappers.BoolValue) + err := c.cc.Invoke(ctx, Executor_InitDb_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// ExecutorServer is the server API for Executor service. +// All implementations must embed UnimplementedExecutorServer +// for forward compatibility. +type ExecutorServer interface { + ExecuteBatch(context.Context, *RequestBatch) (*RespondBatch, error) + InitDb(context.Context, *RequestBatch) (*wrappers.BoolValue, error) + mustEmbedUnimplementedExecutorServer() +} + +// UnimplementedExecutorServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedExecutorServer struct{} + +func (UnimplementedExecutorServer) ExecuteBatch(context.Context, *RequestBatch) (*RespondBatch, error) { + return nil, status.Errorf(codes.Unimplemented, "method ExecuteBatch not implemented") +} +func (UnimplementedExecutorServer) InitDb(context.Context, *RequestBatch) (*wrappers.BoolValue, error) { + return nil, status.Errorf(codes.Unimplemented, "method InitDb not implemented") +} +func (UnimplementedExecutorServer) mustEmbedUnimplementedExecutorServer() {} +func (UnimplementedExecutorServer) testEmbeddedByValue() {} + +// UnsafeExecutorServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ExecutorServer will +// result in compilation errors. +type UnsafeExecutorServer interface { + mustEmbedUnimplementedExecutorServer() +} + +func RegisterExecutorServer(s grpc.ServiceRegistrar, srv ExecutorServer) { + // If the following call pancis, it indicates UnimplementedExecutorServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&Executor_ServiceDesc, srv) +} + +func _Executor_ExecuteBatch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestBatch) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ExecutorServer).ExecuteBatch(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Executor_ExecuteBatch_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ExecutorServer).ExecuteBatch(ctx, req.(*RequestBatch)) + } + return interceptor(ctx, in, info, handler) +} + +func _Executor_InitDb_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestBatch) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ExecutorServer).InitDb(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Executor_InitDb_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ExecutorServer).InitDb(ctx, req.(*RequestBatch)) + } + return interceptor(ctx, in, info, handler) +} + +// Executor_ServiceDesc is the grpc.ServiceDesc for Executor service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Executor_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "Executor", + HandlerType: (*ExecutorServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "executeBatch", + Handler: _Executor_ExecuteBatch_Handler, + }, + { + MethodName: "initDb", + Handler: _Executor_InitDb_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "executor.proto", +} diff --git a/ORAM_SNAPSHOT/pathOram/cmd/oram/main.go b/ORAM_SNAPSHOT/pathOram/cmd/oram/main.go new file mode 100644 index 000000000..0259305bd --- /dev/null +++ b/ORAM_SNAPSHOT/pathOram/cmd/oram/main.go @@ -0,0 +1,67 @@ +// THIS FILE DOESN't APPLY TO THE CURRENT VERSION OF ENHANCED ORAM EXECUTABLE + +// package main + +// import ( +// "fmt" + +// "pathOram/pkg/oram/block" +// "pathOram/pkg/oram/bucket" +// "pathOram/pkg/oram/oram" +// "pathOram/pkg/oram/utils" +// ) + +// func main() { +// logCapacity := 2 // height of tree - 1 +// Z := 4 +// redisAddr := "127.0.0.1:6379" // local host default redis port + +// // Create a new ORAM instance +// oramInstance, err := oram.NewORAM(logCapacity, Z, 1000, redisAddr) +// if err != nil { +// fmt.Printf("Error creating ORAM instance: %v\n", err) +// return +// } +// defer oramInstance.RedisClient.Close() + +// // Create a bucket and write it to the root bucket (index 0) +// bucket := bucket.Bucket{ +// Blocks: []block.Block{ +// {Key: "1234", Value: "Block1234"}, +// {Key: "5678", Value: "Block5678"}, +// }, +// } +// utils.PutBucket(oramInstance, 0, bucket) + +// // Print the ORAM tree +// utils.PrintTree(oramInstance) +// fmt.Println() + +// // Perform read path operation +// oramInstance.ReadPath("3", true) + +// // Print the stash map +// utils.PrintStashMap(oramInstance) + +// fmt.Println("--------ORAM core functionality tests--------") + +// // Put a value into the ORAM +// oramInstance.Put(555, "Waterloo") + +// // Retrieve and print the 
value for key 555 +// value := oramInstance.Get(555) +// fmt.Println("Read key 555 value:", value) + +// // Print the ORAM tree again +// utils.PrintTree(oramInstance) + +// // Uncomment if you want to update the value for key 555 +// // oramInstance.Put(555, "Toronto") + +// // Retrieve and print the updated value for key 555 +// value = oramInstance.Get(555) +// fmt.Println("Read key 555 value:", value) + +// // Print the ORAM tree one last time +// utils.PrintTree(oramInstance) +// } \ No newline at end of file diff --git a/ORAM_SNAPSHOT/pathOram/generate_db_snapshot.sh b/ORAM_SNAPSHOT/pathOram/generate_db_snapshot.sh new file mode 100755 index 000000000..79adb2c87 --- /dev/null +++ b/ORAM_SNAPSHOT/pathOram/generate_db_snapshot.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +# Check if a flag is provided +if [ -z "$1" ]; then + echo "Usage: $0 {gen|test}" + exit 1 +fi + +# Run based on the flag provided +if [ "$1" == "gen" ]; then + echo "Generating db snapshot..." + go run ./tests/generate_db_snapshot.go + sleep 120 + mv ./dump.rdb ./snapshot_redis.rdbb # Save file differently to avoid redis interference + if [ $? -ne 0 ]; then + echo "Generating db snapshot failed." + exit 1 + fi +elif [ "$1" == "test" ]; then + echo "Testing db snapshot..." + go run ./tests/test_db_snapshot.go + if [ $? -ne 0 ]; then + echo "Testing db snapshot failed." + exit 1 + fi +else + echo "Invalid flag. Use 'gen' for generate or 'test' for test." + exit 1 +fi + +echo "Execution completed successfully." 
diff --git a/ORAM_SNAPSHOT/pathOram/go.mod b/ORAM_SNAPSHOT/pathOram/go.mod new file mode 100644 index 000000000..be4216482 --- /dev/null +++ b/ORAM_SNAPSHOT/pathOram/go.mod @@ -0,0 +1,26 @@ +module pathOram + +go 1.21 + +toolchain go1.23.1 + +require ( + github.com/go-redis/redis/v8 v8.11.5 + github.com/golang/protobuf v1.5.4 + github.com/schollz/progressbar/v3 v3.14.4 + google.golang.org/grpc v1.65.0 + google.golang.org/protobuf v1.34.2 +) + +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/joho/godotenv v1.5.1 // indirect + github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect + github.com/rivo/uniseg v0.4.7 // indirect + golang.org/x/net v0.26.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/term v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect +) diff --git a/ORAM_SNAPSHOT/pathOram/go.sum b/ORAM_SNAPSHOT/pathOram/go.sum new file mode 100644 index 000000000..e61d82f37 --- /dev/null +++ b/ORAM_SNAPSHOT/pathOram/go.sum @@ -0,0 +1,57 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify 
v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= +github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1e29fT/6vq2aBdFsgNPmy8qMdSay1npru+Sw= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= +github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod 
h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/schollz/progressbar/v3 v3.14.4 h1:W9ZrDSJk7eqmQhd3uxFNNcTr0QL+xuGNI9dEMrw0r74= +github.com/schollz/progressbar/v3 v3.14.4/go.mod h1:aT3UQ7yGm+2ZjeXPqsjTenwL3ddUiuZ0kfQ/2tHlyNI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 h1:1GBuWVLM/KMVUv1t1En5Gs+gFZCNd360GGb4sSxtrhU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/tomb.v1 
v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/ORAM_SNAPSHOT/pathOram/main b/ORAM_SNAPSHOT/pathOram/main new file mode 100755 index 000000000..513b967b8 Binary files /dev/null and b/ORAM_SNAPSHOT/pathOram/main differ diff --git a/ORAM_SNAPSHOT/pathOram/makerun.sh b/ORAM_SNAPSHOT/pathOram/makerun.sh new file mode 100755 index 000000000..0ab5cf2cf --- /dev/null +++ b/ORAM_SNAPSHOT/pathOram/makerun.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +# Set the Go source file for your main program +MAIN_FILE="./cmd/oram/main.go" + +# Set the name for the executable +EXECUTABLE="main" + +# Build the Go program +echo "Building the Go program..." +sudo go build -o $EXECUTABLE $MAIN_FILE + +# Check if the build was successful +if [ $? -ne 0 ]; then + echo "Build failed." + exit 1 +fi + +echo "Build succeeded." + +# Run the executable +echo "Running the program..." +./$EXECUTABLE + +# Check if the program ran successfully +if [ $? -ne 0 ]; then + echo "Program execution failed." + exit 1 +fi + +echo "Program executed successfully." 
diff --git a/ORAM_SNAPSHOT/pathOram/pkg/oram/block/block.go b/ORAM_SNAPSHOT/pathOram/pkg/oram/block/block.go new file mode 100644 index 000000000..8f3228dbe --- /dev/null +++ b/ORAM_SNAPSHOT/pathOram/pkg/oram/block/block.go @@ -0,0 +1,7 @@ +package block + +// Block represents a key-value pair +type Block struct { + Key string // dummy can have key -1 + Value string +} diff --git a/ORAM_SNAPSHOT/pathOram/pkg/oram/bucket/bucket.go b/ORAM_SNAPSHOT/pathOram/pkg/oram/bucket/bucket.go new file mode 100644 index 000000000..9e0844611 --- /dev/null +++ b/ORAM_SNAPSHOT/pathOram/pkg/oram/bucket/bucket.go @@ -0,0 +1,11 @@ +package bucket + +import ( + "pathOram/pkg/oram/block" +) + +// Bucket represents a collection of blocks +type Bucket struct { + Blocks []block.Block + RealBlockCount int +} diff --git a/ORAM_SNAPSHOT/pathOram/pkg/oram/bucketRequest/bucketRequest.go b/ORAM_SNAPSHOT/pathOram/pkg/oram/bucketRequest/bucketRequest.go new file mode 100644 index 000000000..183eb2d03 --- /dev/null +++ b/ORAM_SNAPSHOT/pathOram/pkg/oram/bucketRequest/bucketRequest.go @@ -0,0 +1,10 @@ +package bucketRequest + +import ( + "pathOram/pkg/oram/bucket" +) + +type BucketRequest struct { + BucketId int + Bucket bucket.Bucket +} diff --git a/ORAM_SNAPSHOT/pathOram/pkg/oram/crypto/crypto.go b/ORAM_SNAPSHOT/pathOram/pkg/oram/crypto/crypto.go new file mode 100644 index 000000000..0adfe8a3a --- /dev/null +++ b/ORAM_SNAPSHOT/pathOram/pkg/oram/crypto/crypto.go @@ -0,0 +1,63 @@ +package crypto + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "errors" + "io" + mathrand "math/rand" +) + +// Cryptography functions + +func GetRandomInt(max int) int { + return mathrand.Intn(max) +} + +func GenerateRandomKey() ([]byte, error) { + key := make([]byte, 32) // AES-256 + _, err := io.ReadFull(rand.Reader, key) + if err != nil { + return nil, err + } + return key, nil +} + +func Encrypt(data []byte, key []byte) ([]byte, error) { + block, err := aes.NewCipher(key) + if err != nil { + return nil, 
err + } + + ciphertext := make([]byte, aes.BlockSize+len(data)) + iv := ciphertext[:aes.BlockSize] + + if _, err := io.ReadFull(rand.Reader, iv); err != nil { + return nil, err + } + + stream := cipher.NewCFBEncrypter(block, iv) + stream.XORKeyStream(ciphertext[aes.BlockSize:], data) + + return ciphertext, nil +} + +func Decrypt(data []byte, key []byte) ([]byte, error) { + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + if len(data) < aes.BlockSize { + return nil, errors.New("ciphertext too short") + } + + iv := data[:aes.BlockSize] + ciphertext := data[aes.BlockSize:] + + stream := cipher.NewCFBDecrypter(block, iv) + stream.XORKeyStream(ciphertext, ciphertext) + + return ciphertext, nil +} diff --git a/ORAM_SNAPSHOT/pathOram/pkg/oram/executor/executor b/ORAM_SNAPSHOT/pathOram/pkg/oram/executor/executor new file mode 100755 index 000000000..ac50b9c69 Binary files /dev/null and b/ORAM_SNAPSHOT/pathOram/pkg/oram/executor/executor differ diff --git a/ORAM_SNAPSHOT/pathOram/pkg/oram/executor/executor.go b/ORAM_SNAPSHOT/pathOram/pkg/oram/executor/executor.go new file mode 100644 index 000000000..ebc2adead --- /dev/null +++ b/ORAM_SNAPSHOT/pathOram/pkg/oram/executor/executor.go @@ -0,0 +1,172 @@ +package main + +import ( + "context" + "flag" + "fmt" + "log" + "net" + + executor "pathOram/api/executor" + + "github.com/golang/protobuf/ptypes/wrappers" + // "github.com/redis/go-redis/v9" + "google.golang.org/grpc" + + "pathOram/pkg/oram/oram" + "pathOram/pkg/oram/request" +) + +const ( + logCapacity = 10 // Logarithm base 2 of capacity (1024 buckets) + Z = 4 // Number of blocks per bucket + stashSize = 2000 // Maximum number of blocks in stash +) + +type myExecutor struct { + executor.UnimplementedExecutorServer + // rdb *redis.Client + o *oram.ORAM +} + +type StringPair struct { + First string + Second string +} + +func (e myExecutor) ExecuteBatch(ctx context.Context, req *executor.RequestBatch) (*executor.RespondBatch, error) { + 
fmt.Printf("Got a request with ID: %d \n", req.RequestId) + + // set batchsize + batchSize := 50 + + // Batching(requests []request.Request, batchSize int) + + var replyKeys []string + var replyVals []string + + for start := 0; start < len(req.Values); start += batchSize { + + var requestList []request.Request + var returnValues []string + + end := start + batchSize + if end > len(req.Values) { + end = len(req.Values) // Ensure we don't go out of bounds + } + + // Slice the keys and values for the current batch + batchKeys := req.Keys[start:end] + batchValues := req.Values[start:end] + + for i := range batchKeys { + // Read operation + currentRequest := request.Request{ + Key: batchKeys[i], + Value: batchValues[i], + } + + requestList = append(requestList, currentRequest) + } + + returnValues, _ = e.o.Batching(requestList, batchSize) + + replyKeys = append(replyKeys, batchKeys...) + replyVals = append(replyVals, returnValues...) + + // TODO: create a list of Request objects with Type and key and value + + // TODO: call o.Batching(requests, batch size) this will return a list of VALUES + + // TODO: handle it like replyKeys = append(replyKeys, rec_block.Key) + // replyVals = append(replyVals, rec_block.Value) + + } + + return &executor.RespondBatch{ + RequestId: req.RequestId, + Keys: replyKeys, + Values: replyVals, + }, nil +} + +func (e myExecutor) InitDb(ctx context.Context, req *executor.RequestBatch) (*wrappers.BoolValue, error) { + fmt.Printf("Initialize DB with Key Size: %d \n", len(req.Keys)) + + e.o.RedisClient.FlushData() + e.o.ClearKeymap() + e.o.ClearStash() + // var pairs []interface{} + + // TODO: split keys and values by batchsize + // TODO: call o.Batching(requests, batch size) this will return a list of VALUES - can be neglected + + // TODO: need to tune oram.go to distinguish between put and get requests based on presence of value in requests object + + // set batchsize + batchSize := 50 + + for start := 0; start < len(req.Values); start += 
batchSize { + + var requestList []request.Request + + end := start + batchSize + if end > len(req.Values) { + end = len(req.Values) // Ensure we don't go out of bounds + } + + // Slice the keys and values for the current batch + batchKeys := req.Keys[start:end] + batchValues := req.Values[start:end] + + for i := range batchKeys { + //pairs = append(pairs, req.Keys[i], req.Values[i]) + // Read operation + currentRequest := request.Request{ + Key: batchKeys[i], + Value: batchValues[i], + } + + requestList = append(requestList, currentRequest) + } + e.o.Batching(requestList, batchSize) + } + + return &wrappers.BoolValue{Value: true}, nil + +} + +func main() { + redisHost := flag.String("rh", "127.0.0.1", "Redis Host") + redisPort := flag.String("rp", "6379", "Redis Host") + addrPort := flag.String("p", "9090", "Executor Port") + + flag.Parse() + + lis, err := net.Listen("tcp", ":"+*addrPort) + if err != nil { + log.Fatalf("Cannot create listener on port :9090 %s", err) + } + + addr := *redisHost + ":" + *redisPort + + oram_object, err := oram.NewORAM(logCapacity, Z, stashSize, addr, false, nil) + if err != nil { + log.Fatalf("Error initializing ORAM: %v", err) + } + defer oram_object.RedisClient.Close() + + fmt.Println("Connected to Redis Server on: ", addr) + + // service := myExecutor{rdb: rdb} + service := myExecutor{o: oram_object} + serverRegister := grpc.NewServer(grpc.MaxRecvMsgSize(644000*300), grpc.MaxSendMsgSize(644000*300)) + + log.Println("Starting Server!") + executor.RegisterExecutorServer(serverRegister, service) + err = serverRegister.Serve(lis) + if err != nil { + log.Fatalf("Error! 
Could not start server %s", err) + } + +} diff --git a/ORAM_SNAPSHOT/pathOram/pkg/oram/executor/executor_test.go b/ORAM_SNAPSHOT/pathOram/pkg/oram/executor/executor_test.go new file mode 100644 index 000000000..f3164e4aa --- /dev/null +++ b/ORAM_SNAPSHOT/pathOram/pkg/oram/executor/executor_test.go @@ -0,0 +1,151 @@ +package main + +import ( + "context" + "fmt" + "log" + "reflect" + "testing" + + executor "pathOram/api/executor" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +var testCases = []TestCase{ + { + name: "Valid Get Request", + requestBatch: &executor.RequestBatch{ + RequestId: 2, + Keys: []string{"K1", "K2", "K3"}, + Values: []string{"", "", ""}, + }, + expected: &executor.RespondBatch{ + RequestId: 2, + Keys: []string{"K1", "K2", "K3"}, + Values: []string{"V1", "V2", "V3"}, + }, + }, + { + name: "Get Non-existant Value", + requestBatch: &executor.RequestBatch{ + RequestId: 3, + Keys: []string{"K100", "K200", "K10000"}, + Values: []string{"", "", ""}, + }, + expected: &executor.RespondBatch{ + RequestId: 3, + Keys: []string{"K100", "K200", "K10000"}, + Values: []string{"V100", "V200", "-1"}, + }, + }, + { + name: "Normal Put Request", + requestBatch: &executor.RequestBatch{ + RequestId: 4, + Keys: []string{"K1", "K2", "K3"}, + Values: []string{"VT1", "VT2", "VT3"}, + }, + expected: &executor.RespondBatch{ + RequestId: 4, + Keys: []string{"K1", "K2", "K3"}, + Values: []string{"VT1", "VT2", "VT3"}, + }, + }, + { + name: "Get & Puts in One", + requestBatch: &executor.RequestBatch{ + RequestId: 5, + Keys: []string{"K1", "K2", "K1", "K3", "K1", "K1", "K1"}, + Values: []string{"", "", "Test", "VT3", "", "Final", ""}, + }, + expected: &executor.RespondBatch{ + RequestId: 5, + Keys: []string{"K1", "K2", "K1", "K3", "K1", "K1", "K1"}, + Values: []string{"VT1", "VT2", "Test", "VT3", "Test", "Final", "Final"}, + }, + }, + { + name: "Get & Puts With Missing Key", + requestBatch: &executor.RequestBatch{ + RequestId: 6, + Keys: 
[]string{"K1", "K2", "K1", "k999999", "K3", "K1", "K1", "K1"}, + Values: []string{"", "", "Test", "", "VT3", "", "Final", ""}, + }, + expected: &executor.RespondBatch{ + RequestId: 6, + Keys: []string{"K1", "K2", "K1", "k999999", "K3", "K1", "K1", "K1"}, + Values: []string{"Final", "VT2", "Test", "-1", "VT3", "Test", "Final", "Final"}, + }, + }, + { + name: "Missing Key, then Inserted", + requestBatch: &executor.RequestBatch{ + RequestId: 7, + Keys: []string{"K999999", "K999999", "K999999"}, + Values: []string{"", "Hello", ""}, + }, + expected: &executor.RespondBatch{ + RequestId: 7, + Keys: []string{"K999999", "K999999", "K999999"}, + Values: []string{"-1", "Hello", "Hello"}, + }, + }, +} + +type TestCase struct { + name string + requestBatch *executor.RequestBatch + expected *executor.RespondBatch +} + +func generateDataSet() *executor.RequestBatch { + newReq := executor.RequestBatch{ + RequestId: 1, + Keys: make([]string, 0), + Values: make([]string, 0), + } + + for i := 0; i < 1000; i++ { + newKey := fmt.Sprintf("K%d", i) + newValue := fmt.Sprintf("V%d", i) + newReq.Keys = append(newReq.Keys, newKey) + newReq.Values = append(newReq.Values, newValue) + } + + return &newReq +} + +func TestExecutor(t *testing.T) { + ctx := context.Background() + fullAddr := "localhost:9090" + conn, err := grpc.NewClient(fullAddr, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(644000*300), grpc.MaxCallSendMsgSize(644000*300))) + if err != nil { + log.Fatalf("Couldn't Connect to Executor Proxy at localhost:9090. Error: %s", err) + } + newClient := executor.NewExecutorClient(conn) + //Testing Init DB + initSuccess, err := newClient.InitDb(ctx, generateDataSet()) + if err != nil { + log.Fatalln("Failed to Init DB! Error: ", err) + } + + if !initSuccess.Value { + log.Fatalln("Init DB returned false! 
Failed to init DB") + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + fmt.Printf("Running test: %s \n", tc.name) + resp, err := newClient.ExecuteBatch(ctx, tc.requestBatch) + if err != nil { + t.Errorf("ExecuteBatch Error = %v", err) + return + } + if !reflect.DeepEqual(resp.Keys, tc.expected.Keys) || !reflect.DeepEqual(resp.Values, tc.expected.Values) { + t.Errorf("ExecuteBatch Values not same! \n Expected Keys: %v, Got: %v \n Expected Values: %v, Got: %v \n", tc.expected.Keys, resp.Keys, tc.expected.Values, resp.Values) + } + }) + } +} diff --git a/ORAM_SNAPSHOT/pathOram/pkg/oram/executor/run_executor_test.sh b/ORAM_SNAPSHOT/pathOram/pkg/oram/executor/run_executor_test.sh new file mode 100755 index 000000000..376aa3a69 --- /dev/null +++ b/ORAM_SNAPSHOT/pathOram/pkg/oram/executor/run_executor_test.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +# Run the tests +echo "Running the tests..." +go test -v ./executor_test.go + +# Check if the tests ran successfully +if [ $? -ne 0 ]; then + echo "Test execution failed." + exit 1 +fi + +echo "Tests executed successfully." diff --git a/ORAM_SNAPSHOT/pathOram/pkg/oram/oram/oram.go b/ORAM_SNAPSHOT/pathOram/pkg/oram/oram/oram.go new file mode 100644 index 000000000..e37469569 --- /dev/null +++ b/ORAM_SNAPSHOT/pathOram/pkg/oram/oram/oram.go @@ -0,0 +1,453 @@ +package oram + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "sort" + + "pathOram/pkg/oram/block" + "pathOram/pkg/oram/bucket" + "pathOram/pkg/oram/bucketRequest" + "pathOram/pkg/oram/crypto" + "pathOram/pkg/oram/redis" + "pathOram/pkg/oram/request" + + "github.com/schollz/progressbar/v3" +) + +// Core ORAM functions + +type ORAM struct { + LogCapacity int // height of the tree or logarithm base 2 of capacity (i.e. 
capacity is 2 to the power of this value) + Z int // number of blocks in a bucket (typically, 3 to 7) + RedisClient *redis.RedisClient + StashMap map[string]block.Block + StashSize int // Maximum number of blocks the stash can hold + Keymap map[string]int +} + +func NewORAM(LogCapacity, Z, StashSize int, redisAddr string, useSnapshot bool, key []byte) (*ORAM, error) { + // If key is not provided (nil or empty), generate a random key + if len(key) == 0 { + var err error + key, err = crypto.GenerateRandomKey() + if err != nil { + return nil, err + } + } + + client, err := redis.NewRedisClient(redisAddr, key) + if err != nil { + return nil, err + } + + oram := &ORAM{ + RedisClient: client, + LogCapacity: LogCapacity, + Z: Z, + StashSize: StashSize, + StashMap: make(map[string]block.Block), + Keymap: make(map[string]int), + } + + if useSnapshot { + // Load the Stashmap and Keymap into memory + // Allow redis to update state using dump.rdb + oram.loadSnapshotMaps() + fmt.Println("ORAM initialized with provided snapshot") + } else { + fmt.Println("Flushing Redis DB and initializing ORAM") + // Clear the Redis database to ensure a fresh start + if err := client.FlushDB(); err != nil { + return nil, fmt.Errorf("failed to flush Redis database: %v", err) + } + oram.initialize() + + fmt.Println("ORAM DB is ready for opeartions!") + } + + return oram, nil +} + +// Load Keymap and Stashmap into memory +func (oram *ORAM) loadSnapshotMaps() { + // Read from snapshot.json + // Open the file for reading + file, err := os.Open("/Users/nachiketrao/Desktop/URA/obliq-nachi/ObliSQL/pathOram/proxy_snapshot.json") + if err != nil { + fmt.Printf("Error opening file: %v\n", err) + return // No need to return anything here, just exit the function + } + defer file.Close() + + // Decode JSON data into a map + var data map[string]interface{} + decoder := json.NewDecoder(file) + if err := decoder.Decode(&data); err != nil { + fmt.Printf("Error decoding JSON data: %v\n", err) + return // No need 
to return anything here, just exit the function + } + + // Load Keymap + if keymap, ok := data["Keymap"].(map[string]interface{}); ok { + // Convert map[string]interface{} to map[string]int for Keymap + for key, value := range keymap { + if val, ok := value.(float64); ok { // JSON numbers are decoded as float64 + oram.Keymap[key] = int(val) // Convert float64 to int + } + } + } else { + fmt.Println("Error: Keymap data is not of expected type") + } + + // Load StashMap + if stashmap, ok := data["StashMap"].(map[string]interface{}); ok { + // Convert map[string]interface{} to map[string]block.Block for StashMap + for key, value := range stashmap { + // Assuming block.Block is a struct, and you need to decode the value into that type + stashBlock, ok := value.(map[string]interface{}) // Assuming stash block is a map + if !ok { + fmt.Println("Error: StashMap block is not of expected type") + continue + } + + // Marshal and unmarshal to convert the stash block map to block.Block + stashBlockData, err := json.Marshal(stashBlock) + if err != nil { + fmt.Printf("Error marshaling stash block data: %v\n", err) + continue + } + var blockData block.Block + err = json.Unmarshal(stashBlockData, &blockData) + if err != nil { + fmt.Printf("Error unmarshaling stash block: %v\n", err) + continue + } + // Add the block to StashMap + oram.StashMap[key] = blockData + } + } else { + fmt.Println("Error: StashMap data is not of expected type") + } +} + +// Tree height (real depth) is (o.LogCapacity + 1) +// Number of leaves is (2 ^ (o.LogCapacity)) + +// Initializing ORAM and populating with key = -1 +func (o *ORAM) initialize() { + + totalBuckets := (1 << (o.LogCapacity + 1)) - 1 + bar := progressbar.Default(int64(totalBuckets), "Setting values...") + + for i := 0; i < totalBuckets; i++ { + bucket := bucket.Bucket{ + Blocks: make([]block.Block, o.Z), + } + for j := range bucket.Blocks { + bucket.Blocks[j].Key = "-1" + bucket.Blocks[j].Value = "" + } + o.RedisClient.WriteBucketToDb(i, 
bucket) // initialize doesn't use redis batching mechanism to write: NOTE: don't run this initialize formally for experiments + + bar.Add(1) + } + + bar.Finish() +} + +// ClearStash clears the ORAM stash by resetting it to an empty state. +func (o *ORAM) ClearStash() { + o.StashMap = make(map[string]block.Block) // Resets the stash (assuming stash is a map of block indices to data). + fmt.Println("Stash has been cleared.") +} + +// ClearKeymap clears the ORAM Keymap by resetting it to an empty state. +func (o *ORAM) ClearKeymap() { + o.Keymap = make(map[string]int) // Resets the Keymap (a map of keys to leaf positions). + fmt.Println("Keymap has been cleared.") +} + +// getDepth calculates the depth of a bucket in the tree +func (o *ORAM) GetDepth(bucketIndex int) int { + depth := 0 + for (1<<(depth+1))-1 <= bucketIndex { + depth++ + } + return depth +} + +// bucketForLevelLeaf returns the index of the bucket at the given level on the path to the given leaf +func (o *ORAM) bucketForLevelLeaf(level, leaf int) int { + return ((leaf + (1 << o.LogCapacity)) >> (o.LogCapacity - level)) - 1 +} + +// on this level, do paths of entryLeaf and leaf share the same bucket +func (o *ORAM) canInclude(entryLeaf, leaf, level int) bool { + return entryLeaf>>(o.LogCapacity-level) == leaf>>(o.LogCapacity-level) +} + +// ReadPath reads the paths from the root to the given leaves and optionally populates the stash. 
+func (o *ORAM) ReadPaths(leafs []int) { + + // Calculate the maximum valid leaf value + maxLeaf := (1 << o.LogCapacity) - 1 + + // Use a map to keep track of unique bucket indices + uniqueBuckets := make(map[int]struct{}) + + // Collect all unique bucket indices from root to each leaf + for _, leaf := range leafs { + if leaf < 0 || leaf > maxLeaf { + fmt.Printf("invalid leaf value: %d, valid range is [0, %d]\n", leaf, maxLeaf) + } + for level := o.LogCapacity; level >= 0; level-- { + bucket := o.bucketForLevelLeaf(level, leaf) + _, exists := uniqueBuckets[bucket] + // If the key exists + if exists { + break + } + uniqueBuckets[bucket] = struct{}{} + } + } + + bucketsData, _ := o.RedisClient.ReadBucketsFromDb(uniqueBuckets) + // Read the blocks from all buckets in all retrived buckets + for _, bucketData := range bucketsData { + for _, block := range bucketData.Blocks { + // Skip "empty" blocks + if block.Key != "-1" { + // fmt.Println("Read block from Tree and put in stash with key: ", block.Key) + o.StashMap[block.Key] = block + } + } + } + +} + +func (o *ORAM) WritePath(bucketInfo []int, requests *[]bucketRequest.BucketRequest) { + + buckedId := bucketInfo[0] + leaf := bucketInfo[1] + level := bucketInfo[2] + + //newBucketsWritten := make(map[int]struct{}) + + // Step 1: Get all blocks from stash + currentStash := make(map[string]block.Block) + for blockId, block := range o.StashMap { + currentStash[blockId] = block + } + + toDelete := make(map[string]struct{}) // track blocks that need to be deleted from stash + + toInsert := make(map[string]block.Block) // blocks to be inserted in the bucket (up to Z) + + toDeleteLocal := make([]string, 0) // indices/blockIds/keys of blocks in currentStash to delete + + // Determine what blocks from stash can go into Bucket under consideration + // A block can only be inserted into Bucket if its current leaf path and Block's level/leaf intersect + for key, currentBlock := range currentStash { + currentBlockLeaf := 
o.Keymap[currentBlock.Key] + if o.canInclude(currentBlockLeaf, leaf, level) { + // fmt.Println("This block can be written to Tree with key: ", currentBlock.Key) + toInsert[currentBlock.Key] = currentBlock + toDelete[currentBlock.Key] = struct{}{} + toDeleteLocal = append(toDeleteLocal, key) // add the key for block we want to delete + if len(toInsert) == o.Z { + break + } + } + } + + // Delete inserted blocks from currentStash + for _, key := range toDeleteLocal { + delete(currentStash, key) + } + + // Prepare the bucket for writing + + bkt := bucket.Bucket{ + Blocks: make([]block.Block, o.Z), + } + + i := 0 + for _, block := range toInsert { + bkt.Blocks[i] = block + i++ + if i >= o.Z { + break + } + } + + // If there are fewer blocks than o.Z, fill the remaining slots with dummy blocks + for i < o.Z { + bkt.Blocks[i] = block.Block{Key: "-1", Value: ""} + i++ + } + + *requests = append(*requests, bucketRequest.BucketRequest{ + BucketId: buckedId, + Bucket: bkt, + }) + + // Update the stash map by removing the newly inserted blocks + for key := range toDelete { + delete(o.StashMap, key) + } + +} + +func (o *ORAM) WritePaths(previousPositionLeaves []int) { + + requests := make([]bucketRequest.BucketRequest, 0) + bucketsToFillMap := make(map[int][]int) // Map to store bucketId and its associated [leaf, level] + + for _, leaf := range previousPositionLeaves { + for level := o.LogCapacity; level >= 0; level-- { + bucketId := o.bucketForLevelLeaf(level, leaf) + bucketsToFillMap[bucketId] = []int{leaf, level} // Store leaf and level for each bucketId + } + } + + // Extract keys from the map + var bucketIds []int + for bucketId := range bucketsToFillMap { + bucketIds = append(bucketIds, bucketId) + } + + // Sort bucketIds in descending order + sort.Slice(bucketIds, func(i, j int) bool { + return bucketIds[i] > bucketIds[j] + }) + + // Create a nested list [bucketId:[leaf, level]] + var nestedList [][]int + for _, bucketId := range bucketIds { + // Access leaf and level 
for the current bucketId + leafLevel := bucketsToFillMap[bucketId] + nestedList = append(nestedList, []int{bucketId, leafLevel[0], leafLevel[1]}) + } + + // Now we have all the bucketIDs that we need to fill - these are all the bucketIDs ReadPaths read in for us + + // Iterate through the nested list + for _, entry := range nestedList { + o.WritePath(entry, &requests) + } + + // Set the requests in Redis + if err := o.RedisClient.WriteBucketsToDb(requests); err != nil { + fmt.Println("Error writing buckets : ", err) + } + +} + +/* +initialize fully for now beforehand + +create separate repo for base pathoram + +batching optimization is kind of like: +run readpath on 50 - but maintain set and fetch every block only once, one redis request for all 50 + +write/read stashmap[key] for all 50 + +writepath to 50 previousPositionLeafs and try to clear out stash - 1 redis request + +many batches come in; go routines? +*/ + +func (o *ORAM) Batching(requests []request.Request, batchSize int) ([]string, error) { + + if len(requests) > batchSize { + return nil, errors.New("batch size exceeded") + } + + // fakeReadMap = {key: fakeRead?, key: fakeRead? 
, ...} + fakeReadMap := make(map[string]bool) + + //determine keys from position map for all of them ; if something doesn't exist, add it to Keymap and stash as done in Access() + previousPositionLeavesMap := make(map[int]struct{}) + + // previousPositionLeaves: List of Unique leaves + // - true leaves for first time keys in batch that already exist in system + // - fake leaves for keys never seen before (GET or PUT) + // - fake leaves for keys already seen in batch + var previousPositionLeaves []int + + for _, req := range requests { + + _, keyReadBefore := fakeReadMap[req.Key] + var previousPositionLeaf = -1 + + if !keyReadBefore { // if seeing key for first time in batch + var exists bool + previousPositionLeaf, exists = o.Keymap[req.Key] + + if !exists { // if key doesn't exist in Keymap + if req.Value == "" { // New key: GET request + // GET request for a new key, perform a random fake read, don't add a block, need to return -1 + previousPositionLeaf = crypto.GetRandomInt(1 << o.LogCapacity) + // Skip adding to stash + } else { // New key: PUT request + // PUT request for a new key, add block to stash + previousPositionLeaf = crypto.GetRandomInt(1 << o.LogCapacity) + o.StashMap[req.Key] = block.Block{Key: req.Key, Value: req.Value} + } + } + fakeReadMap[req.Key] = true // This key has now been visited; future appearances in batch should lead to fake reads + + } else { // if key seen before in batch + previousPositionLeaf = crypto.GetRandomInt(1 << o.LogCapacity) + } + + // ensure there are no duplicates in previousPositionLeaves - otherwise writepaths may overwrite content + if _, alreadyExists := previousPositionLeavesMap[previousPositionLeaf]; !alreadyExists { + previousPositionLeavesMap[previousPositionLeaf] = struct{}{} + previousPositionLeaves = append(previousPositionLeaves, previousPositionLeaf) // previousPositionLeaves goes from index 0..batchSize - 1 + } + + // randomly remap key + o.Keymap[req.Key] = crypto.GetRandomInt(1 << o.LogCapacity) + } + + 
// perform read path, go through all paths at once and find non overlapping buckets, fetch all from redis at once - read path use redis MGET + // adding all blocks to stash + o.ReadPaths(previousPositionLeaves) + + // Retrieve values from stash map for all keys in requests and load them into an array + values := make([]string, len(requests)) + + // Craft reply to requests + for i, req := range requests { + if req.Value != "" { + // Replying to PUT requests + o.StashMap[req.Key] = block.Block{Key: req.Key, Value: req.Value} + values[i] = o.StashMap[req.Key].Value + } else if block, exists := o.StashMap[req.Key]; exists { + // Replying to GET requests that access existing data + values[i] = block.Value + } else { + // Replying to GET requests trying to access non-existent keys + values[i] = "-1" + } + } + + // write path 50 previous position leafs at once to redis - MGET vs pipeline - use MGET 180X faster than GET, pipeline is 12x faster than GET. pipeline allows batching, non blocking for other clients + // pipeline also allows different types of commands. + + // for each batch, as we writepath, keep a track of all buckets already touched/altered + // if a bucket is already changed, we dont' write that bucket or any of the buckets above it + // becuase all buckets above have already been considered for all elements of the stash + // no point in redoing, this is an optmization. 
also, the other path may write -1s into + // already filled buckets + + o.WritePaths(previousPositionLeaves) + + // return the results to the batch in an array + return values, nil +} diff --git a/ORAM_SNAPSHOT/pathOram/pkg/oram/redis/redis.go b/ORAM_SNAPSHOT/pathOram/pkg/oram/redis/redis.go new file mode 100644 index 000000000..9dbd878d1 --- /dev/null +++ b/ORAM_SNAPSHOT/pathOram/pkg/oram/redis/redis.go @@ -0,0 +1,179 @@ +package redis + +import ( + "context" + "encoding/json" + "fmt" + + "pathOram/pkg/oram/bucket" + "pathOram/pkg/oram/bucketRequest" + "pathOram/pkg/oram/crypto" + + "github.com/go-redis/redis/v8" +) + +/* +MGET is faster than GET only because its 1RTT + redis processing time, with GET, there is 1RTT for each GET request +*/ +type RedisClient struct { + Client *redis.Client + EncryptionKey []byte + Ctx context.Context +} + +func NewRedisClient(redisAddr string, encryptionKey []byte) (*RedisClient, error) { + ctx := context.Background() + client := redis.NewClient(&redis.Options{ + Addr: redisAddr, + }) + _, err := client.Ping(ctx).Result() + if err != nil { + return nil, err + } + + return &RedisClient{ + Client: client, + EncryptionKey: encryptionKey, + Ctx: ctx, + }, nil +} + +func (r *RedisClient) FlushDB() error { + return r.Client.FlushDB(r.Ctx).Err() +} + +func (r *RedisClient) FlushData() { + ctx := context.Background() + r.Client.FlushAll(ctx) +} + +// type BucketRequest struct { +// bucketId int +// bucket bucket.Bucket +// } + +func (r *RedisClient) WriteBucketsToDb(requests []bucketRequest.BucketRequest) error { + // Prepare data for MSET + msetArgs := make([]interface{}, 0, len(requests)*2) + + for _, req := range requests { + data, err := json.Marshal(req.Bucket) + if err != nil { + return err + } + + encryptedData, err := crypto.Encrypt(data, r.EncryptionKey) + if err != nil { + return err + } + + key := fmt.Sprintf("bucket:%d", req.BucketId) + msetArgs = append(msetArgs, key, encryptedData) + } + + // Use MSET to set multiple 
key-value pairs at once + _, err := r.Client.MSet(r.Ctx, msetArgs...).Result() // Use MSet method directly + return err +} + +func (r *RedisClient) ReadBucketsFromDb(indices map[int]struct{}) (map[int]bucket.Bucket, error) { + // Convert map keys to slice for MGET + keys := make([]string, 0, len(indices)) + for index := range indices { + keys = append(keys, fmt.Sprintf("bucket:%d", index)) + } + + // Perform MGET operation + data, err := r.Client.MGet(r.Ctx, keys...).Result() + if err != nil { + return nil, err + } + + // Initialize a map to store the retrieved buckets + buckets := make(map[int]bucket.Bucket, len(indices)) + + // Iterate over the retrieved data + for i, raw := range data { + if raw == nil { + continue + } + encryptedData, ok := raw.(string) + if !ok { + return nil, fmt.Errorf("unexpected data type: %T", raw) + } + + // Decrypt the data + decryptedData, err := crypto.Decrypt([]byte(encryptedData), r.EncryptionKey) + if err != nil { + return nil, err + } + + // Unmarshal the decrypted data into a bucket + var bucket1 bucket.Bucket + err = json.Unmarshal(decryptedData, &bucket1) + if err != nil { + return nil, err + } + + // Map the bucket to the corresponding index + var index int + fmt.Sscanf(keys[i], "bucket:%d", &index) + buckets[index] = bucket1 + } + + return buckets, nil +} + +func (r *RedisClient) WriteBucketToDb(index int, bucket bucket.Bucket) error { + data, err := json.Marshal(bucket) + if err != nil { + return err + } + + encryptedData, err := crypto.Encrypt(data, r.EncryptionKey) + if err != nil { + return err + } + + key := fmt.Sprintf("bucket:%d", index) + err = r.Client.Set(r.Ctx, key, encryptedData, 0).Err() + //fmt.Println("writing current request to redis; redis side ") + return err +} + +func (r *RedisClient) ReadBucketFromDb(index int) (bucket.Bucket, error) { + key := fmt.Sprintf("bucket:%d", index) + data, err := r.Client.Get(r.Ctx, key).Bytes() + if err != nil { + return bucket.Bucket{}, err + } + + decryptedData, err := 
crypto.Decrypt(data, r.EncryptionKey) + if err != nil { + return bucket.Bucket{}, err + } + + var bucket1 bucket.Bucket + err = json.Unmarshal(decryptedData, &bucket1) + if err != nil { + return bucket.Bucket{}, err + } + + return bucket1, nil +} + +func (r *RedisClient) Close() error { + return r.Client.Close() +} + +// TriggerSnapshot will invoke the BGSAVE command to take a snapshot and update the dump.rdb +func (r *RedisClient) TriggerSnapshot() error { + // Trigger an asynchronous background save (BGSAVE) + err := r.Client.BgSave(r.Ctx).Err() + if err != nil { + return fmt.Errorf("failed to trigger BGSAVE: %w", err) + } + + fmt.Println("BGSAVE triggered. The dump.rdb file will be updated.") + return nil +} diff --git a/ORAM_SNAPSHOT/pathOram/pkg/oram/request/request.go b/ORAM_SNAPSHOT/pathOram/pkg/oram/request/request.go new file mode 100644 index 000000000..004735d2a --- /dev/null +++ b/ORAM_SNAPSHOT/pathOram/pkg/oram/request/request.go @@ -0,0 +1,7 @@ +package request + +// Request represents incoming PUT/GET requests +type Request struct { + Key string + Value string +} diff --git a/ORAM_SNAPSHOT/pathOram/pkg/oram/utils/utils.go b/ORAM_SNAPSHOT/pathOram/pkg/oram/utils/utils.go new file mode 100644 index 000000000..aa8ab68c6 --- /dev/null +++ b/ORAM_SNAPSHOT/pathOram/pkg/oram/utils/utils.go @@ -0,0 +1,38 @@ +package utils + +import ( + "fmt" + "strings" + + "pathOram/pkg/oram/bucket" + "pathOram/pkg/oram/oram" +) + +// PrintTree prints the ORAM tree structure by reading the file sequentially +// Print only the blocks (and corresponding buckets) that have a non dummy key:value pair +func PrintTree(o *oram.ORAM) { + totalBuckets := (1 << (o.LogCapacity + 1)) - 1 + for i := 0; i < totalBuckets; i++ { + b, _ := o.RedisClient.ReadBucketFromDb(i) + indent := strings.Repeat(" ", o.GetDepth(i)) + for _, blk := range b.Blocks { + if blk.Key != "-1" { + fmt.Printf("%sBucket %d:\n", indent, i) + fmt.Printf("%s Key=%s, Value=%s\n", indent, blk.Key, blk.Value) + } + } + 
} +} + +// PrintStashMap prints the contents of the stash map +func PrintStashMap(o *oram.ORAM) { + fmt.Println("Stash Map contents:") + for _, blk := range o.StashMap { + fmt.Printf("Key: %s, Value: %s\n", blk.Key, blk.Value) + } +} + +// Exported put function used for testing by directly inserting bucket (escaping writepath) +func PutBucket(o *oram.ORAM, index int, b bucket.Bucket) { + o.RedisClient.WriteBucketToDb(index, b) +} diff --git a/ORAM_SNAPSHOT/pathOram/run_test_suite.sh b/ORAM_SNAPSHOT/pathOram/run_test_suite.sh new file mode 100755 index 000000000..33b1dca94 --- /dev/null +++ b/ORAM_SNAPSHOT/pathOram/run_test_suite.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +# Run the tests +echo "Running the tests..." +go test -v ./tests/oram_batching_test.go +go test -v ./tests/oram_fake_read_test.go + +# Check if the tests ran successfully +if [ $? -ne 0 ]; then + echo "Test execution failed." + exit 1 +fi + +echo "Tests executed successfully." diff --git a/ORAM_SNAPSHOT/pathOram/start_redis_server.sh b/ORAM_SNAPSHOT/pathOram/start_redis_server.sh new file mode 100755 index 000000000..24cf0c137 --- /dev/null +++ b/ORAM_SNAPSHOT/pathOram/start_redis_server.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +# Run the tests +echo "Starting the redis server in the background..." +redis-server --daemonize yes + +# Check if the tests ran successfully +if [ $? -ne 0 ]; then + echo "Failed to start redis server." 
+ exit 1 +fi diff --git a/ORAM_SNAPSHOT/pathOram/tests/generate_db_snapshot.go b/ORAM_SNAPSHOT/pathOram/tests/generate_db_snapshot.go new file mode 100644 index 000000000..05d707ff6 --- /dev/null +++ b/ORAM_SNAPSHOT/pathOram/tests/generate_db_snapshot.go @@ -0,0 +1,174 @@ +package main + +import ( + "bufio" + "crypto/sha256" + "encoding/json" + "fmt" + "log" + "os" + "strings" + + "pathOram/pkg/oram/oram" + "pathOram/pkg/oram/request" + "pathOram/pkg/oram/utils" + + "github.com/schollz/progressbar/v3" +) + +/* +The problem is - we need to pass in the same key to redis client for both generated and +testing redis client. otherwise it tries to decrypt with a different key -> doesn't find +the values it's looking for cuz the decryption results in an arbitrary value/object. +The batching mechanism returns a -1 for any key it didn't find. +*/ + +/* +set standard Z = 4 +*/ + +const ( + logCapacity = 19 + Z = 4 // Number of blocks per bucket + stashSize = 7000000 // Maximum number of blocks in stash +) + +// This function simulates operations on ORAM and stores snapshots of internal data. 
+func main() { + key_input := "oblisqloram" + // Generate the SHA-256 hash of the input string + hash := sha256.New() + hash.Write([]byte(key_input)) + + // Return the 256-bit (32-byte) hash as a byte slice + key := hash.Sum(nil) + + // ----------- + + fmt.Println("Building ORAM snapshot with logCapacity: ", logCapacity) + fmt.Println("Building ORAM snapshot with Z value: ", Z) + + // Initialize ORAM + o, err := oram.NewORAM(logCapacity, Z, stashSize, "127.0.0.1:6379", false, key) + if err != nil { + log.Fatalf("Error initializing ORAM: %v", err) + } + defer o.RedisClient.Close() + + // totalOperations := 1000 // Number of operations you plan to perform + + // Use for testing: + // Create PUT requests + // putRequests := make([]request.Request, totalOperations) + // for i := 0; i < totalOperations; i++ { + // key := strconv.Itoa(i) + // putRequests[i] = request.Request{ + // Key: key, + // Value: fmt.Sprintf("Value%d", i), + // } + // } + + fmt.Println("Loading data from tracefile now:") + + // Load data from tracefile and create Request objects + fileLoc := "/home/haseeb/Desktop/MakeSnapshot/ObliSQL/ORAM_KARIM/serverInput_1.txt" + fmt.Println("Generating for: ", fileLoc) + var requests []request.Request + file, err := os.Open(fileLoc) // TODO: define tracefile path here + if err != nil { + log.Fatalf("failed to open tracefile: %v", err) + } + defer file.Close() + + const maxBufferSize = 1024 * 1024 // 1MB + + scanner := bufio.NewScanner(file) + buffer := make([]byte, maxBufferSize) + scanner.Buffer(buffer, maxBufferSize) + + for scanner.Scan() { + line := scanner.Text() + // Only process lines that start with "SET" + if strings.HasPrefix(line, "SET") { + parts := strings.SplitN(line, " ", 3) + if len(parts) != 3 { + continue // skip lines that don't have exactly 3 parts + } + key := parts[1] + value := parts[2] + requests = append(requests, request.Request{Key: key, Value: value}) + } + } + + if err := scanner.Err(); err != nil { + log.Fatalf("error reading 
tracefile: %v", err) + } + + fmt.Println("Finished scanning tracefile") + + fmt.Println("Setting values in DB...") + + // Initialize DB with tracefile contents and display a progress bar + batchSize := 10 + bar := progressbar.Default(int64(len(requests)), "Setting values...") + + for start := 0; start < len(requests); start += batchSize { + end := start + batchSize + if end > len(requests) { + end = len(requests) // Ensure we don't go out of bounds + } + + o.Batching(requests[start:end], batchSize) + + // Increment the progress bar by the batch size or remaining items + _ = bar.Add(end - start) + } + + bar.Finish() + fmt.Println("Finished Initializing DB!") + + // Optionally print the ORAM's internal tree (this might be useful for debugging) + // utils.PrintTree(o) + + // Print the stash map to inspect the internal state of ORAM + fmt.Println("Printing the stash...") + utils.PrintStashMap(o) + + // ============================================================= + // Storing Keymap and StashMap in a JSON file + a := o.Keymap + b := o.StashMap + + // Prepare data to write to file + data := map[string]interface{}{ + "Keymap": a, + "StashMap": b, + } + + // Open or create the snapshot file + file, err = os.Create("proxy_snapshot.json") + if err != nil { + log.Fatalf("Error creating file: %v", err) + } + defer file.Close() + + // Serialize data to JSON and write to the file + encoder := json.NewEncoder(file) + encoder.SetIndent("", " ") // Indentation for better readability + if err := encoder.Encode(data); err != nil { + log.Fatalf("Error encoding data to JSON: %v", err) + } + + fmt.Println("Data written to file: proxy_snapshot.json") + + // ============================================================= + + // Trigger a save to dump.rdb + fmt.Println("Triggering RDB snapshot save (SAVE)...") + err = o.RedisClient.Client.Save(o.RedisClient.Ctx).Err() // SAVE command + if err != nil { + log.Fatalf("Error triggering RDB snapshot save: %v", err) + } + fmt.Println("RDB 
snapshot save triggered successfully.") + +} diff --git a/ORAM_SNAPSHOT/pathOram/tests/oram_batching_test.go b/ORAM_SNAPSHOT/pathOram/tests/oram_batching_test.go new file mode 100644 index 000000000..d00bcd873 --- /dev/null +++ b/ORAM_SNAPSHOT/pathOram/tests/oram_batching_test.go @@ -0,0 +1,116 @@ +/* +Approach to debugging: +use new readpaths, old writepath, +remove batching in some parts if needed +make sure mget and mset are working as they should be +use printtree() and printstash() where needed +*/ + +package oram_test + +import ( + "fmt" + "strconv" + "testing" + + "pathOram/pkg/oram/oram" + "pathOram/pkg/oram/request" + "pathOram/pkg/oram/utils" + + "github.com/schollz/progressbar/v3" +) + +const ( + logCapacity = 10 // Logarithm base 2 of capacity (1024 buckets) + Z = 4 // Number of blocks per bucket + stashSize = 20 // Maximum number of blocks in stash +) + +/* +This test case inserts 1000 values into the key-value store and then retrieves the key:values, asserting their correctness. 
+*/ +func TestORAMReadWrite(t *testing.T) { + // Initialize your ORAM structure or use a mocked instance + o, err := oram.NewORAM(logCapacity, Z, stashSize, "127.0.0.1:6379", false, nil) + if err != nil { + t.Fatalf("Error initializing ORAM: %v", err) + } + defer o.RedisClient.Close() + + totalOperations := 1000 // Number of operations you plan to perform + + // Create PUT requests + putRequests := make([]request.Request, totalOperations) + for i := 0; i < totalOperations; i++ { + key := strconv.Itoa(i) + putRequests[i] = request.Request{ + Key: key, + Value: fmt.Sprintf("Value%d", i), + } + } + + // Create GET requests + getRequests := make([]request.Request, totalOperations) + for i := 0; i < totalOperations; i++ { + key := strconv.Itoa(i) + getRequests[i] = request.Request{ + Key: key, + Value: "", + } + } + + // Batch size for processing requests + batchSize := 50 // Adjust batch size as needed + + // Process PUT requests in batches + writeProgress := progressbar.Default(int64(totalOperations), "Writing: ") + + for i := 0; i < totalOperations; i += batchSize { + end := i + batchSize + if end > totalOperations { + end = totalOperations + } + _, err := o.Batching(putRequests[i:end], batchSize) + if err != nil { + t.Fatalf("Error during PUT batching: %v", err) + } + writeProgress.Add(end - i) + } + + writeProgress.Finish() + + // fmt.Println("Printing the tree") + // utils.PrintTree(o) + + fmt.Println("Printing the stash...") + utils.PrintStashMap(o) + + // Process GET requests in batches and verify results + readProgress := progressbar.Default(int64(totalOperations), "Reading: ") + + for i := 0; i < totalOperations; i += batchSize { + end := i + batchSize + if end > totalOperations { + end = totalOperations + } + results, err := o.Batching(getRequests[i:end], batchSize) + if err != nil { + t.Fatalf("Error during GET batching: %v", err) + } + + // Verify each key-value pair for the current batch + for j := 0; j < end-i; j++ { + key := getRequests[i+j].Key + 
expectedValue := fmt.Sprintf("Value%s", key) + if results[j] != expectedValue { + t.Errorf("Mismatched value for key %s: expected %s, got %s", key, expectedValue, results[j]) + } + } + readProgress.Add(end - i) + } + + readProgress.Finish() + + fmt.Println("Printing the stash...") + utils.PrintStashMap(o) +} diff --git a/ORAM_SNAPSHOT/pathOram/tests/oram_fake_read_test.go b/ORAM_SNAPSHOT/pathOram/tests/oram_fake_read_test.go new file mode 100644 index 000000000..644efdff3 --- /dev/null +++ b/ORAM_SNAPSHOT/pathOram/tests/oram_fake_read_test.go @@ -0,0 +1,103 @@ +package oram_test + +import ( + "fmt" + "math/rand" + "testing" + "time" + + "pathOram/pkg/oram/oram" + "pathOram/pkg/oram/request" + "pathOram/pkg/oram/utils" + + "github.com/schollz/progressbar/v3" +) + +const ( + logCapacity = 10 // Logarithm base 2 of capacity (1024 buckets) + Z = 4 // Number of blocks per bucket + stashSize = 20 // Maximum number of blocks in stash +) + +func TestORAMFakeReadDuplicateKeys(t *testing.T) { + // Initialize ORAM structure + o, err := oram.NewORAM(logCapacity, Z, stashSize, "127.0.0.1:6379", false, nil) + if err != nil { + t.Fatalf("Error initializing ORAM: %v", err) + } + defer o.RedisClient.Close() + + totalOperations := 1000 // Number of operations to perform + keys := []string{"1", "2"} // Keys to use for this test + batchSize := 50 // Batch size for processing requests + + // Seed the random number generator for reproducibility + rand.Seed(time.Now().UnixNano()) + + // Map to keep track of expected values + expectedValues := map[string]string{ + "1": "", + "2": "", + } + + // Slice to store the sequence of requests + requests := make([]request.Request, totalOperations) + + // Generate random requests + for i := 0; i < totalOperations; i++ { + key := keys[rand.Intn(len(keys))] // Randomly pick key 1 or 2 + if rand.Float32() < 0.5 { + // Write operation + value := fmt.Sprintf("Value%d", i) + requests[i] = request.Request{ + Key: key, + Value: value, + } + } else { + // 
Read operation + requests[i] = request.Request{ + Key: key, + Value: "", + } + } + } + + // Process requests in batches + progress := progressbar.Default(int64(totalOperations), "Processing: ") + + for i := 0; i < totalOperations; i += batchSize { + end := i + batchSize + if end > totalOperations { + end = totalOperations + } + + batch := requests[i:end] + results, err := o.Batching(batch, batchSize) + if err != nil { + t.Fatalf("Error during batch operation: %v", err) + } + + // Handle the results of the batch + for j, req := range batch { + if req.Value != "" { + // Update expected value for PUT request + expectedValues[req.Key] = req.Value + } else { + // Verify the result of the GET request + expectedValue := expectedValues[req.Key] + if expectedValue == "" { + expectedValue = "-1" + } + if results[j] != expectedValue { + t.Errorf("Mismatched value for key %s: expected %s, got %s", req.Key, expectedValue, results[j]) + } + } + } + progress.Add(end - i) + } + + progress.Finish() + + fmt.Println("Final stash state:") + utils.PrintStashMap(o) +} diff --git a/ORAM_SNAPSHOT/pathOram/tests/test_db_snapshot.go b/ORAM_SNAPSHOT/pathOram/tests/test_db_snapshot.go new file mode 100644 index 000000000..b74c9b0a4 --- /dev/null +++ b/ORAM_SNAPSHOT/pathOram/tests/test_db_snapshot.go @@ -0,0 +1,187 @@ +package main + +import ( + "bufio" + "crypto/sha256" + "fmt" + "log" + "os" + "strings" + + "pathOram/pkg/oram/oram" + "pathOram/pkg/oram/request" + // Used to load .env file +) + +const ( + logCapacity = 22 // Logarithm base 2 of capacity (1024 buckets) + Z = 4 // Number of blocks per bucket + stashSize = 7000000 // Maximum number of blocks in stash +) + +// This function simulates operations on ORAM and stores snapshots of internal data. 
+func main() { + + key_input := "oblisqloram" + // Generate the SHA-256 hash of the input string + hash := sha256.New() + hash.Write([]byte(key_input)) + + // Return the 256-bit (32-byte) hash as a byte slice + key := hash.Sum(nil) + + // -------------------- + + // Initialize ORAM + o, err := oram.NewORAM(logCapacity, Z, stashSize, "127.0.0.1:6379", true, key) + if err != nil { + log.Fatalf("Error initializing ORAM: %v", err) + } + defer o.RedisClient.Close() + + // // Print the Keymap after initializing ORAM + // fmt.Println("Keymap after ORAM initialization:") + // for key, value := range o.Keymap { + // fmt.Printf("Key: %s, Value: %d\n", key, value) + // } + + //totalOperations := 1000 // Number of operations you plan to perform + + // Batch size for processing requests + // batchSize := 10 // Adjust batch size as needed + + // ============================================================= + + var dataMap map[string]string = make(map[string]string) + + // Load data from tracefile and create Request objects + var getRequests []request.Request + file, err := os.Open("/Users/nachiketrao/Desktop/URA/Tracefiles/serverInput.txt") // TODO: define tracefile path here + if err != nil { + log.Fatalf("failed to open tracefile: %v", err) + } + defer file.Close() + + const maxBufferSize = 1024 * 1024 // 1MB + + scanner := bufio.NewScanner(file) + buffer := make([]byte, maxBufferSize) + scanner.Buffer(buffer, maxBufferSize) + + for scanner.Scan() { + line := scanner.Text() + // Only process lines that start with "SET" + if strings.HasPrefix(line, "SET") { + parts := strings.SplitN(line, " ", 3) + if len(parts) != 3 { + continue // skip lines that don't have exactly 3 parts + } + key := parts[1] + value := parts[2] + dataMap[key] = value + getRequests = append(getRequests, request.Request{Key: key, Value: ""}) + } + } + + if err := scanner.Err(); err != nil { + log.Fatalf("error reading tracefile: %v", err) + } + + // log.Println("Reading all tracefile values: (First pass)") + 
+ // readProgress := progressbar.Default(int64(len(getRequests)), "Reading: ") + + // for i := 0; i < len(getRequests); i += batchSize { + // end := i + batchSize + // if end > len(getRequests) { + // end = len(getRequests) + // } + // results, err := o.Batching(getRequests[i:end], min(batchSize, end-i)) + // if err != nil { + // log.Fatalf("Error during GET batching: %v", err) + // } + + // // Verify the results + // for j := 0; j < end-i; j++ { + // key := getRequests[i+j].Key + // //expectedValue := fmt.Sprintf("Value%s", key) + // if results[j] != dataMap[key] { + // log.Printf("Mismatched value for key %s: expected %s, got %s", key, dataMap[key], results[j]) + // } + // } + // readProgress.Add(end - i) + // } + + // readProgress.Finish() + + // utils.PrintStashMap(o) + + // log.Println() + // log.Println("Reading all tracefile values: (Second pass)") + + // readProgress = progressbar.Default(int64(len(getRequests)), "Reading: ") + + // for i := 0; i < len(getRequests); i += batchSize { + // end := i + batchSize + // if end > len(getRequests) { + // end = len(getRequests) + // } + // results, err := o.Batching(getRequests[i:end], min(batchSize, end-i)) + // if err != nil { + // log.Fatalf("Error during GET batching: %v", err) + // } + + // // Verify the results + // for j := 0; j < end-i; j++ { + // key := getRequests[i+j].Key + // //expectedValue := fmt.Sprintf("Value%s", key) + // if results[j] != dataMap[key] { + // log.Printf("Mismatched value for key %s: expected %s, got %s", key, dataMap[key], results[j]) + // } + // } + // readProgress.Add(end - i) + // } + + // readProgress.Finish() + + // utils.PrintStashMap(o) + + // Interactive mode for manual inputs + fmt.Println("Enter SET to store a value or GET to retrieve. 
Type EXIT to quit.") + scanner = bufio.NewScanner(os.Stdin) + for { + fmt.Print("> ") + scanner.Scan() + input := scanner.Text() + if strings.ToUpper(input) == "EXIT" { + break + } + + parts := strings.Fields(input) + if len(parts) < 2 { + fmt.Println("Invalid command. Use SET or GET .") + continue + } + + command, key := strings.ToUpper(parts[0]), parts[1] + if command == "SET" && len(parts) == 3 { + value := parts[2] + dataMap[key] = value + _, err := o.Batching([]request.Request{{Key: key, Value: value}}, 1) + if err != nil { + log.Printf("Error setting key %s: %v", key, err) + } else { + fmt.Printf("Stored key %s with value %s\n", key, value) + } + } else if command == "GET" { + results, err := o.Batching([]request.Request{{Key: key, Value: ""}}, 1) + if err != nil { + log.Printf("Error getting key %s: %v", key, err) + } else { + fmt.Printf("Retrieved key %s: %s\n", key, results[0]) + } + } else { + fmt.Println("Invalid command. Use SET or GET .") + } + } +} diff --git a/StartupScripts/StartRedis.md b/StartupScripts/StartRedis.md new file mode 100644 index 000000000..1e3201136 --- /dev/null +++ b/StartupScripts/StartRedis.md @@ -0,0 +1,18 @@ +# Redis Multi-Host Deployment Script + +This repository provides a simple bash script (`redis_deploy.sh`) to launch multiple Redis instances across different remote servers using SSH. Each instance runs on a unique port and data directory. + +--- + +## Requirements + +- Redis installed on all target servers. +- SSH access (passwordless recommended, via SSH keys). +- Bash available on the machine where you run the script. + +--- + +## Usage + +```bash +./redis_deploy.sh "" diff --git a/StartupScripts/StartRedis.sh b/StartupScripts/StartRedis.sh new file mode 100644 index 000000000..c81c8b342 --- /dev/null +++ b/StartupScripts/StartRedis.sh @@ -0,0 +1,34 @@ +#!/bin/bash +# redis_deploy.sh +# +# Usage: +# ./redis_deploy.sh "10.0.0.1 10.0.0.2 10.0.0.3" 6380 /hdd1/haseeb/redisdata +# +# Arguments: +# 1. 
Space-separated list of IP addresses +# 2. Base port number (will increment for each host) +# 3. Data directory (created on each host) + +IPS=($1) +BASE_PORT=$2 +DATA_DIR=$3 + +if [[ -z "$1" || -z "$2" || -z "$3" ]]; then + echo "Usage: $0 \"\" " + exit 1 +fi + +for i in "${!IPS[@]}"; do + HOST=${IPS[$i]} + PORT=$((BASE_PORT + i)) + echo "Deploying Redis on $HOST:$PORT" + + ssh $HOST "mkdir -p $DATA_DIR/$PORT" + + ssh $HOST "nohup redis-server --port $PORT \ + --dir $DATA_DIR/$PORT \ + --daemonize yes \ + --save \"\" --appendonly no > /tmp/redis_$PORT.log 2>&1 &" + + echo "Redis started on $HOST:$PORT" +done diff --git a/jaeger/docker-compose.yaml b/jaeger/docker-compose.yaml index 83a40a195..a27f4760a 100644 --- a/jaeger/docker-compose.yaml +++ b/jaeger/docker-compose.yaml @@ -2,7 +2,8 @@ version: '3.8' services: jaeger: - image: jaegertracing/opentelemetry-all-in-one:latest + image: jaegertracing/all-in-one:latest ports: - - "16686:16686" # api - - "4317:4317" # grpc \ No newline at end of file + - "16686:16686" # UI + HTTP API (v2/v3) + - "16685:16685" # gRPC query + - "4317:4317" # OTLP gRPC ingest \ No newline at end of file diff --git a/jaeger/test_capture/emit.go b/jaeger/test_capture/emit.go new file mode 100644 index 000000000..8731a4704 --- /dev/null +++ b/jaeger/test_capture/emit.go @@ -0,0 +1,74 @@ +package main + +import ( + "context" + "fmt" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/sdk/resource" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + "google.golang.org/grpc" +) + +func initTracer(ctx context.Context) (*sdktrace.TracerProvider, error) { + // Connect to Jaeger all-in-one collector on localhost:4317 + conn, err := grpc.DialContext(ctx, "localhost:4317", + grpc.WithInsecure(), + grpc.WithBlock(), + ) + if err != nil { + return nil, fmt.Errorf("failed to create 
gRPC connection to collector: %w", err) + } + + exp, err := otlptracegrpc.New(ctx, otlptracegrpc.WithGRPCConn(conn)) + if err != nil { + return nil, fmt.Errorf("failed to create OTLP exporter: %w", err) + } + + tp := sdktrace.NewTracerProvider( + sdktrace.WithBatcher(exp), + sdktrace.WithResource(resource.NewWithAttributes( + semconv.SchemaURL, + semconv.ServiceName("go-jaeger-test"), + )), + ) + + otel.SetTracerProvider(tp) + return tp, nil +} + +func main() { + ctx := context.Background() + tp, err := initTracer(ctx) + if err != nil { + panic(err) + } + defer func() { + _ = tp.Shutdown(ctx) + }() + + tracer := otel.Tracer("go-jaeger-test") + + // Generate a few traces + for i := 1; i <= 5; i++ { + func() { + _, span := tracer.Start(ctx, "test-span") + defer span.End() + + span.SetAttributes( + attribute.String("iteration", fmt.Sprint(i)), + attribute.String("status", "running"), + ) + span.AddEvent("doing some work") + time.Sleep(200 * time.Millisecond) + span.AddEvent("work done") + }() + time.Sleep(500 * time.Millisecond) + } + + fmt.Println("✅ Sent test traces to Jaeger") +} diff --git a/jaeger/test_capture/test_capture b/jaeger/test_capture/test_capture new file mode 100755 index 000000000..b8631a8de Binary files /dev/null and b/jaeger/test_capture/test_capture differ