From d1a4fa6b2c1e48062fa1f15c4c414bb84134b89a Mon Sep 17 00:00:00 2001 From: Matios102 Date: Mon, 29 Dec 2025 12:43:39 +0100 Subject: [PATCH 01/18] feat: implement filtered copy from container with size and directory restrictions --- internal/docker/docker_client.go | 22 ++++-- internal/stages/executor/executor.go | 12 ++- internal/stages/executor/executor_test.go | 14 ++-- pkg/constants/constants.go | 5 +- tests/mocks/mocks_generated.go | 12 +-- utils/utils.go | 92 ++++++++++++++++++++++- 6 files changed, 135 insertions(+), 22 deletions(-) diff --git a/internal/docker/docker_client.go b/internal/docker/docker_client.go index 31445ac..f5576a4 100644 --- a/internal/docker/docker_client.go +++ b/internal/docker/docker_client.go @@ -27,7 +27,13 @@ type DockerClient interface { WaitContainer(ctx context.Context, containerID string, timeout time.Duration) (int64, error) ContainerKill(ctx context.Context, containerID, signal string) CopyToContainerFiltered(ctx context.Context, containerID, srcPath, dstPath string, excludeTop []string) error - CopyFromContainer(ctx context.Context, containerID, srcPath, dstPath string) error + CopyFromContainerFiltered( + ctx context.Context, + containerID, srcPath, dstPath string, + allowedDirs []string, + maxFileSize int64, + maxFilesInDir int, + ) error ContainerRemove(ctx context.Context, containerID string) } @@ -141,7 +147,13 @@ func (d *dockerClient) CopyToContainerFiltered( return err } -func (d *dockerClient) CopyFromContainer(ctx context.Context, containerID, srcPath, dstPath string) error { +func (d *dockerClient) CopyFromContainerFiltered( + ctx context.Context, + containerID, srcPath, dstPath string, + allowedDirs []string, + maxFileSize int64, + maxFilesInDir int, +) error { // Get tar archive from container reader, _, err := d.cli.CopyFromContainer(ctx, containerID, srcPath) if err != nil { @@ -150,10 +162,10 @@ func (d *dockerClient) CopyFromContainer(ctx context.Context, containerID, srcPa } defer reader.Close() - // 
Extract tar archive to destination - err = utils.ExtractTarArchive(reader, dstPath) + // Extract tar archive with filtering to destination + err = utils.ExtractTarArchiveFiltered(reader, dstPath, allowedDirs, maxFileSize, maxFilesInDir) if err != nil { - d.logger.Errorf("Failed to extract tar archive to %s: %s", dstPath, err) + d.logger.Errorf("Failed to extract filtered tar archive to %s: %s", dstPath, err) } return err } diff --git a/internal/stages/executor/executor.go b/internal/stages/executor/executor.go index 9121508..54b31ab 100644 --- a/internal/stages/executor/executor.go +++ b/internal/stages/executor/executor.go @@ -129,11 +129,21 @@ func (d *executor) ExecuteCommand( d.logger.Infof("Copying results from container %s [MsgID: %s]", containerID, cfg.MessageID) copyCtx, copyCancel := context.WithTimeout(context.Background(), 30*time.Second) defer copyCancel() - err = d.docker.CopyFromContainer( + + allowedDirs := []string{ + constants.UserOutputDirName, + constants.UserErrorDirName, + constants.UserDiffDirName, + constants.UserExecResultDirName, + } + err = d.docker.CopyFromContainerFiltered( copyCtx, containerID, cfg.DirConfig.PackageDirPath, cfg.DirConfig.TmpDirPath, + allowedDirs, + constants.MaxContainerOutputFileSize, + len(cfg.TestCases), ) if err != nil { return err diff --git a/internal/stages/executor/executor_test.go b/internal/stages/executor/executor_test.go index 155a935..fbbef7a 100644 --- a/internal/stages/executor/executor_test.go +++ b/internal/stages/executor/executor_test.go @@ -77,8 +77,8 @@ func setupMockExpectations( mockDocker.EXPECT().WaitContainer( gomock.Any(), containerID, gomock.Any(), ).Return(statusCode, nil).Times(1), - mockDocker.EXPECT().CopyFromContainer( - gomock.Any(), containerID, gomock.Any(), gomock.Any(), + mockDocker.EXPECT().CopyFromContainerFiltered( + gomock.Any(), containerID, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), ).Return(nil).Times(1), 
mockDocker.EXPECT().ContainerRemove(gomock.Any(), containerID).Times(1), ) @@ -117,8 +117,8 @@ func TestExecuteCommand_Success(t *testing.T) { mockDocker.EXPECT().WaitContainer( gomock.Any(), "cid123", gomock.Any(), ).Return(int64(0), nil).Times(1), - mockDocker.EXPECT().CopyFromContainer( - gomock.Any(), "cid123", gomock.Any(), gomock.Any(), + mockDocker.EXPECT().CopyFromContainerFiltered( + gomock.Any(), "cid123", gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), ).Return(nil).Times(1), mockDocker.EXPECT().ContainerRemove(gomock.Any(), "cid123").Times(1), ) @@ -319,8 +319,8 @@ func TestExecuteCommand_CopyFromContainerFails(t *testing.T) { mockDocker.EXPECT().WaitContainer( gomock.Any(), "cid-copyfrom-fail", gomock.Any(), ).Return(int64(0), nil).Times(1), - mockDocker.EXPECT().CopyFromContainer( - gomock.Any(), "cid-copyfrom-fail", gomock.Any(), gomock.Any(), + mockDocker.EXPECT().CopyFromContainerFiltered( + gomock.Any(), "cid-copyfrom-fail", gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), ).Return(errors.New("copy-from-failed")).Times(1), mockDocker.EXPECT().ContainerRemove(gomock.Any(), "cid-copyfrom-fail").Times(1), ) @@ -337,7 +337,7 @@ func TestExecuteCommand_CopyFromContainerFails(t *testing.T) { err := ex.ExecuteCommand(cfg) if err == nil { - t.Fatalf("expected error from CopyFromContainer, got nil") + t.Fatalf("expected error from CopyFromContainerFiltered, got nil") } } diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go index a85e0fd..4ef3016 100644 --- a/pkg/constants/constants.go +++ b/pkg/constants/constants.go @@ -79,8 +79,9 @@ const ( // Docker execution constants. const ( - MinContainerMemoryKB int64 = 64 * 1024 // 64 MB - DockerTestScript = "run_tests.sh" + MinContainerMemoryKB int64 = 64 * 1024 // 64 MB + MaxContainerOutputFileSize int64 = 1024 * 1024 // 1 MB per output file + DockerTestScript = "run_tests.sh" ) // RabbitMQ specific constants. 
diff --git a/tests/mocks/mocks_generated.go b/tests/mocks/mocks_generated.go index 2631365..4cdd0db 100644 --- a/tests/mocks/mocks_generated.go +++ b/tests/mocks/mocks_generated.go @@ -709,18 +709,18 @@ func (mr *MockDockerClientMockRecorder) ContainerRemove(ctx, containerID any) *g return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContainerRemove", reflect.TypeOf((*MockDockerClient)(nil).ContainerRemove), ctx, containerID) } -// CopyFromContainer mocks base method. -func (m *MockDockerClient) CopyFromContainer(ctx context.Context, containerID, srcPath, dstPath string) error { +// CopyFromContainerFiltered mocks base method. +func (m *MockDockerClient) CopyFromContainerFiltered(ctx context.Context, containerID, srcPath, dstPath string, allowedDirs []string, maxFileSize int64, maxFilesInDir int) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CopyFromContainer", ctx, containerID, srcPath, dstPath) + ret := m.ctrl.Call(m, "CopyFromContainerFiltered", ctx, containerID, srcPath, dstPath, allowedDirs, maxFileSize, maxFilesInDir) ret0, _ := ret[0].(error) return ret0 } -// CopyFromContainer indicates an expected call of CopyFromContainer. -func (mr *MockDockerClientMockRecorder) CopyFromContainer(ctx, containerID, srcPath, dstPath any) *gomock.Call { +// CopyFromContainerFiltered indicates an expected call of CopyFromContainerFiltered. +func (mr *MockDockerClientMockRecorder) CopyFromContainerFiltered(ctx, containerID, srcPath, dstPath, allowedDirs, maxFileSize, maxFilesInDir any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyFromContainer", reflect.TypeOf((*MockDockerClient)(nil).CopyFromContainer), ctx, containerID, srcPath, dstPath) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyFromContainerFiltered", reflect.TypeOf((*MockDockerClient)(nil).CopyFromContainerFiltered), ctx, containerID, srcPath, dstPath, allowedDirs, maxFileSize, maxFilesInDir) } // CopyToContainerFiltered mocks base method. 
diff --git a/utils/utils.go b/utils/utils.go index 03cbf7d..beb10f5 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -3,6 +3,7 @@ package utils import ( "archive/tar" "errors" + "fmt" "io" "log" "os" @@ -346,7 +347,8 @@ func materializeTarEntry(tarReader *tar.Reader, header *tar.Header, target strin return err } - file, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR|os.O_TRUNC, os.FileMode(header.Mode)) + safeMode := os.FileMode(header.Mode) & 0666 // Remove execute bits + file, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR|os.O_TRUNC, safeMode) if err != nil { return err } @@ -357,7 +359,95 @@ func materializeTarEntry(tarReader *tar.Reader, header *tar.Header, target strin } CloseFile(file) return nil + case tar.TypeSymlink, tar.TypeLink: + return errors.New("symlinks and hardlinks are not allowed in archives") default: return nil } } + +func isAllowedDirectory(header *tar.Header, allowedSet map[string]struct{}) bool { + parts := strings.Split(header.Name, string(os.PathSeparator)) + if len(parts) == 0 { + return false + } + + var baseDir string + if len(parts) > 1 { + baseDir = parts[1] // Skip the package directory itself + } else { + baseDir = parts[0] + } + + _, allowed := allowedSet[baseDir] + return allowed || header.Typeflag == tar.TypeDir +} + +// validateFileCount enforces the maximum number of files per directory. 
+func validateFileCount(header *tar.Header, dirFileCount map[string]int, maxFilesInDir int) error { + if header.Typeflag == tar.TypeReg { + dirPath := filepath.Dir(header.Name) + dirFileCount[dirPath]++ + if dirFileCount[dirPath] > maxFilesInDir { + return fmt.Errorf("directory %s exceeds maximum file count of %d", dirPath, maxFilesInDir) + } + } + return nil +} + +func ExtractTarArchiveFiltered( + reader io.Reader, + dstPath string, + allowedDirs []string, + maxFileSize int64, + maxFilesInDir int, +) error { + tarReader := tar.NewReader(reader) + + absDstPath, err := filepath.Abs(dstPath) + if err != nil { + return err + } + + // Build a set of allowed directories for fast lookup + allowedSet := make(map[string]struct{}, len(allowedDirs)) + for _, dir := range allowedDirs { + allowedSet[dir] = struct{}{} + } + + dirFileCount := make(map[string]int, len(allowedDirs)) + + for { + header, err := tarReader.Next() + if errors.Is(err, io.EOF) { + return nil + } + if err != nil { + return err + } + + // Validate file size for regular files + if header.Typeflag == tar.TypeReg && header.Size > maxFileSize { + return fmt.Errorf("file %s exceeds maximum size of %d bytes (size: %d)", header.Name, maxFileSize, header.Size) + } + + // Check if the file is in an allowed directory + if !isAllowedDirectory(header, allowedSet) { + continue + } + + // Enforce max files per directory + if err := validateFileCount(header, dirFileCount, maxFilesInDir); err != nil { + return err + } + + target, err := safeArchiveTarget(absDstPath, header.Name) + if err != nil { + return err + } + + if err := materializeTarEntry(tarReader, header, target); err != nil { + return err + } + } +} From bc5d10f36112158cdb3026af40f501260a89bc4c Mon Sep 17 00:00:00 2001 From: Matios102 Date: Mon, 29 Dec 2025 12:45:39 +0100 Subject: [PATCH 02/18] refactor: remove unused functions and clean up code in utils package --- utils/utils.go | 76 -------------------------------------------------- 1 file changed, 76 
deletions(-) diff --git a/utils/utils.go b/utils/utils.go index beb10f5..abd06bd 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -93,17 +93,6 @@ func copyAndRemove(src, dst string) error { return os.Remove(src) } -// Checks if given elements is contained in given array. -func Contains[V string](array []V, value V) bool { - for _, el := range array { - if el == value { - return true - } - } - - return false -} - // ValidateFilename checks if a filename contains only safe characters. // Returns an error if the filename contains shell metacharacters or path separators // that could be used for command injection or directory traversal attacks. @@ -156,42 +145,6 @@ func RemoveIO(dir string, recursive, ignoreError bool) error { return err } -// CreateTarArchive creates a tar archive from a directory. -func CreateTarArchive(srcPath string) (io.ReadCloser, error) { - pipeReader, pipeWriter := io.Pipe() - - go func() { - tarWriter := tar.NewWriter(pipeWriter) - defer func() { _ = tarWriter.Close() }() - defer func() { _ = pipeWriter.Close() }() - - err := filepath.Walk(srcPath, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - // Get relative path. - relPath, err := filepath.Rel(srcPath, path) - if err != nil { - return err - } - - // Skip the root directory itself. - if relPath == "." { - return nil - } - - return writeToTarArchive(tarWriter, path, relPath, info) - }) - - if err != nil { - pipeWriter.CloseWithError(err) - } - }() - - return pipeReader, nil -} - // writeToTarArchive writes a file or directory to a tar archive. func writeToTarArchive(tarWriter *tar.Writer, path, name string, info os.FileInfo) error { // Create tar header. @@ -297,35 +250,6 @@ func CreateTarArchiveWithBaseFiltered(srcPath string, excludeTopLevel []string) return pipeReader, nil } -// ExtractTarArchive extracts a tar archive to a directory. 
-func ExtractTarArchive(reader io.Reader, dstPath string) error { - tarReader := tar.NewReader(reader) - - absDstPath, err := filepath.Abs(dstPath) - if err != nil { - return err - } - - for { - header, err := tarReader.Next() - if errors.Is(err, io.EOF) { - return nil - } - if err != nil { - return err - } - - target, err := safeArchiveTarget(absDstPath, header.Name) - if err != nil { - return err - } - - if err := materializeTarEntry(tarReader, header, target); err != nil { - return err - } - } -} - func safeArchiveTarget(absDstPath, entryName string) (string, error) { target := filepath.Join(absDstPath, entryName) absTarget, err := filepath.Abs(target) From 9a7e986ba75e758ef6fdc6839fe5f34fcf4cd03d Mon Sep 17 00:00:00 2001 From: Matios102 Date: Mon, 29 Dec 2025 12:47:56 +0100 Subject: [PATCH 03/18] fix: use background context for container cleanup to avoid potential timeout issues --- internal/stages/executor/executor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/stages/executor/executor.go b/internal/stages/executor/executor.go index 54b31ab..cdaee1a 100644 --- a/internal/stages/executor/executor.go +++ b/internal/stages/executor/executor.go @@ -99,7 +99,7 @@ func (d *executor) ExecuteCommand( // Ensure container cleanup defer func() { - cleanupCtx, cleanupCancel := context.WithTimeout(ctx, 10*time.Second) + cleanupCtx, cleanupCancel := context.WithTimeout(context.Background(), 10*time.Second) defer cleanupCancel() d.docker.ContainerRemove(cleanupCtx, containerID) }() From ed0112c1cfae07ea4f0facdb6b99f9644e0e8458 Mon Sep 17 00:00:00 2001 From: Matios102 Date: Mon, 29 Dec 2025 12:50:25 +0100 Subject: [PATCH 04/18] refactor: remove redundant checks in ExtractTarArchiveFiltered for improved clarity --- utils/utils.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/utils/utils.go b/utils/utils.go index abd06bd..23238fd 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -333,7 +333,6 @@ func ExtractTarArchiveFiltered( return err } 
- // Build a set of allowed directories for fast lookup allowedSet := make(map[string]struct{}, len(allowedDirs)) for _, dir := range allowedDirs { allowedSet[dir] = struct{}{} @@ -355,12 +354,10 @@ func ExtractTarArchiveFiltered( return fmt.Errorf("file %s exceeds maximum size of %d bytes (size: %d)", header.Name, maxFileSize, header.Size) } - // Check if the file is in an allowed directory if !isAllowedDirectory(header, allowedSet) { continue } - // Enforce max files per directory if err := validateFileCount(header, dirFileCount, maxFilesInDir); err != nil { return err } From 6c476e39264de069f5d5812814bfdecb2ffecfd1 Mon Sep 17 00:00:00 2001 From: Matios102 Date: Mon, 29 Dec 2025 13:04:09 +0100 Subject: [PATCH 05/18] refactor: simplify directory allowance check in tar extraction process --- utils/utils.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/utils/utils.go b/utils/utils.go index 23238fd..0b25575 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -304,7 +304,7 @@ func isAllowedDirectory(header *tar.Header, allowedSet map[string]struct{}) bool } _, allowed := allowedSet[baseDir] - return allowed || header.Typeflag == tar.TypeDir + return allowed } // validateFileCount enforces the maximum number of files per directory. @@ -349,15 +349,15 @@ func ExtractTarArchiveFiltered( return err } - // Validate file size for regular files - if header.Typeflag == tar.TypeReg && header.Size > maxFileSize { - return fmt.Errorf("file %s exceeds maximum size of %d bytes (size: %d)", header.Name, maxFileSize, header.Size) - } - if !isAllowedDirectory(header, allowedSet) { continue } + // Validate file size for regular files. 
+ if header.Typeflag == tar.TypeReg && header.Size > maxFileSize { + return fmt.Errorf("file %s exceeds maximum size of %d bytes (size: %d)", header.Name, maxFileSize, header.Size) + } + if err := validateFileCount(header, dirFileCount, maxFilesInDir); err != nil { return err } From 7056d008c83b9e133c4f34321af1b5f65c98388c Mon Sep 17 00:00:00 2001 From: Matios102 Date: Mon, 29 Dec 2025 13:08:45 +0100 Subject: [PATCH 06/18] refactor: standardize runner user configuration in Dockerfiles and constants --- internal/stages/executor/cpp/Dockerfile | 2 +- internal/stages/executor/python/Dockerfile | 2 +- pkg/constants/constants.go | 3 +++ utils/utils.go | 9 +++++---- 4 files changed, 10 insertions(+), 6 deletions(-) diff --git a/internal/stages/executor/cpp/Dockerfile b/internal/stages/executor/cpp/Dockerfile index e103bcc..c930b39 100644 --- a/internal/stages/executor/cpp/Dockerfile +++ b/internal/stages/executor/cpp/Dockerfile @@ -9,6 +9,6 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ COPY run_tests.sh /usr/local/bin/run_tests.sh RUN chmod +x /usr/local/bin/run_tests.sh -RUN useradd -M -s /usr/sbin/nologin runner +RUN useradd -u 1000 -g 1000 -M -s /usr/sbin/nologin runner USER runner WORKDIR /tmp diff --git a/internal/stages/executor/python/Dockerfile b/internal/stages/executor/python/Dockerfile index 6f5023d..a4822c9 100644 --- a/internal/stages/executor/python/Dockerfile +++ b/internal/stages/executor/python/Dockerfile @@ -16,6 +16,6 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ COPY run_tests.sh /usr/local/bin/run_tests.sh RUN chmod +x /usr/local/bin/run_tests.sh -RUN useradd -M -s /usr/sbin/nologin runner +RUN useradd -u 1000 -g 1000 -M -s /usr/sbin/nologin runner USER runner WORKDIR /tmp diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go index 4ef3016..09735c5 100644 --- a/pkg/constants/constants.go +++ b/pkg/constants/constants.go @@ -82,6 +82,9 @@ const ( MinContainerMemoryKB int64 = 64 * 1024 // 
64 MB MaxContainerOutputFileSize int64 = 1024 * 1024 // 1 MB per output file DockerTestScript = "run_tests.sh" + RunnerUID = 1000 + RunnerGID = 1000 + RunnerName = "runner" ) // RabbitMQ specific constants. diff --git a/utils/utils.go b/utils/utils.go index 0b25575..a35ab23 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -13,6 +13,7 @@ import ( "syscall" "github.com/kballard/go-shellquote" + "github.com/mini-maxit/worker/pkg/constants" ) // Attemts to close the file, and panics if something goes wrong. @@ -153,10 +154,10 @@ func writeToTarArchive(tarWriter *tar.Writer, path, name string, info os.FileInf return err } header.Name = name - header.Gid = 1000 - header.Uid = 1000 - header.Gname = "runner" - header.Uname = "runner" + header.Gid = constants.RunnerGID + header.Uid = constants.RunnerUID + header.Gname = constants.RunnerName + header.Uname = constants.RunnerName if err := tarWriter.WriteHeader(header); err != nil { return err From d4445c394381d0362747be8496fb0a656cc8cbbc Mon Sep 17 00:00:00 2001 From: Matios102 Date: Mon, 29 Dec 2025 14:52:57 +0100 Subject: [PATCH 07/18] refactor: enhance host configuration by incorporating memory limits from test cases --- internal/stages/executor/executor.go | 30 +++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/internal/stages/executor/executor.go b/internal/stages/executor/executor.go index cdaee1a..5c95b67 100644 --- a/internal/stages/executor/executor.go +++ b/internal/stages/executor/executor.go @@ -85,7 +85,7 @@ func (d *executor) ExecuteCommand( env, ) - hostCfg := buildHostConfig() + hostCfg := buildHostConfig(cfg.TestCases) if err := d.docker.EnsureImage(ctx, dockerImage); err != nil { return err @@ -249,17 +249,37 @@ func buildContainerConfig( } } -func buildHostConfig() *container.HostConfig { +func buildHostConfig(testCases []messages.TestCase) *container.HostConfig { + maxKB := constants.MinContainerMemoryKB + for _, tc := range testCases { + if tc.MemoryLimitKB > 
maxKB { + maxKB = tc.MemoryLimitKB + } + } + + // Headroom: 20% + at least 64MB for runtime/script overhead + headroomKB := maxKB / 5 + minHeadroomKB := int64(64 * 1024) + if headroomKB < minHeadroomKB { + headroomKB = minHeadroomKB + } + + containerKB := maxKB + headroomKB + containerBytes := containerKB * 1024 + return &container.HostConfig{ AutoRemove: false, NetworkMode: container.NetworkMode("none"), Resources: container.Resources{ - PidsLimit: func(v int64) *int64 { return &v }(64), - CPUPeriod: 100_000, - CPUQuota: 100_000, + Memory: containerBytes, + MemorySwap: containerBytes, + PidsLimit: func(v int64) *int64 { return &v }(64), + CPUPeriod: 100_000, + CPUQuota: 100_000, }, SecurityOpt: []string{"no-new-privileges"}, CgroupnsMode: container.CgroupnsModePrivate, IpcMode: container.IpcMode("private"), + CapDrop: []string{"ALL"}, } } From 5c5e1b39ab4ae986077fd0d815f2fb91886b20fb Mon Sep 17 00:00:00 2001 From: Matios102 Date: Mon, 29 Dec 2025 15:00:51 +0100 Subject: [PATCH 08/18] refactor: replace file count validation with path depth check in tar extraction --- utils/utils.go | 55 +++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 43 insertions(+), 12 deletions(-) diff --git a/utils/utils.go b/utils/utils.go index a35ab23..a154d3d 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -308,14 +308,41 @@ func isAllowedDirectory(header *tar.Header, allowedSet map[string]struct{}) bool return allowed } -// validateFileCount enforces the maximum number of files per directory. 
-func validateFileCount(header *tar.Header, dirFileCount map[string]int, maxFilesInDir int) error { - if header.Typeflag == tar.TypeReg { - dirPath := filepath.Dir(header.Name) - dirFileCount[dirPath]++ - if dirFileCount[dirPath] > maxFilesInDir { - return fmt.Errorf("directory %s exceeds maximum file count of %d", dirPath, maxFilesInDir) - } +func validatePathDepth(entryPath string) error { + const maxDepth = 1 + + parts := strings.Split(entryPath, string(os.PathSeparator)) + if len(parts) > maxDepth+1 { + return fmt.Errorf("subdirectories not allowed in archives: %s", entryPath) + } + + return nil +} + +// validateTarEntrySize validates that a tar entry doesn't exceed maximum file size. +func validateTarEntrySize(header *tar.Header, maxFileSize int64) error { + if header.Typeflag == tar.TypeReg && header.Size > maxFileSize { + return fmt.Errorf("file %s exceeds maximum size of %d bytes (size: %d)", header.Name, maxFileSize, header.Size) + } + return nil +} + +// validateAndTrackFileCount validates per-directory file count and updates the tracking map. +func validateAndTrackFileCount(header *tar.Header, dirFileCount map[string]int, maxFilesInDir int) error { + if header.Typeflag != tar.TypeReg { + return nil + } + + // Extract the allowed directory name (second path component) + parts := strings.Split(header.Name, string(os.PathSeparator)) + if len(parts) < 2 { + return fmt.Errorf("invalid file path structure: %s", header.Name) + } + dirName := parts[1] + + dirFileCount[dirName]++ + if dirFileCount[dirName] > maxFilesInDir { + return fmt.Errorf("directory %s exceeds maximum file count of %d", dirName, maxFilesInDir) } return nil } @@ -339,6 +366,7 @@ func ExtractTarArchiveFiltered( allowedSet[dir] = struct{}{} } + // Track files per allowed directory to enforce per-directory limits dirFileCount := make(map[string]int, len(allowedDirs)) for { @@ -354,12 +382,15 @@ func ExtractTarArchiveFiltered( continue } - // Validate file size for regular files. 
- if header.Typeflag == tar.TypeReg && header.Size > maxFileSize { - return fmt.Errorf("file %s exceeds maximum size of %d bytes (size: %d)", header.Name, maxFileSize, header.Size) + if err := validateTarEntrySize(header, maxFileSize); err != nil { + return err + } + + if err := validatePathDepth(header.Name); err != nil { + return err } - if err := validateFileCount(header, dirFileCount, maxFilesInDir); err != nil { + if err := validateAndTrackFileCount(header, dirFileCount, maxFilesInDir); err != nil { return err } From e5db573fd32eff7f415cd73d2bd107ddf7ba5543 Mon Sep 17 00:00:00 2001 From: Matios102 Date: Mon, 29 Dec 2025 15:03:25 +0100 Subject: [PATCH 09/18] refactor: reduce PidsLimit in buildHostConfig from 64 to 4 for improved resource management --- internal/stages/executor/executor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/stages/executor/executor.go b/internal/stages/executor/executor.go index 5c95b67..048660b 100644 --- a/internal/stages/executor/executor.go +++ b/internal/stages/executor/executor.go @@ -273,7 +273,7 @@ func buildHostConfig(testCases []messages.TestCase) *container.HostConfig { Resources: container.Resources{ Memory: containerBytes, MemorySwap: containerBytes, - PidsLimit: func(v int64) *int64 { return &v }(64), + PidsLimit: func(v int64) *int64 { return &v }(4), CPUPeriod: 100_000, CPUQuota: 100_000, }, From c47f892ab8a887178794fcd036b225a79b246ca9 Mon Sep 17 00:00:00 2001 From: Matios102 Date: Mon, 29 Dec 2025 15:08:16 +0100 Subject: [PATCH 10/18] refactor: increase PidsLimit in buildHostConfig from 4 to 8 for enhanced process management; update max path depth in validation from 1 to 2 --- internal/stages/executor/executor.go | 2 +- utils/utils.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/stages/executor/executor.go b/internal/stages/executor/executor.go index 048660b..93c47ea 100644 --- a/internal/stages/executor/executor.go +++ 
b/internal/stages/executor/executor.go @@ -273,7 +273,7 @@ func buildHostConfig(testCases []messages.TestCase) *container.HostConfig { Resources: container.Resources{ Memory: containerBytes, MemorySwap: containerBytes, - PidsLimit: func(v int64) *int64 { return &v }(4), + PidsLimit: func(v int64) *int64 { return &v }(8), CPUPeriod: 100_000, CPUQuota: 100_000, }, diff --git a/utils/utils.go b/utils/utils.go index a154d3d..4d83334 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -309,7 +309,7 @@ func isAllowedDirectory(header *tar.Header, allowedSet map[string]struct{}) bool } func validatePathDepth(entryPath string) error { - const maxDepth = 1 + const maxDepth = 2 parts := strings.Split(entryPath, string(os.PathSeparator)) if len(parts) > maxDepth+1 { From 9c21d9939e33bb40de9e4fdfc5fe42082052993c Mon Sep 17 00:00:00 2001 From: Matios102 <127307847+Matios102@users.noreply.github.com> Date: Mon, 29 Dec 2025 16:03:53 +0100 Subject: [PATCH 11/18] Update internal/stages/executor/cpp/Dockerfile Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- internal/stages/executor/cpp/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/stages/executor/cpp/Dockerfile b/internal/stages/executor/cpp/Dockerfile index c930b39..3ff614c 100644 --- a/internal/stages/executor/cpp/Dockerfile +++ b/internal/stages/executor/cpp/Dockerfile @@ -9,6 +9,6 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ COPY run_tests.sh /usr/local/bin/run_tests.sh RUN chmod +x /usr/local/bin/run_tests.sh -RUN useradd -u 1000 -g 1000 -M -s /usr/sbin/nologin runner +RUN groupadd -g 1000 runner && useradd -u 1000 -g 1000 -M -s /usr/sbin/nologin runner USER runner WORKDIR /tmp From df7d712c6fd96958649ebdb7427aaf93ff799d63 Mon Sep 17 00:00:00 2001 From: Matios102 <127307847+Matios102@users.noreply.github.com> Date: Tue, 30 Dec 2025 11:12:54 +0100 Subject: [PATCH 12/18] Update internal/stages/executor/python/Dockerfile Co-authored-by: 
Copilot <175728472+Copilot@users.noreply.github.com> --- internal/stages/executor/python/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/stages/executor/python/Dockerfile b/internal/stages/executor/python/Dockerfile index a4822c9..4a01929 100644 --- a/internal/stages/executor/python/Dockerfile +++ b/internal/stages/executor/python/Dockerfile @@ -16,6 +16,6 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ COPY run_tests.sh /usr/local/bin/run_tests.sh RUN chmod +x /usr/local/bin/run_tests.sh -RUN useradd -u 1000 -g 1000 -M -s /usr/sbin/nologin runner +RUN groupadd -g 1000 runner && useradd -u 1000 -g 1000 -M -s /usr/sbin/nologin runner USER runner WORKDIR /tmp From 247f60da005131715f235012f61032954b02e6ef Mon Sep 17 00:00:00 2001 From: Matios102 Date: Tue, 30 Dec 2025 11:11:30 +0100 Subject: [PATCH 13/18] handle compilation inside the container, remove compiler class from the stages --- Dockerfile | 3 +- cmd/main.go | 4 +- internal/pipeline/pipeline.go | 49 ++--- internal/scheduler/scheduler.go | 7 +- internal/stages/compiler/compiler.go | 65 ------ internal/stages/compiler/compiler_test.go | 78 -------- internal/stages/compiler/cpp_compiler.go | 72 ------- internal/stages/compiler/cpp_compiler_test.go | 128 ------------ internal/stages/executor/cpp/Dockerfile | 2 +- internal/stages/executor/cpp/run_tests.sh | 188 ++++++++++++++++++ internal/stages/executor/executor.go | 45 ++++- internal/stages/executor/run_tests.sh | 40 ++++ 12 files changed, 287 insertions(+), 394 deletions(-) delete mode 100644 internal/stages/compiler/compiler.go delete mode 100644 internal/stages/compiler/compiler_test.go delete mode 100644 internal/stages/compiler/cpp_compiler.go delete mode 100644 internal/stages/compiler/cpp_compiler_test.go create mode 100644 internal/stages/executor/cpp/run_tests.sh diff --git a/Dockerfile b/Dockerfile index ce6607e..8786afe 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ FROM 
golang:1.23-bookworm AS builder RUN apt-get update && \ - apt-get install -y sudo debootstrap schroot g++ && \ + apt-get install -y sudo debootstrap schroot && \ rm -rf /var/lib/apt/lists/* WORKDIR /app @@ -17,7 +17,6 @@ FROM debian:bookworm-slim RUN apt-get update && \ apt-get install -y \ - g++ \ docker.io \ && rm -rf /var/lib/apt/lists/* diff --git a/cmd/main.go b/cmd/main.go index 39c33ef..6c77d7d 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -8,7 +8,6 @@ import ( "github.com/mini-maxit/worker/internal/rabbitmq/consumer" "github.com/mini-maxit/worker/internal/rabbitmq/responder" "github.com/mini-maxit/worker/internal/scheduler" - "github.com/mini-maxit/worker/internal/stages/compiler" "github.com/mini-maxit/worker/internal/stages/executor" "github.com/mini-maxit/worker/internal/stages/packager" "github.com/mini-maxit/worker/internal/stages/verifier" @@ -43,7 +42,6 @@ func main() { // Initialize the services storage := storage.NewStorage(config.StorageBaseUrl) - compiler := compiler.NewCompiler() packager := packager.NewPackager(storage) executor := executor.NewExecutor(dCli) verifier := verifier.NewVerifier(config.VerifierFlags) @@ -53,7 +51,7 @@ func main() { logger.Error("Failed to close responder", err) } }() - scheduler := scheduler.NewScheduler(config.MaxWorkers, compiler, packager, executor, verifier, responder) + scheduler := scheduler.NewScheduler(config.MaxWorkers, packager, executor, verifier, responder) consumer := consumer.NewConsumer(workerChannel, config.ConsumeQueueName, scheduler, responder) // Start listening for messages diff --git a/internal/pipeline/pipeline.go b/internal/pipeline/pipeline.go index d3db195..fff7dc8 100644 --- a/internal/pipeline/pipeline.go +++ b/internal/pipeline/pipeline.go @@ -1,17 +1,15 @@ package pipeline import ( - "errors" "fmt" + "os" "github.com/mini-maxit/worker/internal/logger" "github.com/mini-maxit/worker/internal/rabbitmq/responder" - "github.com/mini-maxit/worker/internal/stages/compiler" 
"github.com/mini-maxit/worker/internal/stages/executor" "github.com/mini-maxit/worker/internal/stages/packager" "github.com/mini-maxit/worker/internal/stages/verifier" "github.com/mini-maxit/worker/pkg/constants" - customErr "github.com/mini-maxit/worker/pkg/errors" "github.com/mini-maxit/worker/pkg/languages" "github.com/mini-maxit/worker/pkg/messages" "github.com/mini-maxit/worker/pkg/solution" @@ -37,7 +35,6 @@ type worker struct { id int state WorkerState responseQueue string - compiler compiler.Compiler packager packager.Packager executor executor.Executor verifier verifier.Verifier @@ -47,7 +44,6 @@ type worker struct { func NewWorker( id int, - compiler compiler.Compiler, packager packager.Packager, executor executor.Executor, verifier verifier.Verifier, @@ -58,7 +54,6 @@ func NewWorker( return &worker{ id: id, state: WorkerState{Status: constants.WorkerStatusIdle, ProcessingMessageID: ""}, - compiler: compiler, packager: packager, executor: executor, verifier: verifier, @@ -136,28 +131,7 @@ func (ws *worker) ProcessTask(messageID, responseQueue string, task *messages.Ta } }() - err = ws.compiler.CompileSolutionIfNeeded( - langType, - task.LanguageVersion, - dc.UserSolutionPath, - dc.UserExecFilePath, - dc.CompileErrFilePath, - messageID) - - if err != nil { - if errors.Is(err, customErr.ErrCompilationFailed) { - ws.publishCompilationError(dc, task.TestCases) - return - } - - ws.responder.PublishTaskErrorToResponseQueue( - constants.QueueMessageTypeTask, - ws.state.ProcessingMessageID, - ws.responseQueue, - err, - ) - return - } + requiresCompilation := !langType.IsScriptingLanguage() limits := make([]solution.Limit, len(task.TestCases)) for i, tc := range task.TestCases { @@ -168,15 +142,24 @@ func (ws *worker) ProcessTask(messageID, responseQueue string, task *messages.Ta } cfg := executor.CommandConfig{ - MessageID: messageID, - DirConfig: dc, - LanguageType: langType, - LanguageVersion: task.LanguageVersion, - TestCases: task.TestCases, + MessageID: 
messageID, + DirConfig: dc, + LanguageType: langType, + LanguageVersion: task.LanguageVersion, + TestCases: task.TestCases, + SourceFilePath: dc.UserSolutionPath, + RequiresCompiling: requiresCompilation, } err = ws.executor.ExecuteCommand(cfg) if err != nil { + // Check if it's a compilation error by checking if the error file has content + if fileInfo, statErr := os.Stat(dc.CompileErrFilePath); statErr == nil && fileInfo.Size() > 0 { + // Compilation error file has content, treat as compilation error + ws.publishCompilationError(dc, task.TestCases) + return + } + ws.responder.PublishTaskErrorToResponseQueue( constants.QueueMessageTypeTask, ws.state.ProcessingMessageID, diff --git a/internal/scheduler/scheduler.go b/internal/scheduler/scheduler.go index 633032d..0581335 100644 --- a/internal/scheduler/scheduler.go +++ b/internal/scheduler/scheduler.go @@ -6,7 +6,6 @@ import ( "github.com/mini-maxit/worker/internal/logger" "github.com/mini-maxit/worker/internal/pipeline" "github.com/mini-maxit/worker/internal/rabbitmq/responder" - "github.com/mini-maxit/worker/internal/stages/compiler" "github.com/mini-maxit/worker/internal/stages/executor" "github.com/mini-maxit/worker/internal/stages/packager" "github.com/mini-maxit/worker/internal/stages/verifier" @@ -30,14 +29,13 @@ type scheduler struct { func NewScheduler( maxWorkers int, - compiler compiler.Compiler, packager packager.Packager, executor executor.Executor, verifier verifier.Verifier, responder responder.Responder, ) Scheduler { - return NewSchedulerWithWorkers(maxWorkers, nil, compiler, packager, executor, verifier, responder) + return NewSchedulerWithWorkers(maxWorkers, nil, packager, executor, verifier, responder) } // NewSchedulerWithWorkers creates a Scheduler using the provided worker map. 
@@ -46,7 +44,6 @@ func NewScheduler( func NewSchedulerWithWorkers( maxWorkers int, workers map[int]pipeline.Worker, - compiler compiler.Compiler, packager packager.Packager, executor executor.Executor, verifier verifier.Verifier, @@ -55,7 +52,7 @@ func NewSchedulerWithWorkers( if workers == nil { workers = make(map[int]pipeline.Worker, maxWorkers) for i := range maxWorkers { - workers[i] = pipeline.NewWorker(i, compiler, packager, executor, verifier, responder) + workers[i] = pipeline.NewWorker(i, packager, executor, verifier, responder) } } diff --git a/internal/stages/compiler/compiler.go b/internal/stages/compiler/compiler.go deleted file mode 100644 index 37f7fc0..0000000 --- a/internal/stages/compiler/compiler.go +++ /dev/null @@ -1,65 +0,0 @@ -package compiler - -import ( - customErr "github.com/mini-maxit/worker/pkg/errors" - "github.com/mini-maxit/worker/pkg/languages" -) - -type Compiler interface { - CompileSolutionIfNeeded( - langType languages.LanguageType, - langVersion string, - sourceFilePath string, - outFilePath string, - compErrFilePath string, - messageID string, - ) error -} - -type compiler struct { -} - -func NewCompiler() Compiler { - return &compiler{} -} - -// LanguageCompiler is a language-specific compiler invoked internally. -type LanguageCompiler interface { - Compile(sourceFilePath, outFilePath, compErrFilePath, messageID string) error -} - -func initializeSolutionCompiler( - langType languages.LanguageType, - langVersion string, - messageID string, -) (LanguageCompiler, error) { - switch langType { - case languages.CPP: - return NewCppCompiler(langVersion, messageID) - case languages.PYTHON: - return nil, customErr.ErrInvalidLanguageType // Make linter happy - default: - return nil, customErr.ErrInvalidLanguageType - } -} - -// get proper compiler and compile if needed. 
-func (c *compiler) CompileSolutionIfNeeded( - langType languages.LanguageType, - langVersion, - sourceFilePath, - execFilePath, - compErrFilePath, - messageID string, -) error { - if langType.IsScriptingLanguage() { - return nil - } - - compiler, err := initializeSolutionCompiler(langType, langVersion, messageID) - if err != nil { - return err - } - - return compiler.Compile(sourceFilePath, execFilePath, compErrFilePath, messageID) -} diff --git a/internal/stages/compiler/compiler_test.go b/internal/stages/compiler/compiler_test.go deleted file mode 100644 index 3d46ad9..0000000 --- a/internal/stages/compiler/compiler_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package compiler_test - -import ( - "context" - "os" - "os/exec" - "path/filepath" - "testing" - "time" - - "errors" - - . "github.com/mini-maxit/worker/internal/stages/compiler" - pkgErr "github.com/mini-maxit/worker/pkg/errors" - "github.com/mini-maxit/worker/pkg/languages" - "github.com/mini-maxit/worker/tests" -) - -func TestNewCompiler(t *testing.T) { - c := NewCompiler() - if c == nil { - t.Fatalf("NewCompiler returned nil") - } -} - -func TestCompileSolutionIfNeeded_Success(t *testing.T) { - dir := t.TempDir() - src := tests.WriteFile(t, dir, "main.cpp", ` - - #include - - int main() { - std::cout << "hello" << std::endl; - return 0; - } - `) - - out := filepath.Join(dir, "outbin") - compErr := filepath.Join(dir, "compile.err") - - c := NewCompiler() - - if err := c.CompileSolutionIfNeeded(languages.CPP, "17", src, out, compErr, "msg-id"); err != nil { - t.Fatalf("expected compile to succeed, got: %v", err) - } - - if info, err := os.Stat(out); err != nil { - t.Fatalf("expected output binary to exist: %v", err) - } else if info.IsDir() { - t.Fatalf("expected output to be a file, got dir") - } - - // run the produced binary to ensure it runs and returns 0 - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - cmd := exec.CommandContext(ctx, out) - if outb, err := 
cmd.CombinedOutput(); err != nil { - t.Fatalf("running compiled binary failed: %v, output: %s", err, string(outb)) - } -} - -func TestCompileSolutionIfNeeded_InvalidLanguage(t *testing.T) { - dir := t.TempDir() - src := tests.WriteFile(t, dir, "main.txt", `dummy`) - out := filepath.Join(dir, "out") - compErr := filepath.Join(dir, "compile.err") - - c := NewCompiler() - - // use an invalid language value (0) - err := c.CompileSolutionIfNeeded(languages.LanguageType(0), "", src, out, compErr, "msg-id") - if err == nil { - t.Fatalf("expected error for invalid language") - } - if !errors.Is(err, pkgErr.ErrInvalidLanguageType) { - t.Fatalf("expected ErrInvalidLanguageType, got: %v", err) - } -} diff --git a/internal/stages/compiler/cpp_compiler.go b/internal/stages/compiler/cpp_compiler.go deleted file mode 100644 index 3ae7707..0000000 --- a/internal/stages/compiler/cpp_compiler.go +++ /dev/null @@ -1,72 +0,0 @@ -package compiler - -import ( - "bytes" - "context" - "os" - "os/exec" - - "github.com/mini-maxit/worker/internal/logger" - "github.com/mini-maxit/worker/pkg/errors" - "github.com/mini-maxit/worker/pkg/languages" - "go.uber.org/zap" -) - -type CppCompiler struct { - version string - logger *zap.SugaredLogger -} - -func (e *CppCompiler) RequiresCompilation() bool { - return true -} - -// For now compile allows only one file. -func (e *CppCompiler) Compile(sourceFilePath, execFilePath, compErrFilePath, messageID string) error { - e.logger.Infof("Compiling %s [MsgID: %s]", sourceFilePath, messageID) - // Correctly pass the command and its arguments as separate strings. - versionFlag := "-std=" + e.version - ctx := context.Background() // TODO: use a timeout context - cmd := exec.CommandContext(ctx, "g++", "-o", execFilePath, versionFlag, sourceFilePath) - - var stderr bytes.Buffer - cmd.Stderr = &stderr - - cmdErr := cmd.Run() - if cmdErr != nil { - // Save stderr to a file - e.logger.Errorf("Error during compilation. 
%s [MsgID: %s]", cmdErr.Error(), messageID) - file, err := os.Create(compErrFilePath) - if err != nil { - e.logger.Errorf("Could not create stderr file. %s [MsgID: %s]", err.Error(), messageID) - return err - } - - _, err = file.Write(stderr.Bytes()) - if err != nil { - e.logger.Errorf("Error writing to file. %s [MsgID: %s]", err.Error(), messageID) - return err - } - err = file.Close() - if err != nil { - e.logger.Errorf("Error closing file. %s [MsgID: %s]", err.Error(), messageID) - return err - } - e.logger.Infof("Compilation error saved to %s [MsgID: %s]", compErrFilePath, messageID) - return errors.ErrCompilationFailed - } - e.logger.Infof("Compilation successful [MsgID: %s]", messageID) - return nil -} - -func NewCppCompiler(version, messageID string) (*CppCompiler, error) { - logger := logger.NewNamedLogger("cpp-compiler") - versionFlag, err := languages.GetVersionFlag(languages.CPP, version) - if err != nil { - logger.Errorf("Failed to get version flag. %s [MsgID: %s]", err.Error(), messageID) - return nil, err - } - return &CppCompiler{ - version: versionFlag, - logger: logger}, nil -} diff --git a/internal/stages/compiler/cpp_compiler_test.go b/internal/stages/compiler/cpp_compiler_test.go deleted file mode 100644 index 702a588..0000000 --- a/internal/stages/compiler/cpp_compiler_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package compiler_test - -import ( - "context" - "os" - "os/exec" - "path/filepath" - "testing" - "time" - - "errors" - - . 
"github.com/mini-maxit/worker/internal/stages/compiler" - pkgErr "github.com/mini-maxit/worker/pkg/errors" - "github.com/mini-maxit/worker/tests" -) - -func TestRequiresCompilation(t *testing.T) { - c, err := NewCppCompiler("17", "msg-test") - if err != nil { - t.Fatalf("NewCppCompiler returned error: %v", err) - } - if !c.RequiresCompilation() { - t.Fatalf("expected RequiresCompilation to be true") - } -} - -func TestCompileSuccess(t *testing.T) { - dir := t.TempDir() - src := tests.WriteFile(t, dir, "main.cpp", ` - - #include - - int main() { - std::cout << "hello" << std::endl; - return 0; - } - `) - - out := filepath.Join(dir, "outbin") - compErr := filepath.Join(dir, "compile.err") - - c, err := NewCppCompiler("17", "msg-test") - if err != nil { - t.Fatalf("NewCppCompiler returned error: %v", err) - } - - if err := c.Compile(src, out, compErr, "msg-id"); err != nil { - t.Fatalf("expected compile to succeed, got: %v", err) - } - - // binary should exist and be executable - if info, err := os.Stat(out); err != nil { - t.Fatalf("expected output binary to exist: %v", err) - } else if info.IsDir() { - t.Fatalf("expected output to be a file, got dir") - } - - // run the produced binary to ensure it runs and returns 0 - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - cmd := exec.CommandContext(ctx, out) - if outb, err := cmd.CombinedOutput(); err != nil { - t.Fatalf("running compiled binary failed: %v, output: %s", err, string(outb)) - } -} - -func TestCompileFailureProducesErrorFile(t *testing.T) { - dir := t.TempDir() - // intentionally broken C++ source - src := tests.WriteFile(t, dir, "bad.cpp", ` - - #include - - int main() { - this is not valid C++ - } - `) - - out := filepath.Join(dir, "outbad") - compErr := filepath.Join(dir, "compile.err") - - c, err := NewCppCompiler("17", "msg-id") - if err != nil { - t.Fatalf("NewCppCompiler returned error: %v", err) - } - - err = c.Compile(src, out, compErr, "msg-id") - if err 
== nil { - t.Fatalf("expected compile to fail for broken source") - } - if !errors.Is(err, pkgErr.ErrCompilationFailed) { - t.Fatalf("expected ErrCompilationFailed, got: %v", err) - } - - // error file should exist and contain some data - info, statErr := os.Stat(compErr) - if statErr != nil { - t.Fatalf("expected compile.err to exist, stat error: %v", statErr) - } - if info.Size() == 0 { - t.Fatalf("expected compile.err to contain compiler stderr") - } -} - -func TestNewCppCompilerIntegration(t *testing.T) { - // NewCppCompiler uses languages.GetVersionFlag; pass valid version '17' - cc, err := NewCppCompiler("17", "msg-test") - if err != nil { - t.Fatalf("NewCppCompiler returned error: %v", err) - } - - dir := t.TempDir() - src := tests.WriteFile(t, dir, "main2.cpp", ` - #include - - int main() { - printf("ok\n"); - return 0; - } - `) - out := filepath.Join(dir, "out2") - compErr := filepath.Join(dir, "compile2.err") - - if err := cc.Compile(src, out, compErr, "msg-test"); err != nil { - t.Fatalf("expected compile to succeed using NewCppCompiler, got: %v", err) - } -} diff --git a/internal/stages/executor/cpp/Dockerfile b/internal/stages/executor/cpp/Dockerfile index 3ff614c..53f991d 100644 --- a/internal/stages/executor/cpp/Dockerfile +++ b/internal/stages/executor/cpp/Dockerfile @@ -3,7 +3,7 @@ FROM debian:bookworm-slim ARG DEBIAN_FRONTEND=noninteractive RUN apt-get update && apt-get install -y --no-install-recommends \ - libstdc++6 ca-certificates time \ + libstdc++6 ca-certificates time g++\ && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* COPY run_tests.sh /usr/local/bin/run_tests.sh diff --git a/internal/stages/executor/cpp/run_tests.sh b/internal/stages/executor/cpp/run_tests.sh new file mode 100644 index 0000000..7243e2b --- /dev/null +++ b/internal/stages/executor/cpp/run_tests.sh @@ -0,0 +1,188 @@ +#!/usr/bin/env bash + +set -o pipefail +shopt -s nullglob + +# sanity checks for required env vars +if [[ -z "${RUN_CMD:-}" ]]; then + echo 
"ERROR: RUN_CMD must be set" >&2 + exit 1 +fi + +if [[ -z "${INPUT_FILES:-}" ]]; then + echo "ERROR: INPUT_FILES must be set" >&2 + exit 1 +fi + +if [[ -z "${USER_OUTPUT_FILES:-}" ]]; then + echo "ERROR: USER_OUTPUT_FILES must be set" >&2 + exit 1 +fi + +if [[ -z "${USER_EXEC_RESULT_FILES:-}" ]]; then + echo "ERROR: USER_EXEC_RESULT_FILES must be set" >&2 + exit 1 +fi + +if [[ -z "${USER_ERROR_FILES:-}" ]]; then + echo "ERROR: USER_ERROR_FILES must be set" >&2 + exit 1 +fi + +# Handle compilation if needed +if [[ "${REQUIRES_COMPILATION:-}" == "true" ]]; then + if [[ -z "${COMPILE_CMD:-}" ]]; then + echo "ERROR: COMPILE_CMD must be set when REQUIRES_COMPILATION is true" >&2 + exit 1 + fi + if [[ -z "${SOURCE_FILE:-}" ]]; then + echo "ERROR: SOURCE_FILE must be set when REQUIRES_COMPILATION is true" >&2 + exit 1 + fi + if [[ -z "${EXEC_FILE:-}" ]]; then + echo "ERROR: EXEC_FILE must be set when REQUIRES_COMPILATION is true" >&2 + exit 1 + fi + if [[ -z "${COMPILE_ERR_FILE:-}" ]]; then + echo "ERROR: COMPILE_ERR_FILE must be set when REQUIRES_COMPILATION is true" >&2 + exit 1 + fi + + # Parse the COMPILE_CMD (it's space-separated) + read -r -a compile_cmd <<< "$COMPILE_CMD" + + echo "Compiling: ${compile_cmd[@]}" + + # Run compilation and capture stderr + if ! "${compile_cmd[@]}" 2> "${COMPILE_ERR_FILE}"; then + echo "Compilation failed. Error details saved to ${COMPILE_ERR_FILE}" + exit 1 + fi + + # Check if executable was created + if [[ ! 
-f "${EXEC_FILE}" ]]; then + echo "Compilation failed: executable not created" > "${COMPILE_ERR_FILE}" + exit 1 + fi + + # Make executable if needed + chmod +x "${EXEC_FILE}" 2>/dev/null || true +fi + +read -r -a times <<< "$TIME_LIMITS_MS" +read -r -a mems <<< "$MEM_LIMITS_KB" +read -r -a inputs <<< "$INPUT_FILES" +read -r -a user_outputs <<< "$USER_OUTPUT_FILES" +read -r -a user_errors <<< "$USER_ERROR_FILES" +read -r -a exec_results <<< "$USER_EXEC_RESULT_FILES" + +# Basic count validations +if (( ${#times[@]} != ${#mems[@]} )); then + echo "ERROR: TIME_LIMITS and MEM_LIMITS must have same count" >&2 + exit 1 +fi + +n=${#inputs[@]} +if (( ${#times[@]} != n )); then + echo "ERROR: TIME/MEM count must match number of INPUT_FILES (${n})" >&2 + exit 1 +fi + +if (( ${#user_outputs[@]} != n )); then + echo "ERROR: USER_OUTPUT_FILES count must match number of INPUT_FILES" >&2 + exit 1 +fi + +if (( ${#user_errors[@]} != n )); then + echo "ERROR: USER_ERROR_FILES count must match number of INPUT_FILES" >&2 + exit 1 +fi + +if (( ${#exec_results[@]} != n )); then + echo "ERROR: USER_EXEC_RESULT_FILES count must match number of INPUT_FILES" >&2 + exit 1 +fi + +# Ensure all input files exist before running +missing=0 +for f in "${inputs[@]}"; do + if [[ ! 
-f "$f" ]]; then + echo "ERROR: input file not found: $f" >&2 + missing=1 + fi +done +if (( missing )); then + exit 1 +fi + +for idx in "${!inputs[@]}"; do + testno=$((idx+1)) + infile="${inputs[idx]}" + tlimit_ms=${times[idx]} + mlimit_kb=${mems[idx]} + + out="${user_outputs[idx]}" + err="${user_errors[idx]}" + exec_result="${exec_results[idx]}" + + # create parent directories if necessary + mkdir -p "$(dirname "${out}")" + mkdir -p "$(dirname "${err}")" + mkdir -p "$(dirname "${exec_result}")" + + # run in subshell to isolate failure + ( + # temp file to store peak memory from /usr/bin/time + peak_mem_file=$(mktemp) + + tlimit_s=$(awk "BEGIN {print ${tlimit_ms}/1000}") + + start_ns=$(date +%s%N) + + # Run the command with timeout and track peak memory usage + hard_kb=$(( mlimit_kb + mlimit_kb / 10 )) + timeout --preserve-status "${tlimit_s}s" \ + /usr/bin/time -f "%M" -o "${peak_mem_file}" \ + bash -c "ulimit -S -v ${mlimit_kb} && ulimit -H -v ${hard_kb} && ${RUN_CMD}" < "$infile" \ + > "${out}" 2> "${err}" + code=$? + + end_ns=$(date +%s%N) + + if [[ ! -s "${peak_mem_file}" ]]; then + peak_kb=0 + else + peak_kb=$(grep -o '[0-9]*' "${peak_mem_file}" | tail -n 1) + fi + rm -f "${peak_mem_file}" + + sig=0 + if (( code > 128 )); then + sig=$((code - 128)) + fi + + is_mle=0 + if (( sig == 15 )); then + echo "Time limit exceeded after ${tlimit_ms}ms" >> "${err}" + # SIGKILL(9), SIGSEGV(11), SIGABRT(6), SIGBUS(10) possibly mem limit exceeded, but this is NOT guaranteed by POSIX. 
+ elif (( sig == 9 || sig == 11 || sig == 6 || sig == 10 )); then + # Require that we were close to the memory limit + threshold_kb=$(( mlimit_kb * 9 / 10 )) + if (( peak_kb >= threshold_kb )); then + is_mle=1 + fi + fi + + if ((is_mle)); then + echo "Memory limit exceeded max ${mlimit_kb}KB" >> "${err}" + code=134 + fi + + duration_ns=$((end_ns - start_ns)) + duration_sec=$(awk "BEGIN {printf \"%.6f\", ${duration_ns}/1000000000}") + + # Write exit code, duration, and peak memory (KB) to exec result file + echo "$code $duration_sec $peak_kb" > "${exec_result}" + ) + +done diff --git a/internal/stages/executor/executor.go b/internal/stages/executor/executor.go index 93c47ea..d324bbf 100644 --- a/internal/stages/executor/executor.go +++ b/internal/stages/executor/executor.go @@ -27,11 +27,13 @@ import ( var containerNameRegex = regexp.MustCompile("[^a-zA-Z0-9_.-]") type CommandConfig struct { - MessageID string - DirConfig *packager.TaskDirConfig - LanguageType languages.LanguageType - LanguageVersion string - TestCases []messages.TestCase + MessageID string + DirConfig *packager.TaskDirConfig + LanguageType languages.LanguageType + LanguageVersion string + TestCases []messages.TestCase + SourceFilePath string + RequiresCompiling bool } type ExecutionResult struct { @@ -181,6 +183,22 @@ func SanitizeContainerName(raw string) string { return "submission-" + cleaned } +func buildCompileCommand(cfg CommandConfig) []string { + switch cfg.LanguageType { + case languages.CPP: + versionFlag, _ := languages.GetVersionFlag(languages.CPP, cfg.LanguageVersion) + return []string{ + "g++", + "-o", + filepath.Base(cfg.DirConfig.UserExecFilePath), + "-std=" + versionFlag, + filepath.Base(cfg.SourceFilePath), + } + default: + return []string{} + } +} + func buildEnvironmentVariables(cfg CommandConfig) ([]string, error) { timeEnv := make([]string, len(cfg.TestCases)) memEnv := make([]string, len(cfg.TestCases)) @@ -220,7 +238,7 @@ func buildEnvironmentVariables(cfg CommandConfig) 
([]string, error) { fmt.Sprintf("%d.%s", tc.Order, constants.ExecutionResultFileExt)) } - return []string{ + envVars := []string{ "RUN_CMD=" + utils.ShellQuoteSlice(runCmd), "TIME_LIMITS_MS=" + utils.ShellQuoteSlice(timeEnv), "MEM_LIMITS_KB=" + utils.ShellQuoteSlice(memEnv), @@ -228,7 +246,20 @@ func buildEnvironmentVariables(cfg CommandConfig) ([]string, error) { "USER_OUTPUT_FILES=" + utils.ShellQuoteSlice(userOutputFilePaths), "USER_ERROR_FILES=" + utils.ShellQuoteSlice(userErrorFilePaths), "USER_EXEC_RESULT_FILES=" + utils.ShellQuoteSlice(userExecResultFilePaths), - }, nil + } + + if cfg.RequiresCompiling { + compileCmd := buildCompileCommand(cfg) + envVars = append(envVars, + "REQUIRES_COMPILATION=true", + "COMPILE_CMD="+utils.ShellQuoteSlice(compileCmd), + "SOURCE_FILE="+filepath.Base(cfg.SourceFilePath), + "EXEC_FILE="+filepath.Base(cfg.DirConfig.UserExecFilePath), + "COMPILE_ERR_FILE="+filepath.Base(cfg.DirConfig.CompileErrFilePath), + ) + } + + return envVars, nil } func buildContainerConfig( diff --git a/internal/stages/executor/run_tests.sh b/internal/stages/executor/run_tests.sh index cb52d7f..7243e2b 100644 --- a/internal/stages/executor/run_tests.sh +++ b/internal/stages/executor/run_tests.sh @@ -29,6 +29,46 @@ if [[ -z "${USER_ERROR_FILES:-}" ]]; then exit 1 fi +# Handle compilation if needed +if [[ "${REQUIRES_COMPILATION:-}" == "true" ]]; then + if [[ -z "${COMPILE_CMD:-}" ]]; then + echo "ERROR: COMPILE_CMD must be set when REQUIRES_COMPILATION is true" >&2 + exit 1 + fi + if [[ -z "${SOURCE_FILE:-}" ]]; then + echo "ERROR: SOURCE_FILE must be set when REQUIRES_COMPILATION is true" >&2 + exit 1 + fi + if [[ -z "${EXEC_FILE:-}" ]]; then + echo "ERROR: EXEC_FILE must be set when REQUIRES_COMPILATION is true" >&2 + exit 1 + fi + if [[ -z "${COMPILE_ERR_FILE:-}" ]]; then + echo "ERROR: COMPILE_ERR_FILE must be set when REQUIRES_COMPILATION is true" >&2 + exit 1 + fi + + # Parse the COMPILE_CMD (it's space-separated) + read -r -a compile_cmd <<< 
"$COMPILE_CMD" + + echo "Compiling: ${compile_cmd[@]}" + + # Run compilation and capture stderr + if ! "${compile_cmd[@]}" 2> "${COMPILE_ERR_FILE}"; then + echo "Compilation failed. Error details saved to ${COMPILE_ERR_FILE}" + exit 1 + fi + + # Check if executable was created + if [[ ! -f "${EXEC_FILE}" ]]; then + echo "Compilation failed: executable not created" > "${COMPILE_ERR_FILE}" + exit 1 + fi + + # Make executable if needed + chmod +x "${EXEC_FILE}" 2>/dev/null || true +fi + read -r -a times <<< "$TIME_LIMITS_MS" read -r -a mems <<< "$MEM_LIMITS_KB" read -r -a inputs <<< "$INPUT_FILES" From b9197a649f108c5b840f21cea57fd61c78a36c1a Mon Sep 17 00:00:00 2001 From: Matios102 Date: Tue, 30 Dec 2025 11:23:26 +0100 Subject: [PATCH 14/18] whitespace --- internal/stages/executor/cpp/run_tests.sh | 2 +- internal/stages/executor/run_tests.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/stages/executor/cpp/run_tests.sh b/internal/stages/executor/cpp/run_tests.sh index 7243e2b..b39b927 100644 --- a/internal/stages/executor/cpp/run_tests.sh +++ b/internal/stages/executor/cpp/run_tests.sh @@ -52,7 +52,7 @@ if [[ "${REQUIRES_COMPILATION:-}" == "true" ]]; then read -r -a compile_cmd <<< "$COMPILE_CMD" echo "Compiling: ${compile_cmd[@]}" - + # Run compilation and capture stderr if ! "${compile_cmd[@]}" 2> "${COMPILE_ERR_FILE}"; then echo "Compilation failed. Error details saved to ${COMPILE_ERR_FILE}" diff --git a/internal/stages/executor/run_tests.sh b/internal/stages/executor/run_tests.sh index 7243e2b..b39b927 100644 --- a/internal/stages/executor/run_tests.sh +++ b/internal/stages/executor/run_tests.sh @@ -52,7 +52,7 @@ if [[ "${REQUIRES_COMPILATION:-}" == "true" ]]; then read -r -a compile_cmd <<< "$COMPILE_CMD" echo "Compiling: ${compile_cmd[@]}" - + # Run compilation and capture stderr if ! "${compile_cmd[@]}" 2> "${COMPILE_ERR_FILE}"; then echo "Compilation failed. 
Error details saved to ${COMPILE_ERR_FILE}" From d768e258b34d7da5f9ca4505c96a801ca31bf82f Mon Sep 17 00:00:00 2001 From: Matios102 Date: Tue, 30 Dec 2025 11:15:28 +0100 Subject: [PATCH 15/18] fix: adjust compilation error handling to exit with 0 on compilation error --- internal/pipeline/pipeline.go | 2 - internal/stages/executor/cpp/run_tests.sh | 188 ---------------------- internal/stages/executor/run_tests.sh | 5 +- 3 files changed, 2 insertions(+), 193 deletions(-) delete mode 100644 internal/stages/executor/cpp/run_tests.sh diff --git a/internal/pipeline/pipeline.go b/internal/pipeline/pipeline.go index fff7dc8..028f5a5 100644 --- a/internal/pipeline/pipeline.go +++ b/internal/pipeline/pipeline.go @@ -153,9 +153,7 @@ func (ws *worker) ProcessTask(messageID, responseQueue string, task *messages.Ta err = ws.executor.ExecuteCommand(cfg) if err != nil { - // Check if it's a compilation error by checking if the error file has content if fileInfo, statErr := os.Stat(dc.CompileErrFilePath); statErr == nil && fileInfo.Size() > 0 { - // Compilation error file has content, treat as compilation error ws.publishCompilationError(dc, task.TestCases) return } diff --git a/internal/stages/executor/cpp/run_tests.sh b/internal/stages/executor/cpp/run_tests.sh deleted file mode 100644 index b39b927..0000000 --- a/internal/stages/executor/cpp/run_tests.sh +++ /dev/null @@ -1,188 +0,0 @@ -#!/usr/bin/env bash - -set -o pipefail -shopt -s nullglob - -# sanity checks for required env vars -if [[ -z "${RUN_CMD:-}" ]]; then - echo "ERROR: RUN_CMD must be set" >&2 - exit 1 -fi - -if [[ -z "${INPUT_FILES:-}" ]]; then - echo "ERROR: INPUT_FILES must be set" >&2 - exit 1 -fi - -if [[ -z "${USER_OUTPUT_FILES:-}" ]]; then - echo "ERROR: USER_OUTPUT_FILES must be set" >&2 - exit 1 -fi - -if [[ -z "${USER_EXEC_RESULT_FILES:-}" ]]; then - echo "ERROR: USER_EXEC_RESULT_FILES must be set" >&2 - exit 1 -fi - -if [[ -z "${USER_ERROR_FILES:-}" ]]; then - echo "ERROR: USER_ERROR_FILES must be set" 
>&2 - exit 1 -fi - -# Handle compilation if needed -if [[ "${REQUIRES_COMPILATION:-}" == "true" ]]; then - if [[ -z "${COMPILE_CMD:-}" ]]; then - echo "ERROR: COMPILE_CMD must be set when REQUIRES_COMPILATION is true" >&2 - exit 1 - fi - if [[ -z "${SOURCE_FILE:-}" ]]; then - echo "ERROR: SOURCE_FILE must be set when REQUIRES_COMPILATION is true" >&2 - exit 1 - fi - if [[ -z "${EXEC_FILE:-}" ]]; then - echo "ERROR: EXEC_FILE must be set when REQUIRES_COMPILATION is true" >&2 - exit 1 - fi - if [[ -z "${COMPILE_ERR_FILE:-}" ]]; then - echo "ERROR: COMPILE_ERR_FILE must be set when REQUIRES_COMPILATION is true" >&2 - exit 1 - fi - - # Parse the COMPILE_CMD (it's space-separated) - read -r -a compile_cmd <<< "$COMPILE_CMD" - - echo "Compiling: ${compile_cmd[@]}" - - # Run compilation and capture stderr - if ! "${compile_cmd[@]}" 2> "${COMPILE_ERR_FILE}"; then - echo "Compilation failed. Error details saved to ${COMPILE_ERR_FILE}" - exit 1 - fi - - # Check if executable was created - if [[ ! 
-f "${EXEC_FILE}" ]]; then - echo "Compilation failed: executable not created" > "${COMPILE_ERR_FILE}" - exit 1 - fi - - # Make executable if needed - chmod +x "${EXEC_FILE}" 2>/dev/null || true -fi - -read -r -a times <<< "$TIME_LIMITS_MS" -read -r -a mems <<< "$MEM_LIMITS_KB" -read -r -a inputs <<< "$INPUT_FILES" -read -r -a user_outputs <<< "$USER_OUTPUT_FILES" -read -r -a user_errors <<< "$USER_ERROR_FILES" -read -r -a exec_results <<< "$USER_EXEC_RESULT_FILES" - -# Basic count validations -if (( ${#times[@]} != ${#mems[@]} )); then - echo "ERROR: TIME_LIMITS and MEM_LIMITS must have same count" >&2 - exit 1 -fi - -n=${#inputs[@]} -if (( ${#times[@]} != n )); then - echo "ERROR: TIME/MEM count must match number of INPUT_FILES (${n})" >&2 - exit 1 -fi - -if (( ${#user_outputs[@]} != n )); then - echo "ERROR: USER_OUTPUT_FILES count must match number of INPUT_FILES" >&2 - exit 1 -fi - -if (( ${#user_errors[@]} != n )); then - echo "ERROR: USER_ERROR_FILES count must match number of INPUT_FILES" >&2 - exit 1 -fi - -if (( ${#exec_results[@]} != n )); then - echo "ERROR: USER_EXEC_RESULT_FILES count must match number of INPUT_FILES" >&2 - exit 1 -fi - -# Ensure all input files exist before running -missing=0 -for f in "${inputs[@]}"; do - if [[ ! 
-f "$f" ]]; then - echo "ERROR: input file not found: $f" >&2 - missing=1 - fi -done -if (( missing )); then - exit 1 -fi - -for idx in "${!inputs[@]}"; do - testno=$((idx+1)) - infile="${inputs[idx]}" - tlimit_ms=${times[idx]} - mlimit_kb=${mems[idx]} - - out="${user_outputs[idx]}" - err="${user_errors[idx]}" - exec_result="${exec_results[idx]}" - - # create parent directories if necessary - mkdir -p "$(dirname "${out}")" - mkdir -p "$(dirname "${err}")" - mkdir -p "$(dirname "${exec_result}")" - - # run in subshell to isolate failure - ( - # temp file to store peak memory from /usr/bin/time - peak_mem_file=$(mktemp) - - tlimit_s=$(awk "BEGIN {print ${tlimit_ms}/1000}") - - start_ns=$(date +%s%N) - - # Run the command with timeout and track peak memory usage - hard_kb=$(( mlimit_kb + mlimit_kb / 10 )) - timeout --preserve-status "${tlimit_s}s" \ - /usr/bin/time -f "%M" -o "${peak_mem_file}" \ - bash -c "ulimit -S -v ${mlimit_kb} && ulimit -H -v ${hard_kb} && ${RUN_CMD}" < "$infile" \ - > "${out}" 2> "${err}" - code=$? - - end_ns=$(date +%s%N) - - if [[ ! -s "${peak_mem_file}" ]]; then - peak_kb=0 - else - peak_kb=$(grep -o '[0-9]*' "${peak_mem_file}" | tail -n 1) - fi - rm -f "${peak_mem_file}" - - sig=0 - if (( code > 128 )); then - sig=$((code - 128)) - fi - - is_mle=0 - if (( sig == 15 )); then - echo "Time limit exceeded after ${tlimit_ms}ms" >> "${err}" - # SIGKILL(9), SIGSEGV(11), SIGABRT(6), SIGBUS(10) possibly mem limit exceeded, but this is NOT guaranteed by POSIX. 
- elif (( sig == 9 || sig == 11 || sig == 6 || sig == 10 )); then - # Require that we were close to the memory limit - threshold_kb=$(( mlimit_kb * 9 / 10 )) - if (( peak_kb >= threshold_kb )); then - is_mle=1 - fi - fi - - if ((is_mle)); then - echo "Memory limit exceeded max ${mlimit_kb}KB" >> "${err}" - code=134 - fi - - duration_ns=$((end_ns - start_ns)) - duration_sec=$(awk "BEGIN {printf \"%.6f\", ${duration_ns}/1000000000}") - - # Write exit code, duration, and peak memory (KB) to exec result file - echo "$code $duration_sec $peak_kb" > "${exec_result}" - ) - -done diff --git a/internal/stages/executor/run_tests.sh b/internal/stages/executor/run_tests.sh index b39b927..e7bfa77 100644 --- a/internal/stages/executor/run_tests.sh +++ b/internal/stages/executor/run_tests.sh @@ -48,7 +48,6 @@ if [[ "${REQUIRES_COMPILATION:-}" == "true" ]]; then exit 1 fi - # Parse the COMPILE_CMD (it's space-separated) read -r -a compile_cmd <<< "$COMPILE_CMD" echo "Compiling: ${compile_cmd[@]}" @@ -56,13 +55,13 @@ if [[ "${REQUIRES_COMPILATION:-}" == "true" ]]; then # Run compilation and capture stderr if ! "${compile_cmd[@]}" 2> "${COMPILE_ERR_FILE}"; then echo "Compilation failed. Error details saved to ${COMPILE_ERR_FILE}" - exit 1 + exit 0 fi # Check if executable was created if [[ ! 
-f "${EXEC_FILE}" ]]; then echo "Compilation failed: executable not created" > "${COMPILE_ERR_FILE}" - exit 1 + exit 0 fi # Make executable if needed From 3ee2f5e3d5dcf93e6dc5f846a3b11efac9588f87 Mon Sep 17 00:00:00 2001 From: Matios102 Date: Tue, 30 Dec 2025 11:46:01 +0100 Subject: [PATCH 16/18] fix: compilation error detection --- internal/docker/docker_client.go | 4 +++- internal/pipeline/pipeline.go | 13 +++++++------ internal/stages/executor/executor.go | 6 ++++++ utils/utils.go | 17 ++++++++++++++--- 4 files changed, 30 insertions(+), 10 deletions(-) diff --git a/internal/docker/docker_client.go b/internal/docker/docker_client.go index f5576a4..c361844 100644 --- a/internal/docker/docker_client.go +++ b/internal/docker/docker_client.go @@ -31,6 +31,7 @@ type DockerClient interface { ctx context.Context, containerID, srcPath, dstPath string, allowedDirs []string, + alwaysCopyFiles []string, maxFileSize int64, maxFilesInDir int, ) error @@ -151,6 +152,7 @@ func (d *dockerClient) CopyFromContainerFiltered( ctx context.Context, containerID, srcPath, dstPath string, allowedDirs []string, + alwaysCopyFiles []string, maxFileSize int64, maxFilesInDir int, ) error { @@ -163,7 +165,7 @@ func (d *dockerClient) CopyFromContainerFiltered( defer reader.Close() // Extract tar archive with filtering to destination - err = utils.ExtractTarArchiveFiltered(reader, dstPath, allowedDirs, maxFileSize, maxFilesInDir) + err = utils.ExtractTarArchiveFiltered(reader, dstPath, allowedDirs, alwaysCopyFiles, maxFileSize, maxFilesInDir) if err != nil { d.logger.Errorf("Failed to extract filtered tar archive to %s: %s", dstPath, err) } diff --git a/internal/pipeline/pipeline.go b/internal/pipeline/pipeline.go index 028f5a5..a1eedda 100644 --- a/internal/pipeline/pipeline.go +++ b/internal/pipeline/pipeline.go @@ -153,11 +153,6 @@ func (ws *worker) ProcessTask(messageID, responseQueue string, task *messages.Ta err = ws.executor.ExecuteCommand(cfg) if err != nil { - if fileInfo, statErr := 
os.Stat(dc.CompileErrFilePath); statErr == nil && fileInfo.Size() > 0 { - ws.publishCompilationError(dc, task.TestCases) - return - } - ws.responder.PublishTaskErrorToResponseQueue( constants.QueueMessageTypeTask, ws.state.ProcessingMessageID, @@ -166,7 +161,12 @@ func (ws *worker) ProcessTask(messageID, responseQueue string, task *messages.Ta ) return } - + + // Check for compilation error + if fileInfo, statErr := os.Stat(dc.CompileErrFilePath); statErr == nil && fileInfo.Size() > 0 { + ws.publishCompilationError(dc, task.TestCases) + return + } solutionResult := ws.verifier.EvaluateAllTestCases(dc, task.TestCases, messageID, langType) err = ws.packager.SendSolutionPackage(dc, task.TestCases /*hasCompilationErr*/, false, messageID) @@ -190,6 +190,7 @@ func (ws *worker) ProcessTask(messageID, responseQueue string, task *messages.Ta } func (ws *worker) publishCompilationError(dirConfig *packager.TaskDirConfig, testCases []messages.TestCase) { + ws.logger.Infof("Compilation error occured for message ID: %s", ws.state.ProcessingMessageID) sendErr := ws.packager.SendSolutionPackage(dirConfig, testCases, true, ws.state.ProcessingMessageID) if sendErr != nil { ws.responder.PublishTaskErrorToResponseQueue( diff --git a/internal/stages/executor/executor.go b/internal/stages/executor/executor.go index d324bbf..6006941 100644 --- a/internal/stages/executor/executor.go +++ b/internal/stages/executor/executor.go @@ -138,12 +138,18 @@ func (d *executor) ExecuteCommand( constants.UserDiffDirName, constants.UserExecResultDirName, } + + alwaysCopyFiles := []string{ + filepath.Base(cfg.DirConfig.CompileErrFilePath), + } + err = d.docker.CopyFromContainerFiltered( copyCtx, containerID, cfg.DirConfig.PackageDirPath, cfg.DirConfig.TmpDirPath, allowedDirs, + alwaysCopyFiles, constants.MaxContainerOutputFileSize, len(cfg.TestCases), ) diff --git a/utils/utils.go b/utils/utils.go index 4d83334..5c27726 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -351,6 +351,7 @@ func 
ExtractTarArchiveFiltered( reader io.Reader, dstPath string, allowedDirs []string, + alwaysCopyFiles []string, maxFileSize int64, maxFilesInDir int, ) error { @@ -366,6 +367,11 @@ func ExtractTarArchiveFiltered( allowedSet[dir] = struct{}{} } + alwaysCopySet := make(map[string]struct{}, len(alwaysCopyFiles)) + for _, file := range alwaysCopyFiles { + alwaysCopySet[file] = struct{}{} + } + // Track files per allowed directory to enforce per-directory limits dirFileCount := make(map[string]int, len(allowedDirs)) @@ -378,7 +384,10 @@ func ExtractTarArchiveFiltered( return err } - if !isAllowedDirectory(header, allowedSet) { + // Check if this file should always be copied + _, alwaysCopy := alwaysCopySet[filepath.Base(header.Name)] + + if !alwaysCopy && !isAllowedDirectory(header, allowedSet) { continue } @@ -390,8 +399,10 @@ func ExtractTarArchiveFiltered( return err } - if err := validateAndTrackFileCount(header, dirFileCount, maxFilesInDir); err != nil { - return err + if !alwaysCopy { + if err := validateAndTrackFileCount(header, dirFileCount, maxFilesInDir); err != nil { + return err + } } target, err := safeArchiveTarget(absDstPath, header.Name) From af5a42bde4898a61142151bedcba0a67e84a0873 Mon Sep 17 00:00:00 2001 From: Matios102 Date: Tue, 30 Dec 2025 12:00:58 +0100 Subject: [PATCH 17/18] tests --- generate_mocks.sh | 1 - internal/pipeline/pipeline.go | 2 +- internal/pipeline/pipeline_test.go | 204 ++++++++++++++++------ internal/scheduler/scheduler_test.go | 9 +- internal/stages/executor/executor.go | 2 + internal/stages/executor/executor_test.go | 8 +- tests/mocks/mocks_generated.go | 56 +----- utils/utils.go | 105 ++++++----- 8 files changed, 232 insertions(+), 155 deletions(-) diff --git a/generate_mocks.sh b/generate_mocks.sh index 24f192c..59bce8b 100755 --- a/generate_mocks.sh +++ b/generate_mocks.sh @@ -12,7 +12,6 @@ INTERFACES=( "internal/rabbitmq/channel Channel" "internal/rabbitmq/responder Responder" "internal/scheduler Scheduler" - 
"internal/stages/compiler Compiler" "internal/stages/executor Executor" "internal/stages/packager Packager" "internal/stages/verifier Verifier" diff --git a/internal/pipeline/pipeline.go b/internal/pipeline/pipeline.go index a1eedda..601efb0 100644 --- a/internal/pipeline/pipeline.go +++ b/internal/pipeline/pipeline.go @@ -161,7 +161,7 @@ func (ws *worker) ProcessTask(messageID, responseQueue string, task *messages.Ta ) return } - + // Check for compilation error if fileInfo, statErr := os.Stat(dc.CompileErrFilePath); statErr == nil && fileInfo.Size() > 0 { ws.publishCompilationError(dc, task.TestCases) diff --git a/internal/pipeline/pipeline_test.go b/internal/pipeline/pipeline_test.go index 2e963f5..6033a6d 100644 --- a/internal/pipeline/pipeline_test.go +++ b/internal/pipeline/pipeline_test.go @@ -2,13 +2,13 @@ package pipeline_test import ( "errors" + "os" "testing" "time" "github.com/mini-maxit/worker/internal/pipeline" "github.com/mini-maxit/worker/internal/stages/packager" "github.com/mini-maxit/worker/pkg/constants" - pkgerrors "github.com/mini-maxit/worker/pkg/errors" "github.com/mini-maxit/worker/pkg/messages" "github.com/mini-maxit/worker/pkg/solution" mocks "github.com/mini-maxit/worker/tests/mocks" @@ -18,24 +18,19 @@ import ( // setupSuccessfulPipelineMocks configures mocks for a successful task processing flow. func setupSuccessfulPipelineMocks( t *testing.T, - mockCompiler *mocks.MockCompiler, mockPackager *mocks.MockPackager, mockExecutor *mocks.MockExecutor, mockVerifier *mocks.MockVerifier, mockResponder *mocks.MockResponder, ) { + tmpDir := t.TempDir() dir := &packager.TaskDirConfig{ - PackageDirPath: t.TempDir(), + PackageDirPath: tmpDir, UserSolutionPath: "src", UserExecFilePath: "exec", CompileErrFilePath: "compile.err", } mockPackager.EXPECT().PrepareSolutionPackage(gomock.Any(), gomock.Any(), gomock.Any()).Return(dir, nil) - mockCompiler.EXPECT(). 
- CompileSolutionIfNeeded( - gomock.Any(), gomock.Any(), gomock.Any(), - gomock.Any(), gomock.Any(), gomock.Any(), - ).Return(nil) mockExecutor.EXPECT().ExecuteCommand(gomock.Any()).Return(nil) mockVerifier.EXPECT(). EvaluateAllTestCases(dir, gomock.Any(), gomock.Any(), gomock.Any()). @@ -52,15 +47,14 @@ func TestProcessTask_SuccessFlow(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockCompiler := mocks.NewMockCompiler(ctrl) mockPackager := mocks.NewMockPackager(ctrl) mockExecutor := mocks.NewMockExecutor(ctrl) mockVerifier := mocks.NewMockVerifier(ctrl) mockResponder := mocks.NewMockResponder(ctrl) - setupSuccessfulPipelineMocks(t, mockCompiler, mockPackager, mockExecutor, mockVerifier, mockResponder) + setupSuccessfulPipelineMocks(t, mockPackager, mockExecutor, mockVerifier, mockResponder) - w := pipeline.NewWorker(1, mockCompiler, mockPackager, mockExecutor, mockVerifier, mockResponder) + w := pipeline.NewWorker(1, mockPackager, mockExecutor, mockVerifier, mockResponder) task := &messages.TaskQueueMessage{ LanguageType: "cpp", @@ -80,28 +74,32 @@ func TestProcessTask_CompilationErrorFlow(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockCompiler := mocks.NewMockCompiler(ctrl) mockPackager := mocks.NewMockPackager(ctrl) mockExecutor := mocks.NewMockExecutor(ctrl) mockVerifier := mocks.NewMockVerifier(ctrl) mockResponder := mocks.NewMockResponder(ctrl) + tmpDir := t.TempDir() dir := &packager.TaskDirConfig{ - PackageDirPath: t.TempDir(), + PackageDirPath: tmpDir, UserSolutionPath: "src", UserExecFilePath: "exec", - CompileErrFilePath: "compile.err", + CompileErrFilePath: tmpDir + "/compile.err", } mockPackager.EXPECT().PrepareSolutionPackage(gomock.Any(), gomock.Any(), gomock.Any()).Return(dir, nil) - // Simulate compilation failure - mockCompiler.EXPECT(). 
- CompileSolutionIfNeeded( - gomock.Any(), gomock.Any(), gomock.Any(), - gomock.Any(), gomock.Any(), gomock.Any(), - ).Return(pkgerrors.ErrCompilationFailed) + // Executor returns nil (container exited with 0), but we have compilation errors in the file + mockExecutor.EXPECT().ExecuteCommand(gomock.Any()).DoAndReturn( + func(_ interface{}) error { + // Simulate compilation error by writing to the error file + if err := os.WriteFile(dir.CompileErrFilePath, []byte("undefined reference to `main'"), 0644); err != nil { + t.Fatalf("failed to write compile error: %v", err) + } + return nil + }, + ) - // When compilation fails SendSolutionPackage should be called with hasCompilationErr=true + // When compilation error detected, SendSolutionPackage should be called with hasCompilationErr=true mockPackager.EXPECT().SendSolutionPackage(dir, gomock.Any(), true, gomock.Any()).Return(nil) // Expect PublishPayloadTaskRespond called with a compilation error result @@ -116,7 +114,7 @@ func TestProcessTask_CompilationErrorFlow(t *testing.T) { }, ) - w := pipeline.NewWorker(2, mockCompiler, mockPackager, mockExecutor, mockVerifier, mockResponder) + w := pipeline.NewWorker(2, mockPackager, mockExecutor, mockVerifier, mockResponder) task := &messages.TaskQueueMessage{LanguageType: "cpp", LanguageVersion: "11", TestCases: nil} w.ProcessTask("msg-compile", "respQ", task) } @@ -125,7 +123,6 @@ func TestProcessTask_PreparePackageFails(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockCompiler := mocks.NewMockCompiler(ctrl) mockPackager := mocks.NewMockPackager(ctrl) mockExecutor := mocks.NewMockExecutor(ctrl) mockVerifier := mocks.NewMockVerifier(ctrl) @@ -138,7 +135,7 @@ func TestProcessTask_PreparePackageFails(t *testing.T) { ).Return(nil, errors.New("download failed")) mockResponder.EXPECT().PublishTaskErrorToResponseQueue(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()) - w := pipeline.NewWorker(3, mockCompiler, mockPackager, mockExecutor, mockVerifier, 
mockResponder) + w := pipeline.NewWorker(3, mockPackager, mockExecutor, mockVerifier, mockResponder) task := &messages.TaskQueueMessage{LanguageType: "cpp"} w.ProcessTask("msg-dl", "respQ", task) } @@ -147,24 +144,19 @@ func TestProcessTask_SendPackageFailsAfterRun(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockCompiler := mocks.NewMockCompiler(ctrl) mockPackager := mocks.NewMockPackager(ctrl) mockExecutor := mocks.NewMockExecutor(ctrl) mockVerifier := mocks.NewMockVerifier(ctrl) mockResponder := mocks.NewMockResponder(ctrl) + tmpDir := t.TempDir() dir := &packager.TaskDirConfig{ - PackageDirPath: t.TempDir(), + PackageDirPath: tmpDir, UserSolutionPath: "src", UserExecFilePath: "exec", - CompileErrFilePath: "compile.err", + CompileErrFilePath: tmpDir + "/compile.err", } mockPackager.EXPECT().PrepareSolutionPackage(gomock.Any(), gomock.Any(), gomock.Any()).Return(dir, nil) - mockCompiler.EXPECT(). - CompileSolutionIfNeeded( - gomock.Any(), gomock.Any(), gomock.Any(), - gomock.Any(), gomock.Any(), gomock.Any(), - ).Return(nil) mockExecutor.EXPECT().ExecuteCommand(gomock.Any()).Return(nil) mockVerifier.EXPECT(). EvaluateAllTestCases(dir, gomock.Any(), gomock.Any(), gomock.Any()). 
@@ -177,7 +169,7 @@ func TestProcessTask_SendPackageFailsAfterRun(t *testing.T) { mockPackager.EXPECT().SendSolutionPackage(dir, gomock.Any(), false, gomock.Any()).Return(errors.New("upload failed")) mockResponder.EXPECT().PublishTaskErrorToResponseQueue(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()) - w := pipeline.NewWorker(4, mockCompiler, mockPackager, mockExecutor, mockVerifier, mockResponder) + w := pipeline.NewWorker(4, mockPackager, mockExecutor, mockVerifier, mockResponder) task := &messages.TaskQueueMessage{LanguageType: "cpp"} w.ProcessTask("msg-upload", "respQ", task) } @@ -186,24 +178,19 @@ func TestProcessTask_VerifierPanicRecovered(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockCompiler := mocks.NewMockCompiler(ctrl) mockPackager := mocks.NewMockPackager(ctrl) mockExecutor := mocks.NewMockExecutor(ctrl) mockVerifier := mocks.NewMockVerifier(ctrl) mockResponder := mocks.NewMockResponder(ctrl) + tmpDir := t.TempDir() dir := &packager.TaskDirConfig{ - PackageDirPath: t.TempDir(), + PackageDirPath: tmpDir, UserSolutionPath: "src", UserExecFilePath: "exec", - CompileErrFilePath: "compile.err", + CompileErrFilePath: tmpDir + "/compile.err", } mockPackager.EXPECT().PrepareSolutionPackage(gomock.Any(), gomock.Any(), gomock.Any()).Return(dir, nil) - mockCompiler.EXPECT(). 
- CompileSolutionIfNeeded( - gomock.Any(), gomock.Any(), gomock.Any(), - gomock.Any(), gomock.Any(), gomock.Any(), - ).Return(nil) mockExecutor.EXPECT().ExecuteCommand(gomock.Any()).Return(nil) // Make verifier panic @@ -215,7 +202,7 @@ func TestProcessTask_VerifierPanicRecovered(t *testing.T) { mockResponder.EXPECT().PublishTaskErrorToResponseQueue(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()) - w := pipeline.NewWorker(5, mockCompiler, mockPackager, mockExecutor, mockVerifier, mockResponder) + w := pipeline.NewWorker(5, mockPackager, mockExecutor, mockVerifier, mockResponder) task := &messages.TaskQueueMessage{LanguageType: "cpp"} w.ProcessTask("msg-panic", "respQ", task) } @@ -224,15 +211,14 @@ func TestProcessTask_PublishPayloadFails(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockCompiler := mocks.NewMockCompiler(ctrl) mockPackager := mocks.NewMockPackager(ctrl) mockExecutor := mocks.NewMockExecutor(ctrl) mockVerifier := mocks.NewMockVerifier(ctrl) mockResponder := mocks.NewMockResponder(ctrl) - setupSuccessfulPipelineMocks(t, mockCompiler, mockPackager, mockExecutor, mockVerifier, mockResponder) + setupSuccessfulPipelineMocks(t, mockPackager, mockExecutor, mockVerifier, mockResponder) - w := pipeline.NewWorker(6, mockCompiler, mockPackager, mockExecutor, mockVerifier, mockResponder) + w := pipeline.NewWorker(6, mockPackager, mockExecutor, mockVerifier, mockResponder) task := &messages.TaskQueueMessage{ LanguageType: "cpp", LanguageVersion: "11", @@ -251,13 +237,12 @@ func TestGetStatus(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockCompiler := mocks.NewMockCompiler(ctrl) mockPackager := mocks.NewMockPackager(ctrl) mockExecutor := mocks.NewMockExecutor(ctrl) mockVerifier := mocks.NewMockVerifier(ctrl) mockResponder := mocks.NewMockResponder(ctrl) - w := pipeline.NewWorker(7, mockCompiler, mockPackager, mockExecutor, mockVerifier, mockResponder) + w := pipeline.NewWorker(7, mockPackager, 
mockExecutor, mockVerifier, mockResponder) if status := w.GetState(); status.Status != constants.WorkerStatusIdle { t.Fatalf("expected initial status to be Idle, got %q", status) @@ -278,13 +263,12 @@ func TestGetProcessingMessageID(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockCompiler := mocks.NewMockCompiler(ctrl) mockPackager := mocks.NewMockPackager(ctrl) mockExecutor := mocks.NewMockExecutor(ctrl) mockVerifier := mocks.NewMockVerifier(ctrl) mockResponder := mocks.NewMockResponder(ctrl) - w := pipeline.NewWorker(8, mockCompiler, mockPackager, mockExecutor, mockVerifier, mockResponder) + w := pipeline.NewWorker(8, mockPackager, mockExecutor, mockVerifier, mockResponder) if msgID := w.GetProcessingMessageID(); msgID != "" { t.Fatalf("expected initial processingMessageID to be empty, got %q", msgID) @@ -312,11 +296,6 @@ func TestGetProcessingMessageID(t *testing.T) { ) // The rest of the pipeline should succeed quickly after release - mockCompiler.EXPECT(). - CompileSolutionIfNeeded( - gomock.Any(), gomock.Any(), gomock.Any(), - gomock.Any(), gomock.Any(), gomock.Any(), - ).Return(nil) mockExecutor.EXPECT().ExecuteCommand(gomock.Any()).Return(nil) mockVerifier.EXPECT(). EvaluateAllTestCases(dir, gomock.Any(), gomock.Any(), gomock.Any()). @@ -359,3 +338,120 @@ func TestGetProcessingMessageID(t *testing.T) { } } } + +// TestProcessTask_ContainerCompilationSuccess tests compilation happening inside container. 
+func TestProcessTask_ContainerCompilationSuccess(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockPackager := mocks.NewMockPackager(ctrl) + mockExecutor := mocks.NewMockExecutor(ctrl) + mockVerifier := mocks.NewMockVerifier(ctrl) + mockResponder := mocks.NewMockResponder(ctrl) + + tmpDir := t.TempDir() + dir := &packager.TaskDirConfig{ + PackageDirPath: tmpDir, + UserSolutionPath: tmpDir + "/solution.cpp", + UserExecFilePath: tmpDir + "/solution", + CompileErrFilePath: tmpDir + "/compile.err", + } + + mockPackager.EXPECT().PrepareSolutionPackage(gomock.Any(), gomock.Any(), gomock.Any()).Return(dir, nil) + + // Executor returns nil (compilation successful in container, empty error file) + mockExecutor.EXPECT().ExecuteCommand(gomock.Any()).DoAndReturn( + func(cfg interface{}) error { + // Write empty error file (successful compilation) + if err := os.WriteFile(dir.CompileErrFilePath, []byte{}, 0644); err != nil { + t.Fatalf("failed to write empty compile error file: %v", err) + } + return nil + }, + ) + + // Should proceed to verification + mockVerifier.EXPECT(). + EvaluateAllTestCases(dir, gomock.Any(), gomock.Any(), gomock.Any()). 
+ Return(solution.Result{ + StatusCode: solution.Success, + Message: "OK", + }) + + mockPackager.EXPECT().SendSolutionPackage(dir, gomock.Any(), false, gomock.Any()).Return(nil) + mockResponder.EXPECT().PublishPayloadTaskRespond(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()) + + w := pipeline.NewWorker(9, mockPackager, mockExecutor, mockVerifier, mockResponder) + + task := &messages.TaskQueueMessage{ + LanguageType: "cpp", + LanguageVersion: "11", + TestCases: []messages.TestCase{ + {TimeLimitMs: 100, MemoryLimitKB: 65536}, + }, + } + + w.ProcessTask("msg-container-success", "respQ", task) + + if got := w.GetProcessingMessageID(); got != "" { + t.Fatalf("expected processingMessageID to be cleared, got %q", got) + } +} + +// TestProcessTask_ContainerCompilationErrorDetection tests compilation error detection from container output. +func TestProcessTask_ContainerCompilationErrorDetection(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockPackager := mocks.NewMockPackager(ctrl) + mockExecutor := mocks.NewMockExecutor(ctrl) + mockVerifier := mocks.NewMockVerifier(ctrl) + mockResponder := mocks.NewMockResponder(ctrl) + + tmpDir := t.TempDir() + dir := &packager.TaskDirConfig{ + PackageDirPath: tmpDir, + UserSolutionPath: tmpDir + "/solution.cpp", + UserExecFilePath: tmpDir + "/solution", + CompileErrFilePath: tmpDir + "/compile.err", + } + + mockPackager.EXPECT().PrepareSolutionPackage(gomock.Any(), gomock.Any(), gomock.Any()).Return(dir, nil) + + // Executor succeeds (exit code 0), but has compilation error output + mockExecutor.EXPECT().ExecuteCommand(gomock.Any()).DoAndReturn( + func(cfg interface{}) error { + // Write compilation error message + errMsg := "solution.cpp:5:1: error: 'main' function not defined\n" + if err := os.WriteFile(dir.CompileErrFilePath, []byte(errMsg), 0644); err != nil { + t.Fatalf("failed to write compile error file: %v", err) + } + return nil + }, + ) + + // Should NOT proceed to verification, should 
report compilation error + mockPackager.EXPECT().SendSolutionPackage(dir, gomock.Any(), true, gomock.Any()).Return(nil) + + mockResponder.EXPECT().PublishPayloadTaskRespond(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Do( + func(messageType, messageID, responseQueue string, res solution.Result) { + if res.StatusCode != solution.CompilationError { + t.Fatalf("expected compilation error status, got %v", res.StatusCode) + } + }, + ) + + w := pipeline.NewWorker(10, mockPackager, mockExecutor, mockVerifier, mockResponder) + + task := &messages.TaskQueueMessage{ + LanguageType: "cpp", + LanguageVersion: "11", + TestCases: []messages.TestCase{{TimeLimitMs: 100, MemoryLimitKB: 65536}}, + } + + w.ProcessTask("msg-container-error", "respQ", task) + + if got := w.GetProcessingMessageID(); got != "" { + t.Fatalf("expected processingMessageID to be cleared, got %q", got) + } +} diff --git a/internal/scheduler/scheduler_test.go b/internal/scheduler/scheduler_test.go index 664e39c..fd97a1b 100644 --- a/internal/scheduler/scheduler_test.go +++ b/internal/scheduler/scheduler_test.go @@ -19,14 +19,13 @@ func TestNewScheduler(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - compiler := mocktests.NewMockCompiler(ctrl) packager := mocktests.NewMockPackager(ctrl) executor := mocktests.NewMockExecutor(ctrl) verifier := mocktests.NewMockVerifier(ctrl) responder := mocktests.NewMockResponder(ctrl) maxWorkers := 3 - s := NewScheduler(maxWorkers, compiler, packager, executor, verifier, responder) + s := NewScheduler(maxWorkers, packager, executor, verifier, responder) if s == nil { t.Fatalf("NewScheduler returned nil") } @@ -53,7 +52,7 @@ func TestGetWorkersStatus(t *testing.T) { w1.EXPECT().GetState().Return(pipeline.WorkerState{Status: constants.WorkerStatusIdle}).Times(1) w1.EXPECT().GetId().Return(1).Times(1) - s := NewSchedulerWithWorkers(2, map[int]pipeline.Worker{0: w0, 1: w1}, nil, nil, nil, nil, nil) + s := NewSchedulerWithWorkers(2, 
map[int]pipeline.Worker{0: w0, 1: w1}, nil, nil, nil, nil) st := s.GetWorkersStatus() if len(st.WorkerStatus) != 2 { @@ -111,7 +110,7 @@ func TestProcessTask_SuccessAndMarkIdle(t *testing.T) { // After processing, scheduler.markWorkerAsIdle should call UpdateStatus(Idle) w.EXPECT().UpdateStatus(constants.WorkerStatusIdle).Times(1) - s := NewSchedulerWithWorkers(1, map[int]pipeline.Worker{0: w}, nil, nil, nil, nil, nil) + s := NewSchedulerWithWorkers(1, map[int]pipeline.Worker{0: w}, nil, nil, nil, nil) if err := s.ProcessTask("resp", "msg-id-1", &messages.TaskQueueMessage{}); err != nil { t.Fatalf("unexpected error from ProcessTask: %v", err) @@ -137,7 +136,7 @@ func TestProcessTask_NoFreeWorker(t *testing.T) { // worker reports busy w.EXPECT().GetState().Return(pipeline.WorkerState{Status: constants.WorkerStatusBusy}).Times(1) - s := NewSchedulerWithWorkers(1, map[int]pipeline.Worker{0: w}, nil, nil, nil, nil, nil) + s := NewSchedulerWithWorkers(1, map[int]pipeline.Worker{0: w}, nil, nil, nil, nil) err := s.ProcessTask("resp", "msg-id-2", &messages.TaskQueueMessage{}) if err == nil { diff --git a/internal/stages/executor/executor.go b/internal/stages/executor/executor.go index 6006941..1a27852 100644 --- a/internal/stages/executor/executor.go +++ b/internal/stages/executor/executor.go @@ -200,6 +200,8 @@ func buildCompileCommand(cfg CommandConfig) []string { "-std=" + versionFlag, filepath.Base(cfg.SourceFilePath), } + case languages.PYTHON: // make linter happy. 
+ return []string{} default: return []string{} } diff --git a/internal/stages/executor/executor_test.go b/internal/stages/executor/executor_test.go index fbbef7a..02cb478 100644 --- a/internal/stages/executor/executor_test.go +++ b/internal/stages/executor/executor_test.go @@ -78,7 +78,7 @@ func setupMockExpectations( gomock.Any(), containerID, gomock.Any(), ).Return(statusCode, nil).Times(1), mockDocker.EXPECT().CopyFromContainerFiltered( - gomock.Any(), containerID, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), containerID, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), ).Return(nil).Times(1), mockDocker.EXPECT().ContainerRemove(gomock.Any(), containerID).Times(1), ) @@ -118,7 +118,7 @@ func TestExecuteCommand_Success(t *testing.T) { gomock.Any(), "cid123", gomock.Any(), ).Return(int64(0), nil).Times(1), mockDocker.EXPECT().CopyFromContainerFiltered( - gomock.Any(), "cid123", gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), "cid123", gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), ).Return(nil).Times(1), mockDocker.EXPECT().ContainerRemove(gomock.Any(), "cid123").Times(1), ) @@ -320,7 +320,9 @@ func TestExecuteCommand_CopyFromContainerFails(t *testing.T) { gomock.Any(), "cid-copyfrom-fail", gomock.Any(), ).Return(int64(0), nil).Times(1), mockDocker.EXPECT().CopyFromContainerFiltered( - gomock.Any(), "cid-copyfrom-fail", gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), + "cid-copyfrom-fail", + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), ).Return(errors.New("copy-from-failed")).Times(1), mockDocker.EXPECT().ContainerRemove(gomock.Any(), "cid-copyfrom-fail").Times(1), ) diff --git a/tests/mocks/mocks_generated.go b/tests/mocks/mocks_generated.go index 4cdd0db..bd9a786 100644 --- a/tests/mocks/mocks_generated.go +++ 
b/tests/mocks/mocks_generated.go @@ -281,54 +281,6 @@ func (mr *MockSchedulerMockRecorder) ProcessTask(responseQueueName, messageID, t return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProcessTask", reflect.TypeOf((*MockScheduler)(nil).ProcessTask), responseQueueName, messageID, task) } -// Code generated by MockGen. DO NOT EDIT. -// Source: /Users/mateuszosik/repos/Testerka/worker/internal/stages/compiler (interfaces: Compiler) -// -// Generated by this command: -// -// mockgen /Users/mateuszosik/repos/Testerka/worker/internal/stages/compiler Compiler -// - -// Package mock_compiler is a generated GoMock package. - -// MockCompiler is a mock of Compiler interface. -type MockCompiler struct { - ctrl *gomock.Controller - recorder *MockCompilerMockRecorder - isgomock struct{} -} - -// MockCompilerMockRecorder is the mock recorder for MockCompiler. -type MockCompilerMockRecorder struct { - mock *MockCompiler -} - -// NewMockCompiler creates a new mock instance. -func NewMockCompiler(ctrl *gomock.Controller) *MockCompiler { - mock := &MockCompiler{ctrl: ctrl} - mock.recorder = &MockCompilerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockCompiler) EXPECT() *MockCompilerMockRecorder { - return m.recorder -} - -// CompileSolutionIfNeeded mocks base method. -func (m *MockCompiler) CompileSolutionIfNeeded(langType languages.LanguageType, langVersion, sourceFilePath, outFilePath, compErrFilePath, messageID string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CompileSolutionIfNeeded", langType, langVersion, sourceFilePath, outFilePath, compErrFilePath, messageID) - ret0, _ := ret[0].(error) - return ret0 -} - -// CompileSolutionIfNeeded indicates an expected call of CompileSolutionIfNeeded. 
-func (mr *MockCompilerMockRecorder) CompileSolutionIfNeeded(langType, langVersion, sourceFilePath, outFilePath, compErrFilePath, messageID any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompileSolutionIfNeeded", reflect.TypeOf((*MockCompiler)(nil).CompileSolutionIfNeeded), langType, langVersion, sourceFilePath, outFilePath, compErrFilePath, messageID) -} - // Code generated by MockGen. DO NOT EDIT. // Source: /Users/mateuszosik/repos/Testerka/worker/internal/stages/executor (interfaces: Executor) // @@ -710,17 +662,17 @@ func (mr *MockDockerClientMockRecorder) ContainerRemove(ctx, containerID any) *g } // CopyFromContainerFiltered mocks base method. -func (m *MockDockerClient) CopyFromContainerFiltered(ctx context.Context, containerID, srcPath, dstPath string, allowedDirs []string, maxFileSize int64, maxFilesInDir int) error { +func (m *MockDockerClient) CopyFromContainerFiltered(ctx context.Context, containerID, srcPath, dstPath string, allowedDirs, alwaysCopyFiles []string, maxFileSize int64, maxFilesInDir int) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CopyFromContainerFiltered", ctx, containerID, srcPath, dstPath, allowedDirs, maxFileSize, maxFilesInDir) + ret := m.ctrl.Call(m, "CopyFromContainerFiltered", ctx, containerID, srcPath, dstPath, allowedDirs, alwaysCopyFiles, maxFileSize, maxFilesInDir) ret0, _ := ret[0].(error) return ret0 } // CopyFromContainerFiltered indicates an expected call of CopyFromContainerFiltered. 
-func (mr *MockDockerClientMockRecorder) CopyFromContainerFiltered(ctx, containerID, srcPath, dstPath, allowedDirs, maxFileSize, maxFilesInDir any) *gomock.Call { +func (mr *MockDockerClientMockRecorder) CopyFromContainerFiltered(ctx, containerID, srcPath, dstPath, allowedDirs, alwaysCopyFiles, maxFileSize, maxFilesInDir any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyFromContainerFiltered", reflect.TypeOf((*MockDockerClient)(nil).CopyFromContainerFiltered), ctx, containerID, srcPath, dstPath, allowedDirs, maxFileSize, maxFilesInDir) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyFromContainerFiltered", reflect.TypeOf((*MockDockerClient)(nil).CopyFromContainerFiltered), ctx, containerID, srcPath, dstPath, allowedDirs, alwaysCopyFiles, maxFileSize, maxFilesInDir) } // CopyToContainerFiltered mocks base method. diff --git a/utils/utils.go b/utils/utils.go index 5c27726..f6bc3a7 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -347,6 +347,56 @@ func validateAndTrackFileCount(header *tar.Header, dirFileCount map[string]int, return nil } +type tarExtractionContext struct { + absDstPath string + allowedSet map[string]struct{} + alwaysCopySet map[string]struct{} + dirFileCount map[string]int + maxFileSize int64 + maxFilesInDir int +} + +func (ctx *tarExtractionContext) shouldExtractEntry(header *tar.Header) bool { + _, alwaysCopy := ctx.alwaysCopySet[filepath.Base(header.Name)] + return alwaysCopy || isAllowedDirectory(header, ctx.allowedSet) +} + +func (ctx *tarExtractionContext) validateEntry(header *tar.Header) error { + if err := validateTarEntrySize(header, ctx.maxFileSize); err != nil { + return err + } + + if err := validatePathDepth(header.Name); err != nil { + return err + } + + _, alwaysCopy := ctx.alwaysCopySet[filepath.Base(header.Name)] + if !alwaysCopy { + if err := validateAndTrackFileCount(header, ctx.dirFileCount, ctx.maxFilesInDir); err != nil { + return err + } + } + + return nil 
+} + +func (ctx *tarExtractionContext) processEntry(tarReader *tar.Reader, header *tar.Header) error { + if !ctx.shouldExtractEntry(header) { + return nil + } + + if err := ctx.validateEntry(header); err != nil { + return err + } + + target, err := safeArchiveTarget(ctx.absDstPath, header.Name) + if err != nil { + return err + } + + return materializeTarEntry(tarReader, header, target) +} + func ExtractTarArchiveFiltered( reader io.Reader, dstPath string, @@ -355,26 +405,21 @@ func ExtractTarArchiveFiltered( maxFileSize int64, maxFilesInDir int, ) error { - tarReader := tar.NewReader(reader) - absDstPath, err := filepath.Abs(dstPath) if err != nil { return err } - allowedSet := make(map[string]struct{}, len(allowedDirs)) - for _, dir := range allowedDirs { - allowedSet[dir] = struct{}{} + ctx := &tarExtractionContext{ + absDstPath: absDstPath, + allowedSet: makeStringSet(allowedDirs), + alwaysCopySet: makeStringSet(alwaysCopyFiles), + dirFileCount: make(map[string]int, len(allowedDirs)), + maxFileSize: maxFileSize, + maxFilesInDir: maxFilesInDir, } - alwaysCopySet := make(map[string]struct{}, len(alwaysCopyFiles)) - for _, file := range alwaysCopyFiles { - alwaysCopySet[file] = struct{}{} - } - - // Track files per allowed directory to enforce per-directory limits - dirFileCount := make(map[string]int, len(allowedDirs)) - + tarReader := tar.NewReader(reader) for { header, err := tarReader.Next() if errors.Is(err, io.EOF) { @@ -384,34 +429,16 @@ func ExtractTarArchiveFiltered( return err } - // Check if this file should always be copied - _, alwaysCopy := alwaysCopySet[filepath.Base(header.Name)] - - if !alwaysCopy && !isAllowedDirectory(header, allowedSet) { - continue - } - - if err := validateTarEntrySize(header, maxFileSize); err != nil { - return err - } - - if err := validatePathDepth(header.Name); err != nil { - return err - } - - if !alwaysCopy { - if err := validateAndTrackFileCount(header, dirFileCount, maxFilesInDir); err != nil { - return err - } - } - - 
target, err := safeArchiveTarget(absDstPath, header.Name) - if err != nil { + if err := ctx.processEntry(tarReader, header); err != nil { return err } + } +} - if err := materializeTarEntry(tarReader, header, target); err != nil { - return err - } +func makeStringSet(items []string) map[string]struct{} { + set := make(map[string]struct{}, len(items)) + for _, item := range items { + set[item] = struct{}{} } + return set } From d3e33718e0182ff0024af5aa8d5c02c49019e5d8 Mon Sep 17 00:00:00 2001 From: Matios102 Date: Tue, 30 Dec 2025 15:56:25 +0100 Subject: [PATCH 18/18] fix: improve compilation error handling and update buildCompileCommand signature --- internal/pipeline/pipeline.go | 15 ++++++++++----- internal/stages/executor/executor.go | 18 ++++++++++++------ internal/stages/executor/run_tests.sh | 5 +++++ 3 files changed, 27 insertions(+), 11 deletions(-) diff --git a/internal/pipeline/pipeline.go b/internal/pipeline/pipeline.go index 601efb0..9915786 100644 --- a/internal/pipeline/pipeline.go +++ b/internal/pipeline/pipeline.go @@ -162,11 +162,16 @@ func (ws *worker) ProcessTask(messageID, responseQueue string, task *messages.Ta return } - // Check for compilation error - if fileInfo, statErr := os.Stat(dc.CompileErrFilePath); statErr == nil && fileInfo.Size() > 0 { - ws.publishCompilationError(dc, task.TestCases) - return + if requiresCompilation { + // Check for compilation error + fileInfo, statErr := os.Stat(dc.CompileErrFilePath) + + if statErr == nil && fileInfo.Size() > 0 { + ws.publishCompilationError(dc, task.TestCases) + return + } } + solutionResult := ws.verifier.EvaluateAllTestCases(dc, task.TestCases, messageID, langType) err = ws.packager.SendSolutionPackage(dc, task.TestCases /*hasCompilationErr*/, false, messageID) @@ -190,7 +195,7 @@ func (ws *worker) ProcessTask(messageID, responseQueue string, task *messages.Ta } func (ws *worker) publishCompilationError(dirConfig *packager.TaskDirConfig, testCases []messages.TestCase) { - 
ws.logger.Infof("Compilation error occured for message ID: %s", ws.state.ProcessingMessageID) + ws.logger.Infof("Compilation error occurred for message ID: %s", ws.state.ProcessingMessageID) sendErr := ws.packager.SendSolutionPackage(dirConfig, testCases, true, ws.state.ProcessingMessageID) if sendErr != nil { ws.responder.PublishTaskErrorToResponseQueue( diff --git a/internal/stages/executor/executor.go b/internal/stages/executor/executor.go index 1a27852..76c251a 100644 --- a/internal/stages/executor/executor.go +++ b/internal/stages/executor/executor.go @@ -189,21 +189,24 @@ func SanitizeContainerName(raw string) string { return "submission-" + cleaned } -func buildCompileCommand(cfg CommandConfig) []string { +func buildCompileCommand(cfg CommandConfig) ([]string, error) { switch cfg.LanguageType { case languages.CPP: - versionFlag, _ := languages.GetVersionFlag(languages.CPP, cfg.LanguageVersion) + versionFlag, err := languages.GetVersionFlag(languages.CPP, cfg.LanguageVersion) + if err != nil { + return []string{}, err + } return []string{ "g++", "-o", filepath.Base(cfg.DirConfig.UserExecFilePath), "-std=" + versionFlag, filepath.Base(cfg.SourceFilePath), - } + }, nil case languages.PYTHON: // make linter happy. 
-		return []string{}
+		return []string{}, nil
 	default:
-		return []string{}
+		return []string{}, nil
 	}
 }
 
@@ -257,7 +260,10 @@ func buildEnvironmentVariables(cfg CommandConfig) ([]string, error) {
 	}
 
 	if cfg.RequiresCompiling {
-		compileCmd := buildCompileCommand(cfg)
+		compileCmd, err := buildCompileCommand(cfg)
+		if err != nil {
+			return nil, err
+		}
 		envVars = append(envVars,
 			"REQUIRES_COMPILATION=true",
 			"COMPILE_CMD="+utils.ShellQuoteSlice(compileCmd),
diff --git a/internal/stages/executor/run_tests.sh b/internal/stages/executor/run_tests.sh
index e7bfa77..a0cf28e 100644
--- a/internal/stages/executor/run_tests.sh
+++ b/internal/stages/executor/run_tests.sh
@@ -55,12 +55,17 @@ if [[ "${REQUIRES_COMPILATION:-}" == "true" ]]; then
     # Run compilation and capture stderr
     if ! "${compile_cmd[@]}" 2> "${COMPILE_ERR_FILE}"; then
        echo "Compilation failed. Error details saved to ${COMPILE_ERR_FILE}"
+
+        # Intentionally exit with 0, as this is not treated as a container failure;
+        # compilation errors are handled separately in the pipeline by inspecting the compile error file.
        exit 0
    fi
 
    # Check if executable was created
    if [[ ! -f "${EXEC_FILE}" ]]; then
        echo "Compilation failed: executable not created" > "${COMPILE_ERR_FILE}"
+
+        # Same rationale as above: intentionally exit with 0
    fi