From e996a233804db0ba835d8b58fc6c6e226e91ef27 Mon Sep 17 00:00:00 2001 From: divancode Date: Mon, 2 Feb 2026 00:26:01 +0300 Subject: [PATCH 1/4] feat(exesh): split execution into stages --- Exesh/cmd/coordinator/main.go | 28 +- Exesh/cmd/worker/main.go | 42 +-- Exesh/config/coordinator/dev.yml | 10 +- Exesh/config/coordinator/docker.yml | 7 +- Exesh/config/worker-1/dev.yml | 2 +- Exesh/config/worker-1/docker.yml | 2 +- Exesh/config/worker-2/dev.yml | 2 +- Exesh/config/worker-2/docker.yml | 2 +- Exesh/example/main.go | 56 +-- Exesh/internal/api/execute/api.go | 21 +- Exesh/internal/api/execute/handler.go | 2 +- Exesh/internal/api/heartbeat/api.go | 61 +-- Exesh/internal/api/heartbeat/client.go | 24 +- Exesh/internal/api/heartbeat/handler.go | 12 +- Exesh/internal/config/config.go | 7 - Exesh/internal/config/coordinator_config.go | 10 +- Exesh/internal/config/worker_config.go | 7 +- Exesh/internal/domain/execution/context.go | 86 ----- Exesh/internal/domain/execution/execution.go | 95 +++-- .../domain/execution/execution_definition.go | 56 +++ Exesh/internal/domain/execution/graph.go | 110 ------ Exesh/internal/domain/execution/id.go | 6 +- Exesh/internal/domain/execution/input.go | 28 -- .../internal/domain/execution/input/input.go | 25 ++ .../execution/input/input_definition.go | 24 ++ .../input/inputs/artifact_input_definition.go | 11 + ...ilestorage_bucket_file_input_definition.go | 11 + .../filestorage_bucket_input_definition.go | 12 + .../input/inputs/inline_input_definition.go | 11 + .../input/inputs/input_definition.go | 53 +++ .../domain/execution/inputs/artifact_input.go | 20 - .../inputs/filestorage_bucket_input.go | 24 -- .../domain/execution/inputs/inputs.go | 31 -- Exesh/internal/domain/execution/job.go | 75 ---- Exesh/internal/domain/execution/job/id.go | 42 +++ Exesh/internal/domain/execution/job/job.go | 54 +++ .../domain/execution/job/job_definition.go | 29 ++ .../execution/job/jobs/check_cpp_job.go | 47 +++ .../job/jobs/check_cpp_job_definition.go 
| 13 + .../execution/job/jobs/compile_cpp_job.go | 44 +++ .../job/jobs/compile_cpp_job_definition.go | 11 + .../execution/job/jobs/compile_go_job.go | 44 +++ .../job/jobs/compile_go_job_definition.go | 11 + .../internal/domain/execution/job/jobs/job.go | 79 ++++ .../execution/job/jobs/job_definition.go | 65 ++++ .../domain/execution/job/jobs/run_cpp_job.go | 56 +++ .../job/jobs/run_cpp_job_definition.go | 15 + .../domain/execution/job/jobs/run_go_job.go | 56 +++ .../job/jobs/run_go_job_definition.go | 15 + .../domain/execution/job/jobs/run_py_job.go | 56 +++ .../job/jobs/run_py_job_definition.go | 15 + .../domain/execution/jobs/check_cpp_job.go | 68 ---- .../domain/execution/jobs/compile_cpp_job.go | 58 --- .../domain/execution/jobs/compile_go_job.go | 58 --- Exesh/internal/domain/execution/jobs/jobs.go | 59 --- .../domain/execution/jobs/run_cpp_job.go | 83 ----- .../domain/execution/jobs/run_go_job.go | 84 ----- .../domain/execution/jobs/run_py_job.go | 83 ----- Exesh/internal/domain/execution/jobs_graph.go | 82 +++++ Exesh/internal/domain/execution/message.go | 33 -- .../domain/execution/message/message.go | 34 ++ .../message/messages/check_job_message.go | 30 ++ .../message/messages/compile_job_message.go | 48 +++ .../messages/finish_execution_message.go | 34 ++ .../execution/message/messages/message.go | 39 ++ .../message/messages/run_job_message.go | 49 +++ .../messages/start_execution_message.go | 21 ++ .../execution/messages/check_step_message.go | 27 -- .../messages/compile_step_message.go | 43 --- .../messages/finish_execution_message.go | 27 -- .../execution/messages/run_step_message.go | 44 --- .../messages/start_execution_message.go | 16 - Exesh/internal/domain/execution/output.go | 27 -- .../domain/execution/output/output.go | 11 + .../execution/outputs/artifact_output.go | 18 - .../domain/execution/outputs/outputs.go | 29 -- Exesh/internal/domain/execution/result.go | 54 --- .../domain/execution/result/result.go | 56 +++ 
.../execution/result/results/check_result.go | 50 +++ .../result/results/compile_result.go | 52 +++ .../domain/execution/result/results/result.go | 47 +++ .../execution/result/results/run_result.go | 98 +++++ .../domain/execution/results/check_result.go | 21 -- .../execution/results/compile_result.go | 22 -- .../domain/execution/results/results.go | 53 --- .../domain/execution/results/run_result.go | 27 -- Exesh/internal/domain/execution/source.go | 23 -- Exesh/internal/domain/execution/source/id.go | 42 +++ .../domain/execution/source/source.go | 28 ++ .../execution/source/source_definition.go | 30 ++ .../sources/filestorage_bucket_file_source.go | 28 ++ ...estorage_bucket_file_source_definition.go} | 6 +- .../filestorage_bucket_source_definition.go | 13 + .../execution/source/sources/inline_source.go | 22 ++ .../sources/inline_source_definition.go | 10 + .../domain/execution/source/sources/source.go | 41 +++ .../source/sources/source_definition.go | 47 +++ .../domain/execution/sources/inline_source.go | 10 - .../execution/sources/other_step_source.go | 10 - .../domain/execution/sources/sources.go | 33 -- Exesh/internal/domain/execution/stage.go | 18 + .../domain/execution/stage_definition.go | 9 + .../internal/domain/execution/stages_graph.go | 115 ++++++ Exesh/internal/domain/execution/step.go | 37 -- .../domain/execution/steps/check_cpp_step.go | 55 --- .../execution/steps/compile_cpp_step.go | 45 --- .../domain/execution/steps/compile_go_step.go | 45 --- .../domain/execution/steps/run_cpp_step.go | 63 ---- .../domain/execution/steps/run_go_step.go | 63 ---- .../domain/execution/steps/run_py_step.go | 63 ---- .../internal/domain/execution/steps/steps.go | 74 ---- .../executors/check_cpp_job_executor.go | 86 ++--- .../executors/compile_cpp_job_executor.go | 70 +--- .../executors/compile_go_job_executor.go | 71 +--- .../internal/executor/executors/executors.go | 15 +- .../executors/run_cpp_job_executor.go | 113 ++---- .../executor/executors/run_go_job_executor.go 
| 117 ++---- .../executor/executors/run_py_job_executor.go | 113 ++---- Exesh/internal/executor/job_executor.go | 24 +- Exesh/internal/factory/execution_factory.go | 348 ++++++++++++++++++ Exesh/internal/factory/job_factory.go | 271 -------------- Exesh/internal/factory/message_factory.go | 76 ++-- .../adapter/filestorage_adapter.go | 111 +++--- Exesh/internal/provider/input_provider.go | 61 --- Exesh/internal/provider/output_provider.go | 73 ++-- Exesh/internal/provider/provider.go | 17 + .../artifact_source_input_provider.go | 133 ------- .../artifact_source_output_provider.go | 110 ------ .../filestorage_bucket_input_provider.go | 101 ----- Exesh/internal/provider/source_provider.go | 133 +++++++ Exesh/internal/registry/artifact_registry.go | 12 +- .../internal/scheduler/execution_scheduler.go | 268 ++++++++------ Exesh/internal/scheduler/job_scheduler.go | 70 ++-- Exesh/internal/sender/message_sender.go | 4 +- .../storage/postgres/execution_storage.go | 86 ++--- .../internal/storage/postgres/unit_of_work.go | 2 +- Exesh/internal/usecase/execute/dto.go | 8 +- Exesh/internal/usecase/execute/usecase.go | 35 +- Exesh/internal/usecase/heartbeat/usecase.go | 27 +- Exesh/internal/worker/worker.go | 43 ++- 140 files changed, 3190 insertions(+), 3548 deletions(-) delete mode 100644 Exesh/internal/domain/execution/context.go create mode 100644 Exesh/internal/domain/execution/execution_definition.go delete mode 100644 Exesh/internal/domain/execution/graph.go delete mode 100644 Exesh/internal/domain/execution/input.go create mode 100644 Exesh/internal/domain/execution/input/input.go create mode 100644 Exesh/internal/domain/execution/input/input_definition.go create mode 100644 Exesh/internal/domain/execution/input/inputs/artifact_input_definition.go create mode 100644 Exesh/internal/domain/execution/input/inputs/filestorage_bucket_file_input_definition.go create mode 100644 Exesh/internal/domain/execution/input/inputs/filestorage_bucket_input_definition.go create mode 
100644 Exesh/internal/domain/execution/input/inputs/inline_input_definition.go create mode 100644 Exesh/internal/domain/execution/input/inputs/input_definition.go delete mode 100644 Exesh/internal/domain/execution/inputs/artifact_input.go delete mode 100644 Exesh/internal/domain/execution/inputs/filestorage_bucket_input.go delete mode 100644 Exesh/internal/domain/execution/inputs/inputs.go delete mode 100644 Exesh/internal/domain/execution/job.go create mode 100644 Exesh/internal/domain/execution/job/id.go create mode 100644 Exesh/internal/domain/execution/job/job.go create mode 100644 Exesh/internal/domain/execution/job/job_definition.go create mode 100644 Exesh/internal/domain/execution/job/jobs/check_cpp_job.go create mode 100644 Exesh/internal/domain/execution/job/jobs/check_cpp_job_definition.go create mode 100644 Exesh/internal/domain/execution/job/jobs/compile_cpp_job.go create mode 100644 Exesh/internal/domain/execution/job/jobs/compile_cpp_job_definition.go create mode 100644 Exesh/internal/domain/execution/job/jobs/compile_go_job.go create mode 100644 Exesh/internal/domain/execution/job/jobs/compile_go_job_definition.go create mode 100644 Exesh/internal/domain/execution/job/jobs/job.go create mode 100644 Exesh/internal/domain/execution/job/jobs/job_definition.go create mode 100644 Exesh/internal/domain/execution/job/jobs/run_cpp_job.go create mode 100644 Exesh/internal/domain/execution/job/jobs/run_cpp_job_definition.go create mode 100644 Exesh/internal/domain/execution/job/jobs/run_go_job.go create mode 100644 Exesh/internal/domain/execution/job/jobs/run_go_job_definition.go create mode 100644 Exesh/internal/domain/execution/job/jobs/run_py_job.go create mode 100644 Exesh/internal/domain/execution/job/jobs/run_py_job_definition.go delete mode 100644 Exesh/internal/domain/execution/jobs/check_cpp_job.go delete mode 100644 Exesh/internal/domain/execution/jobs/compile_cpp_job.go delete mode 100644 Exesh/internal/domain/execution/jobs/compile_go_job.go 
delete mode 100644 Exesh/internal/domain/execution/jobs/jobs.go delete mode 100644 Exesh/internal/domain/execution/jobs/run_cpp_job.go delete mode 100644 Exesh/internal/domain/execution/jobs/run_go_job.go delete mode 100644 Exesh/internal/domain/execution/jobs/run_py_job.go create mode 100644 Exesh/internal/domain/execution/jobs_graph.go delete mode 100644 Exesh/internal/domain/execution/message.go create mode 100644 Exesh/internal/domain/execution/message/message.go create mode 100644 Exesh/internal/domain/execution/message/messages/check_job_message.go create mode 100644 Exesh/internal/domain/execution/message/messages/compile_job_message.go create mode 100644 Exesh/internal/domain/execution/message/messages/finish_execution_message.go create mode 100644 Exesh/internal/domain/execution/message/messages/message.go create mode 100644 Exesh/internal/domain/execution/message/messages/run_job_message.go create mode 100644 Exesh/internal/domain/execution/message/messages/start_execution_message.go delete mode 100644 Exesh/internal/domain/execution/messages/check_step_message.go delete mode 100644 Exesh/internal/domain/execution/messages/compile_step_message.go delete mode 100644 Exesh/internal/domain/execution/messages/finish_execution_message.go delete mode 100644 Exesh/internal/domain/execution/messages/run_step_message.go delete mode 100644 Exesh/internal/domain/execution/messages/start_execution_message.go delete mode 100644 Exesh/internal/domain/execution/output.go create mode 100644 Exesh/internal/domain/execution/output/output.go delete mode 100644 Exesh/internal/domain/execution/outputs/artifact_output.go delete mode 100644 Exesh/internal/domain/execution/outputs/outputs.go delete mode 100644 Exesh/internal/domain/execution/result.go create mode 100644 Exesh/internal/domain/execution/result/result.go create mode 100644 Exesh/internal/domain/execution/result/results/check_result.go create mode 100644 
Exesh/internal/domain/execution/result/results/compile_result.go create mode 100644 Exesh/internal/domain/execution/result/results/result.go create mode 100644 Exesh/internal/domain/execution/result/results/run_result.go delete mode 100644 Exesh/internal/domain/execution/results/check_result.go delete mode 100644 Exesh/internal/domain/execution/results/compile_result.go delete mode 100644 Exesh/internal/domain/execution/results/results.go delete mode 100644 Exesh/internal/domain/execution/results/run_result.go delete mode 100644 Exesh/internal/domain/execution/source.go create mode 100644 Exesh/internal/domain/execution/source/id.go create mode 100644 Exesh/internal/domain/execution/source/source.go create mode 100644 Exesh/internal/domain/execution/source/source_definition.go create mode 100644 Exesh/internal/domain/execution/source/sources/filestorage_bucket_file_source.go rename Exesh/internal/domain/execution/{sources/filestorage_bucket_source.go => source/sources/filestorage_bucket_file_source_definition.go} (65%) create mode 100644 Exesh/internal/domain/execution/source/sources/filestorage_bucket_source_definition.go create mode 100644 Exesh/internal/domain/execution/source/sources/inline_source.go create mode 100644 Exesh/internal/domain/execution/source/sources/inline_source_definition.go create mode 100644 Exesh/internal/domain/execution/source/sources/source.go create mode 100644 Exesh/internal/domain/execution/source/sources/source_definition.go delete mode 100644 Exesh/internal/domain/execution/sources/inline_source.go delete mode 100644 Exesh/internal/domain/execution/sources/other_step_source.go delete mode 100644 Exesh/internal/domain/execution/sources/sources.go create mode 100644 Exesh/internal/domain/execution/stage.go create mode 100644 Exesh/internal/domain/execution/stage_definition.go create mode 100644 Exesh/internal/domain/execution/stages_graph.go delete mode 100644 Exesh/internal/domain/execution/step.go delete mode 100644 
Exesh/internal/domain/execution/steps/check_cpp_step.go delete mode 100644 Exesh/internal/domain/execution/steps/compile_cpp_step.go delete mode 100644 Exesh/internal/domain/execution/steps/compile_go_step.go delete mode 100644 Exesh/internal/domain/execution/steps/run_cpp_step.go delete mode 100644 Exesh/internal/domain/execution/steps/run_go_step.go delete mode 100644 Exesh/internal/domain/execution/steps/run_py_step.go delete mode 100644 Exesh/internal/domain/execution/steps/steps.go create mode 100644 Exesh/internal/factory/execution_factory.go delete mode 100644 Exesh/internal/factory/job_factory.go rename Exesh/internal/provider/{providers => }/adapter/filestorage_adapter.go (61%) delete mode 100644 Exesh/internal/provider/input_provider.go create mode 100644 Exesh/internal/provider/provider.go delete mode 100644 Exesh/internal/provider/providers/artifact_source_input_provider.go delete mode 100644 Exesh/internal/provider/providers/artifact_source_output_provider.go delete mode 100644 Exesh/internal/provider/providers/filestorage_bucket_input_provider.go create mode 100644 Exesh/internal/provider/source_provider.go diff --git a/Exesh/cmd/coordinator/main.go b/Exesh/cmd/coordinator/main.go index f3a47e1d..b39b559f 100644 --- a/Exesh/cmd/coordinator/main.go +++ b/Exesh/cmd/coordinator/main.go @@ -8,9 +8,7 @@ import ( "exesh/internal/config" "exesh/internal/factory" "exesh/internal/pool" - "exesh/internal/provider" - "exesh/internal/provider/providers" - "exesh/internal/provider/providers/adapter" + "exesh/internal/provider/adapter" "exesh/internal/registry" schedule "exesh/internal/scheduler" "exesh/internal/sender" @@ -57,24 +55,22 @@ func main() { return } - filestorage, err := filestorage.New(log, cfg.FileStorage, mux) + fs, err := filestorage.New(log, cfg.FileStorage, mux) if err != nil { log.Error("failed to create filestorage", slog.String("error", err.Error())) return } - defer filestorage.Shutdown() - - filestorageAdapter := 
adapter.NewFilestorageAdapter(filestorage) - inputProvider := setupInputProvider(cfg.InputProvider, filestorageAdapter) + defer fs.Shutdown() workerPool := pool.NewWorkerPool(log, cfg.WorkerPool) defer workerPool.StopObservers() artifactRegistry := registry.NewArtifactRegistry(log, cfg.ArtifactRegistry, workerPool) - jobFactory := factory.NewJobFactory(log, cfg.JobFactory, artifactRegistry, inputProvider) + filestorageAdapter := adapter.NewFilestorageAdapter(fs) + executionFactory := factory.NewExecutionFactory(cfg.JobFactory, filestorageAdapter) - messageFactory := factory.NewMessageFactory(log) + messageFactory := factory.NewMessageFactory() messageSender := sender.NewKafkaSender(log, cfg.Sender, unitOfWork, outboxStorage) messageSender.Start(ctx) @@ -83,13 +79,13 @@ func main() { collectors.NewGoCollector(), collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}), ) - promCoordReg := prometheus.WrapRegistererWithPrefix("coduels_exesh_coordinator_", promRegistry) + promCoordinatorRegistry := prometheus.WrapRegistererWithPrefix("coduels_exesh_coordinator_", promRegistry) jobScheduler := schedule.NewJobScheduler(log) executionScheduler := schedule.NewExecutionScheduler(log, cfg.ExecutionScheduler, unitOfWork, executionStorage, - jobFactory, jobScheduler, messageFactory, messageSender) + executionFactory, artifactRegistry, jobScheduler, messageFactory, messageSender) - err = executionScheduler.RegisterMetrics(promCoordReg) + err = executionScheduler.RegisterMetrics(promCoordinatorRegistry) if err != nil { log.Error("could not register metrics from execution scheduler", slog.Any("err", err)) return @@ -181,9 +177,3 @@ func setupStorage(log *slog.Logger, cfg config.StorageConfig) ( return unitOfWork, executionStorage, outboxStorage, err } - -func setupInputProvider(cfg config.InputProviderConfig, filestorageAdapter *adapter.FilestorageAdapter) *provider.InputProvider { - filestorageBucketInputProvider := 
providers.NewFilestorageBucketInputProvider(filestorageAdapter, cfg.FilestorageBucketTTL) - artifactInputProvider := providers.NewArtifactInputProvider(filestorageAdapter, cfg.ArtifactTTL) - return provider.NewInputProvider(filestorageBucketInputProvider, artifactInputProvider) -} diff --git a/Exesh/cmd/worker/main.go b/Exesh/cmd/worker/main.go index b9cbd869..6c661612 100644 --- a/Exesh/cmd/worker/main.go +++ b/Exesh/cmd/worker/main.go @@ -7,8 +7,7 @@ import ( "exesh/internal/executor" "exesh/internal/executor/executors" "exesh/internal/provider" - "exesh/internal/provider/providers" - "exesh/internal/provider/providers/adapter" + "exesh/internal/provider/adapter" "exesh/internal/runtime/docker" "exesh/internal/worker" "fmt" @@ -46,23 +45,23 @@ func main() { mux := chi.NewRouter() - filestorage, err := filestorage.New(log, cfg.FileStorage, mux) + fs, err := filestorage.New(log, cfg.FileStorage, mux) if err != nil { log.Error("failed to create filestorage", slog.String("error", err.Error())) return } - defer filestorage.Shutdown() + defer fs.Shutdown() - filestorageAdapter := adapter.NewFilestorageAdapter(filestorage) - inputProvider := setupInputProvider(cfg.InputProvider, filestorageAdapter) - outputProvider := setupOutputProvider(cfg.OutputProvider, filestorageAdapter) + filestorageAdapter := adapter.NewFilestorageAdapter(fs) + sourceProvider := provider.NewSourceProvider(cfg.SourceProvider, filestorageAdapter) + outputProvider := provider.NewOutputProvider(cfg.OutputProvider, filestorageAdapter) - jobExecutor, err := setupJobExecutor(log, inputProvider, outputProvider) + jobExecutor, err := setupJobExecutor(log, sourceProvider, outputProvider) if err != nil { flog.Fatal(err) } - worker.NewWorker(log, cfg.Worker, jobExecutor).Start(ctx) + worker.NewWorker(log, cfg.Worker, sourceProvider, jobExecutor).Start(ctx) promRegistry := prometheus.NewRegistry() promRegistry.MustRegister( @@ -121,18 +120,7 @@ func setupLogger(env string) (log *slog.Logger, err error) { 
return log, err } -func setupInputProvider(cfg config.InputProviderConfig, filestorageAdapter *adapter.FilestorageAdapter) *provider.InputProvider { - filestorageBucketInputProvider := providers.NewFilestorageBucketInputProvider(filestorageAdapter, cfg.FilestorageBucketTTL) - artifactInputProvider := providers.NewArtifactInputProvider(filestorageAdapter, cfg.ArtifactTTL) - return provider.NewInputProvider(filestorageBucketInputProvider, artifactInputProvider) -} - -func setupOutputProvider(cfg config.OutputProviderConfig, filestorageAdapter *adapter.FilestorageAdapter) *provider.OutputProvider { - artifactOutputProvider := providers.NewArtifactOutputProvider(filestorageAdapter, cfg.ArtifactTTL) - return provider.NewOutputProvider(artifactOutputProvider) -} - -func setupJobExecutor(log *slog.Logger, inputProvider *provider.InputProvider, outputProvider *provider.OutputProvider) (*executor.JobExecutor, error) { +func setupJobExecutor(log *slog.Logger, sourceProvider *provider.SourceProvider, outputProvider *provider.OutputProvider) (*executor.JobExecutor, error) { gccRT, err := docker.New( docker.WithDefaultClient(), docker.WithBaseImage("gcc"), @@ -157,11 +145,11 @@ func setupJobExecutor(log *slog.Logger, inputProvider *provider.InputProvider, o if err != nil { return nil, fmt.Errorf("create python runtime: %w", err) } - compileCppJobExecutor := executors.NewCompileCppJobExecutor(log, inputProvider, outputProvider, gccRT) - compileGoJobExecutor := executors.NewCompileGoJobExecutor(log, inputProvider, outputProvider, goRT) - runCppJobExecutor := executors.NewRunCppJobExecutor(log, inputProvider, outputProvider, gccRT) - runPyJobExecutor := executors.NewRunPyJobExecutor(log, inputProvider, outputProvider, pyRT) - runGoJobExecutor := executors.NewRunGoJobExecutor(log, inputProvider, outputProvider, goRT) - checkCppJobExecutor := executors.NewCheckCppJobExecutor(log, inputProvider, outputProvider, gccRT) + compileCppJobExecutor := executors.NewCompileCppJobExecutor(log, 
sourceProvider, outputProvider, gccRT) + compileGoJobExecutor := executors.NewCompileGoJobExecutor(log, sourceProvider, outputProvider, goRT) + runCppJobExecutor := executors.NewRunCppJobExecutor(log, sourceProvider, outputProvider, gccRT) + runPyJobExecutor := executors.NewRunPyJobExecutor(log, sourceProvider, outputProvider, pyRT) + runGoJobExecutor := executors.NewRunGoJobExecutor(log, sourceProvider, outputProvider, goRT) + checkCppJobExecutor := executors.NewCheckCppJobExecutor(log, sourceProvider, outputProvider, gccRT) return executor.NewJobExecutor(compileCppJobExecutor, compileGoJobExecutor, runCppJobExecutor, runPyJobExecutor, runGoJobExecutor, checkCppJobExecutor), nil } diff --git a/Exesh/config/coordinator/dev.yml b/Exesh/config/coordinator/dev.yml index 4b5db66f..77e08e88 100644 --- a/Exesh/config/coordinator/dev.yml +++ b/Exesh/config/coordinator/dev.yml @@ -10,15 +10,13 @@ filestorage: workers: 1 collector_iterations_delay: 60 worker_iterations_delay: 5 -input_provider: - filestorage_bucket_ttl: 15m - artifact_ttl: 5m job_factory: output: - compiled_cpp: bin + compiled_binary: bin run_output: output - check_verdict: verdict - filestorage_endpoint: http://localhost:5253 + source_ttl: + filestorage_bucket: 30m + inline: 30m execution_scheduler: executions_interval: 5s max_concurrency: 10 diff --git a/Exesh/config/coordinator/docker.yml b/Exesh/config/coordinator/docker.yml index 6ab4802d..571056f2 100644 --- a/Exesh/config/coordinator/docker.yml +++ b/Exesh/config/coordinator/docker.yml @@ -11,15 +11,10 @@ filestorage: workers: 1 collector_iterations_delay: 60 worker_iterations_delay: 5 -input_provider: - filestorage_bucket_ttl: 15m - artifact_ttl: 5m job_factory: output: - compiled_cpp: bin + compiled_binary: bin run_output: output - check_verdict: verdict - filestorage_endpoint: http://coordinator:5253 execution_scheduler: executions_interval: 3s max_concurrency: 10 diff --git a/Exesh/config/worker-1/dev.yml b/Exesh/config/worker-1/dev.yml index 
b05358a8..95b72cb2 100644 --- a/Exesh/config/worker-1/dev.yml +++ b/Exesh/config/worker-1/dev.yml @@ -7,7 +7,7 @@ filestorage: workers: 1 collector_iterations_delay: 60 worker_iterations_delay: 5 -input_provider: +source_provider: filestorage_bucket_ttl: 15m artifact_ttl: 5m output_provider: diff --git a/Exesh/config/worker-1/docker.yml b/Exesh/config/worker-1/docker.yml index cd45d5f5..4ac83391 100644 --- a/Exesh/config/worker-1/docker.yml +++ b/Exesh/config/worker-1/docker.yml @@ -8,7 +8,7 @@ filestorage: workers: 1 collector_iterations_delay: 60 worker_iterations_delay: 5 -input_provider: +source_provider: filestorage_bucket_ttl: 15m artifact_ttl: 5m output_provider: diff --git a/Exesh/config/worker-2/dev.yml b/Exesh/config/worker-2/dev.yml index 9744d46e..e9d786c2 100644 --- a/Exesh/config/worker-2/dev.yml +++ b/Exesh/config/worker-2/dev.yml @@ -7,7 +7,7 @@ filestorage: workers: 1 collector_iterations_delay: 60 worker_iterations_delay: 5 -input_provider: +source_provider: filestorage_bucket_ttl: 15m artifact_ttl: 5m output_provider: diff --git a/Exesh/config/worker-2/docker.yml b/Exesh/config/worker-2/docker.yml index 32ec503d..5e3b4397 100644 --- a/Exesh/config/worker-2/docker.yml +++ b/Exesh/config/worker-2/docker.yml @@ -8,7 +8,7 @@ filestorage: workers: 1 collector_iterations_delay: 60 worker_iterations_delay: 5 -input_provider: +source_provider: filestorage_bucket_ttl: 15m artifact_ttl: 5m output_provider: diff --git a/Exesh/example/main.go b/Exesh/example/main.go index f4123871..69c62b7d 100644 --- a/Exesh/example/main.go +++ b/Exesh/example/main.go @@ -4,10 +4,10 @@ package main import ( "context" - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/inputs" - "exesh/internal/domain/execution/jobs" - "exesh/internal/domain/execution/outputs" + "exesh/internal/domain/execution/input" + "exesh/internal/domain/execution/input/inputs" + "exesh/internal/domain/execution/job" + jobs2 "exesh/internal/domain/execution/job/jobs" 
"exesh/internal/executor/executors" "exesh/internal/runtime/docker" "fmt" @@ -19,7 +19,7 @@ import ( type dummyInputProvider struct{} -func (dp *dummyInputProvider) Create(ctx context.Context, in execution.Input) (w io.Writer, commit, abort func() error, err error) { +func (dp *dummyInputProvider) Create(ctx context.Context, in input.Input) (w io.Writer, commit, abort func() error, err error) { commit = func() error { return nil } abort = func() error { return nil } f, err := os.OpenFile(in.GetFile(), os.O_CREATE|os.O_RDWR, 0o755) @@ -31,12 +31,12 @@ func (dp *dummyInputProvider) Create(ctx context.Context, in execution.Input) (w return f, commit, abort, err } -func (dp *dummyInputProvider) Locate(ctx context.Context, in execution.Input) (path string, unlock func(), err error) { +func (dp *dummyInputProvider) Locate(ctx context.Context, in input.Input) (path string, unlock func(), err error) { unlock = func() {} return in.GetFile(), func() {}, nil } -func (dp *dummyInputProvider) Read(ctx context.Context, in execution.Input) (r io.Reader, unlock func(), err error) { +func (dp *dummyInputProvider) Read(ctx context.Context, in input.Input) (r io.Reader, unlock func(), err error) { unlock = func() {} f, err := os.OpenFile(in.GetFile(), os.O_RDONLY, 0o755) if err != nil { @@ -48,7 +48,7 @@ func (dp *dummyInputProvider) Read(ctx context.Context, in execution.Input) (r i type dummyOutputProvider struct{} -func (dp *dummyOutputProvider) Create(ctx context.Context, out execution.Output) (w io.Writer, commit, abort func() error, err error) { +func (dp *dummyOutputProvider) Create(ctx context.Context, out output.Output) (w io.Writer, commit, abort func() error, err error) { commit = func() error { return nil } abort = func() error { return nil } f, err := os.OpenFile(out.GetFile(), os.O_CREATE|os.O_RDWR, 0o755) @@ -60,16 +60,16 @@ func (dp *dummyOutputProvider) Create(ctx context.Context, out execution.Output) return f, commit, abort, err } -func (dp *dummyOutputProvider) 
Locate(ctx context.Context, out execution.Output) (path string, unlock func(), err error) { +func (dp *dummyOutputProvider) Locate(ctx context.Context, out output.Output) (path string, unlock func(), err error) { unlock = func() {} return out.GetFile(), func() {}, nil } -func (dp *dummyOutputProvider) Reserve(ctx context.Context, out execution.Output) (path string, unlock func() error, smth func() error, err error) { +func (dp *dummyOutputProvider) Reserve(ctx context.Context, out output.Output) (path string, unlock func() error, smth func() error, err error) { return out.GetFile(), func() error { return nil }, func() error { return nil }, nil } -func (dp *dummyOutputProvider) Read(ctx context.Context, out execution.Output) (r io.Reader, unlock func(), err error) { +func (dp *dummyOutputProvider) Read(ctx context.Context, out output.Output) (r io.Reader, unlock func(), err error) { unlock = func() {} f, err := os.OpenFile(out.GetFile(), os.O_RDONLY, 0o755) if err != nil { @@ -88,9 +88,9 @@ func Ref[T any](t T) *T { } func main() { - compileJobId := execution.JobID([]byte("1234567890123456789012345678901234567890")) - runJobId := execution.JobID([]byte("0123456789012345678901234567890123456789")) - checkJobId := execution.JobID([]byte("9012345678901234567890123456789012345678")) + compileJobId := job.JobID([]byte("1234567890123456789012345678901234567890")) + runJobId := job.JobID([]byte("0123456789012345678901234567890123456789")) + checkJobId := job.JobID([]byte("9012345678901234567890123456789012345678")) workerID := "worker-id" rt, err := docker.New( docker.WithDefaultClient(), @@ -102,32 +102,32 @@ func main() { } compileExecutor := executors.NewCompileCppJobExecutor(slog.Default(), &dummyInputProvider{}, &dummyOutputProvider{}, rt) - compilationResult := compileExecutor.Execute(context.Background(), Ref(jobs.NewCompileCppJob( + compilationResult := compileExecutor.Execute(context.Background(), Ref(jobs2.NewCompileCppJob( compileJobId, - 
inputs.NewArtifactInput("main.cpp", compileJobId, workerID), - outputs.NewArtifactOutput("a.out", compileJobId), + inputs.NewArtifactInput("main.cpp", compileJobId), + output.NewArtifactOutput("a.out", compileJobId), ))) fmt.Printf("compile: %#v\n", compilationResult) - checkerCompilationResult := compileExecutor.Execute(context.Background(), Ref(jobs.NewCompileCppJob( + checkerCompilationResult := compileExecutor.Execute(context.Background(), Ref(jobs2.NewCompileCppJob( compileJobId, - inputs.NewArtifactInput("checker.cpp", compileJobId, workerID), - outputs.NewArtifactOutput("a.checker.out", compileJobId), + inputs.NewArtifactInput("checker.cpp", compileJobId), + output.NewArtifactOutput("a.checker.out", compileJobId), ))) fmt.Printf("compile checker: %#v\n", checkerCompilationResult) runExecutor := executors.NewRunCppJobExecutor(slog.Default(), &dummyInputProvider{}, &dummyOutputProvider{}, rt) - runResult := runExecutor.Execute(context.Background(), Ref(jobs.NewRunCppJob( - runJobId, inputs.NewArtifactInput("a.out", runJobId, workerID), - inputs.NewArtifactInput("in.txt", runJobId, workerID), - outputs.NewArtifactOutput("out.txt", runJobId), 0, 0, true))) + runResult := runExecutor.Execute(context.Background(), Ref(jobs2.NewRunCppJob( + runJobId, inputs.NewArtifactInput("a.out", runJobId), + inputs.NewArtifactInput("in.txt", runJobId), + output.NewArtifactOutput("out.txt", runJobId), 0, 0, true))) fmt.Printf("run: %#v\n", runResult) checkExecutor := executors.NewCheckCppJobExecutor(slog.Default(), &dummyInputProvider{}, &dummyOutputProvider{}, rt) - checkResult := checkExecutor.Execute(context.Background(), Ref(jobs.NewCheckCppJob( - runJobId, inputs.NewArtifactInput("a.checker.out", checkJobId, workerID), - inputs.NewArtifactInput("correct.txt", checkJobId, workerID), - inputs.NewArtifactInput("out.txt", checkJobId, workerID), + checkResult := checkExecutor.Execute(context.Background(), Ref(jobs2.NewCheckCppJob( + runJobId, 
inputs.NewArtifactInput("a.checker.out", checkJobId), + inputs.NewArtifactInput("correct.txt", checkJobId), + inputs.NewArtifactInput("out.txt", checkJobId), ))) fmt.Printf("check: %#v\n", checkResult) } diff --git a/Exesh/internal/api/execute/api.go b/Exesh/internal/api/execute/api.go index 3f683a58..85ee6d67 100644 --- a/Exesh/internal/api/execute/api.go +++ b/Exesh/internal/api/execute/api.go @@ -1,15 +1,15 @@ package execute import ( - "encoding/json" "exesh/internal/api" "exesh/internal/domain/execution" - "exesh/internal/domain/execution/steps" + "exesh/internal/domain/execution/source/sources" ) type ( Request struct { - Steps []execution.Step `json:"steps"` + Sources []sources.Definition `json:"sources"` + Stages []execution.StageDefinition `json:"stages"` } Response struct { @@ -17,18 +17,3 @@ type ( ExecutionID *execution.ID `json:"execution_id,omitempty"` } ) - -func (r *Request) UnmarshalJSON(data []byte) error { - req := struct { - Steps json.RawMessage `json:"steps"` - }{} - if err := json.Unmarshal(data, &req); err != nil { - return err - } - var err error - r.Steps, err = steps.UnmarshalStepsJSON(req.Steps) - if err != nil { - return err - } - return nil -} diff --git a/Exesh/internal/api/execute/handler.go b/Exesh/internal/api/execute/handler.go index 0fcca8ec..7b109dc4 100644 --- a/Exesh/internal/api/execute/handler.go +++ b/Exesh/internal/api/execute/handler.go @@ -36,7 +36,7 @@ func (h *Handler) Handle(w http.ResponseWriter, r *http.Request) { return } - command := execute.Command{Steps: req.Steps} + command := execute.Command{Sources: req.Sources, Stages: req.Stages} result, err := h.uc.Execute(r.Context(), command) if err != nil { h.log.Error("failed to execute", slog.Any("err", err)) diff --git a/Exesh/internal/api/heartbeat/api.go b/Exesh/internal/api/heartbeat/api.go index 6581e541..58361365 100644 --- a/Exesh/internal/api/heartbeat/api.go +++ b/Exesh/internal/api/heartbeat/api.go @@ -1,67 +1,22 @@ package heartbeat import ( - 
"encoding/json" "exesh/internal/api" - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/jobs" - "exesh/internal/domain/execution/results" - "fmt" + "exesh/internal/domain/execution/job/jobs" + "exesh/internal/domain/execution/result/results" + "exesh/internal/domain/execution/source/sources" ) type ( Request struct { - WorkerID string `json:"worker_id"` - DoneJobs []execution.Result `json:"done_jobs"` - // AddedArtifacts []ArtifactDto `json:"added_artifacts"` - FreeSlots int `json:"free_slots"` + WorkerID string `json:"worker_id"` + DoneJobs []results.Result `json:"done_jobs"` + FreeSlots int `json:"free_slots"` } Response struct { api.Response - Jobs []execution.Job `json:"jobs,omitempty"` + Jobs []jobs.Job `json:"jobs,omitempty"` + Sources []sources.Source `json:"sources,omitempty"` } - - // ArtifactDto struct { - // JobID execution.JobID `json:"job_id"` - // TrashTime time.Time `json:"trash_time"` - // } ) - -func (r *Request) UnmarshalJSON(data []byte) (err error) { - attributes := struct { - WorkerID string `json:"worker_id"` - DoneJobs json.RawMessage `json:"done_jobs"` - FreeSlots int `json:"free_slots"` - }{} - if err = json.Unmarshal(data, &attributes); err != nil { - return fmt.Errorf("failed to unmarshal request attributes: %w", err) - } - - r.WorkerID = attributes.WorkerID - if r.DoneJobs, err = results.UnmarshalResultsJSON(attributes.DoneJobs); err != nil { - return fmt.Errorf("failed to unmarshal results: %w", err) - } - r.FreeSlots = attributes.FreeSlots - return nil -} - -func (r *Response) UnmarshalJSON(data []byte) (err error) { - attributes := struct { - api.Response - Jobs json.RawMessage `json:"jobs,omitempty"` - }{} - if err = json.Unmarshal(data, &attributes); err != nil { - return fmt.Errorf("failed to unmarshal response attributes: %w", err) - } - - r.Response = attributes.Response - if attributes.Jobs != nil { - if r.Jobs, err = jobs.UnmarshalJobsJSON(attributes.Jobs); err != nil { - return err - } - } else { - r.Jobs = 
[]execution.Job{} - } - return nil -} diff --git a/Exesh/internal/api/heartbeat/client.go b/Exesh/internal/api/heartbeat/client.go index 417cbff7..48b113d2 100644 --- a/Exesh/internal/api/heartbeat/client.go +++ b/Exesh/internal/api/heartbeat/client.go @@ -5,7 +5,9 @@ import ( "context" "encoding/json" "exesh/internal/api" - "exesh/internal/domain/execution" + "exesh/internal/domain/execution/job/jobs" + "exesh/internal/domain/execution/result/results" + "exesh/internal/domain/execution/source/sources" "fmt" "io" "net/http" @@ -24,13 +26,13 @@ func NewHeartbeatClient(endpoint string) *Client { func (c *Client) Heartbeat( ctx context.Context, workerID string, - doneJobs []execution.Result, + doneJobs []results.Result, freeSlots int, -) ([]execution.Job, error) { +) ([]jobs.Job, []sources.Source, error) { req := Request{workerID, doneJobs, freeSlots} jsonReq, err := json.Marshal(req) if err != nil { - return nil, err + return nil, nil, err } httpReq, err := http.NewRequestWithContext( ctx, @@ -38,31 +40,31 @@ func (c *Client) Heartbeat( c.endpoint+"/heartbeat", bytes.NewBuffer(jsonReq)) if err != nil { - return nil, fmt.Errorf("failed to create heartheat request: %w", err) + return nil, nil, fmt.Errorf("failed to create heartheat request: %w", err) } httpClient := http.Client{} httpResp, err := httpClient.Do(httpReq) if err != nil { - return nil, fmt.Errorf("failed to send heartheat request: %w", err) + return nil, nil, fmt.Errorf("failed to send heartheat request: %w", err) } defer func() { _ = httpResp.Body.Close() }() if httpResp.StatusCode != http.StatusOK { content, err := io.ReadAll(httpResp.Body) if err != nil { - return nil, fmt.Errorf("failed to read heartheat response: %w", err) + return nil, nil, fmt.Errorf("failed to read heartheat response: %w", err) } - return nil, fmt.Errorf("heartbeat got response error (status %d): %s", httpResp.StatusCode, string(content)) + return nil, nil, fmt.Errorf("heartbeat got response error (status %d): %s", 
httpResp.StatusCode, string(content)) } var resp Response if err = json.NewDecoder(httpResp.Body).Decode(&resp); err != nil { - return nil, fmt.Errorf("failed to decode heartheat response: %w", err) + return nil, nil, fmt.Errorf("failed to decode heartheat response: %w", err) } if resp.Status != api.StatusOK { - return nil, fmt.Errorf("heartbeat got response error: %s", resp.Error) + return nil, nil, fmt.Errorf("heartbeat got response error: %s", resp.Error) } - return resp.Jobs, nil + return resp.Jobs, resp.Sources, nil } diff --git a/Exesh/internal/api/heartbeat/handler.go b/Exesh/internal/api/heartbeat/handler.go index a9fb47a9..efa08c76 100644 --- a/Exesh/internal/api/heartbeat/handler.go +++ b/Exesh/internal/api/heartbeat/handler.go @@ -3,7 +3,8 @@ package heartbeat import ( "encoding/json" "exesh/internal/api" - "exesh/internal/domain/execution" + "exesh/internal/domain/execution/job/jobs" + "exesh/internal/domain/execution/source/sources" "exesh/internal/usecase/heartbeat" "log/slog" "net/http" @@ -37,7 +38,7 @@ func (h *Handler) Handle(w http.ResponseWriter, r *http.Request) { } command := buildCommand(req) - jobs, err := h.uc.Heartbeat(r.Context(), command) + jbs, srcs, err := h.uc.Heartbeat(r.Context(), command) if err != nil { h.log.Error("failed to process heartbeat", slog.Any("command", command), @@ -46,7 +47,7 @@ func (h *Handler) Handle(w http.ResponseWriter, r *http.Request) { return } - render.JSON(w, r, okResponse(jobs)) + render.JSON(w, r, okResponse(jbs, srcs)) return } @@ -58,10 +59,11 @@ func buildCommand(req Request) heartbeat.Command { } } -func okResponse(jobs []execution.Job) Response { +func okResponse(jbs []jobs.Job, srcs []sources.Source) Response { return Response{ Response: api.OK(), - Jobs: jobs, + Jobs: jbs, + Sources: srcs, } } diff --git a/Exesh/internal/config/config.go b/Exesh/internal/config/config.go index 6fc7ee9e..bc190248 100644 --- a/Exesh/internal/config/config.go +++ b/Exesh/internal/config/config.go @@ -1,15 +1,8 @@ 
package config -import "time" - type ( HttpServerConfig struct { Addr string `yaml:"addr"` MetricsAddr string `yaml:"metrics_addr"` } - - InputProviderConfig struct { - FilestorageBucketTTL time.Duration `yaml:"filestorage_bucket_ttl"` - ArtifactTTL time.Duration `yaml:"artifact_ttl"` - } ) diff --git a/Exesh/internal/config/coordinator_config.go b/Exesh/internal/config/coordinator_config.go index 4913993c..b55cc51f 100644 --- a/Exesh/internal/config/coordinator_config.go +++ b/Exesh/internal/config/coordinator_config.go @@ -15,7 +15,6 @@ type ( HttpServer HttpServerConfig `yaml:"http_server"` Storage StorageConfig `yaml:"storage"` FileStorage filestorage.Config `yaml:"filestorage"` - InputProvider InputProviderConfig `yaml:"input_provider"` JobFactory JobFactoryConfig `yaml:"job_factory"` ExecutionScheduler ExecutionSchedulerConfig `yaml:"execution_scheduler"` WorkerPool WorkerPoolConfig `yaml:"worker_pool"` @@ -36,10 +35,13 @@ type ( JobFactoryConfig struct { Output struct { - CompiledCpp string `yaml:"compiled_cpp"` - RunOutput string `yaml:"run_output"` - CheckVerdict string `yaml:"check_verdict"` + CompiledBinary string `yaml:"compiled_binary"` + RunOutput string `yaml:"run_output"` } `yaml:"output"` + SourceTTL struct { + FilestorageBucket time.Duration `yaml:"filestorage_bucket"` + Inline time.Duration `yaml:"inline"` + } FilestorageEndpoint string `yaml:"filestorage_endpoint"` } diff --git a/Exesh/internal/config/worker_config.go b/Exesh/internal/config/worker_config.go index e33f58e6..c09b203a 100644 --- a/Exesh/internal/config/worker_config.go +++ b/Exesh/internal/config/worker_config.go @@ -14,11 +14,16 @@ type ( Env string `yaml:"env"` HttpServer HttpServerConfig `yaml:"http_server"` FileStorage filestorage.Config `yaml:"filestorage"` - InputProvider InputProviderConfig `yaml:"input_provider"` + SourceProvider SourceProviderConfig `yaml:"input_provider"` OutputProvider OutputProviderConfig `yaml:"output_provider"` Worker WorkConfig `yaml:"worker"` } + 
SourceProviderConfig struct { + FilestorageBucketTTL time.Duration `yaml:"filestorage_bucket_ttl"` + ArtifactTTL time.Duration `yaml:"artifact_ttl"` + } + OutputProviderConfig struct { ArtifactTTL time.Duration `yaml:"artifact_ttl"` } diff --git a/Exesh/internal/domain/execution/context.go b/Exesh/internal/domain/execution/context.go deleted file mode 100644 index 376256bd..00000000 --- a/Exesh/internal/domain/execution/context.go +++ /dev/null @@ -1,86 +0,0 @@ -package execution - -import ( - "crypto/sha1" - "fmt" - "sync" - - "github.com/DIvanCode/filestorage/pkg/bucket" -) - -type Context struct { - ExecutionID ID - - InlineSourcesBucketID bucket.ID - - graph *graph - - stepByJobID map[JobID]Step - jobByStepName map[StepName]Job - - mu *sync.Mutex - forceDone bool -} - -func newContext(executionID ID, graph *graph) (ctx Context, err error) { - ctx = Context{ - ExecutionID: executionID, - - graph: graph, - - stepByJobID: make(map[JobID]Step), - jobByStepName: make(map[StepName]Job), - - mu: &sync.Mutex{}, - forceDone: false, - } - - hash := sha1.New() - hash.Write([]byte(executionID.String())) - if err = ctx.InlineSourcesBucketID.FromString(fmt.Sprintf("%x", hash.Sum(nil))); err != nil { - err = fmt.Errorf("failed to create inline sources bucket id: %w", err) - return - } - - return -} - -func (c *Context) PickSteps() []Step { - if c.IsForceDone() { - return []Step{} - } - return c.graph.pickSteps() -} - -func (c *Context) ScheduledStep(step Step, job Job) { - c.stepByJobID[job.GetID()] = step - c.jobByStepName[step.GetName()] = job -} - -func (c *Context) DoneStep(stepName StepName) { - c.graph.doneStep(stepName) -} - -func (c *Context) ForceDone() { - c.mu.Lock() - defer c.mu.Unlock() - c.forceDone = true -} - -func (c *Context) IsDone() bool { - if c.IsForceDone() { - return true - } - return c.graph.isGraphDone() -} - -func (c *Context) GetJobForStep(stepName StepName) (Job, bool) { - job, ok := c.jobByStepName[stepName] - return job, ok -} - -func (c 
*Context) IsForceDone() bool { - c.mu.Lock() - defer c.mu.Unlock() - return c.forceDone -} diff --git a/Exesh/internal/domain/execution/execution.go b/Exesh/internal/domain/execution/execution.go index 6e9709e3..04f7ff1e 100644 --- a/Exesh/internal/domain/execution/execution.go +++ b/Exesh/internal/domain/execution/execution.go @@ -1,57 +1,82 @@ package execution import ( - "time" + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/job/jobs" + "exesh/internal/domain/execution/output" + "exesh/internal/domain/execution/source" + "exesh/internal/domain/execution/source/sources" + "sync" ) type ( Execution struct { - ID ID - Steps []Step - Status Status - CreatedAt time.Time - ScheduledAt *time.Time - FinishedAt *time.Time - } + Definition - Status string -) + Stages []*Stage + + JobByName map[job.DefinitionName]jobs.Job + JobDefinitionByID map[job.ID]jobs.Definition + + SourceDefinitionByName map[source.DefinitionName]sources.Definition + SourceByID map[source.ID]sources.Source + + OutputByJob map[job.ID]output.Output -const ( - StatusNewExecution Status = "new" - StatusScheduledExecution Status = "scheduled" - StatusFinishedExecution Status = "finished" + graph *stagesGraph + + mu sync.Mutex + forceFailed bool + } ) -func NewExecution(steps []Step) Execution { - return Execution{ - ID: newID(), - Steps: steps, - Status: StatusNewExecution, - CreatedAt: time.Now(), - ScheduledAt: nil, - FinishedAt: nil, +func NewExecution(def Definition) *Execution { + ex := Execution{ + Definition: def, + Stages: make([]*Stage, len(def.Stages)), + + JobByName: make(map[job.DefinitionName]jobs.Job), + JobDefinitionByID: make(map[job.ID]jobs.Definition), + + SourceDefinitionByName: make(map[source.DefinitionName]sources.Definition), + SourceByID: make(map[source.ID]sources.Source), + + OutputByJob: make(map[job.ID]output.Output), } + + return &ex +} + +func (ex *Execution) BuildGraph() { + ex.graph = newStagesGraph(ex.Stages) } -func (e *Execution) 
SetScheduled(scheduledAt time.Time) { - if e.Status == StatusFinishedExecution { - return +func (ex *Execution) PickJobs() []jobs.Job { + if ex.IsDone() { + return []jobs.Job{} } - e.Status = StatusScheduledExecution - e.ScheduledAt = &scheduledAt + return ex.graph.pickJobs() } -func (e *Execution) SetFinished(finishedAt time.Time) { - if e.Status == StatusFinishedExecution { - return - } +func (ex *Execution) DoneJob(jobID job.ID, jobStatus job.Status) { + ex.graph.doneJob(jobID, jobStatus) +} - e.Status = StatusFinishedExecution - e.ScheduledAt = &finishedAt +func (ex *Execution) IsDone() bool { + return ex.IsForceFailed() || ex.graph.isDone() } -func (e *Execution) BuildContext() (Context, error) { - return newContext(e.ID, newGraph(e.Steps)) +func (ex *Execution) ForceFail() { + ex.mu.Lock() + defer ex.mu.Unlock() + + ex.forceFailed = true +} + +func (ex *Execution) IsForceFailed() bool { + ex.mu.Lock() + defer ex.mu.Unlock() + + return ex.forceFailed } diff --git a/Exesh/internal/domain/execution/execution_definition.go b/Exesh/internal/domain/execution/execution_definition.go new file mode 100644 index 00000000..756e76bd --- /dev/null +++ b/Exesh/internal/domain/execution/execution_definition.go @@ -0,0 +1,56 @@ +package execution + +import ( + "exesh/internal/domain/execution/source/sources" + "time" +) + +type ( + Definition struct { + ID ID + Stages []StageDefinition + Sources []sources.Definition + Status Status + CreatedAt time.Time + ScheduledAt *time.Time + FinishedAt *time.Time + } + + Status string +) + +const ( + StatusNew Status = "new" + StatusScheduled Status = "scheduled" + StatusFinished Status = "finished" +) + +func NewExecutionDefinition(stages []StageDefinition, sources []sources.Definition) Definition { + return Definition{ + ID: newID(), + Stages: stages, + Sources: sources, + Status: StatusNew, + CreatedAt: time.Now(), + ScheduledAt: nil, + FinishedAt: nil, + } +} + +func (def *Definition) SetScheduled(scheduledAt time.Time) { + if 
def.Status == StatusFinished { + return + } + + def.Status = StatusScheduled + def.ScheduledAt = &scheduledAt +} + +func (def *Definition) SetFinished(finishedAt time.Time) { + if def.Status == StatusFinished { + return + } + + def.Status = StatusFinished + def.ScheduledAt = &finishedAt +} diff --git a/Exesh/internal/domain/execution/graph.go b/Exesh/internal/domain/execution/graph.go deleted file mode 100644 index 31feeadc..00000000 --- a/Exesh/internal/domain/execution/graph.go +++ /dev/null @@ -1,110 +0,0 @@ -package execution - -import ( - "slices" - "sync" -) - -type graph struct { - succSteps map[StepName][]Step - used map[StepName]any - topSortOrder []Step - - mu sync.Mutex - lastPickedStep int - isDone map[StepName]any - doneSteps int -} - -func newGraph(executionSteps []Step) *graph { - g := graph{ - succSteps: make(map[StepName][]Step), - used: make(map[StepName]any, len(executionSteps)), - - topSortOrder: make([]Step, 0, len(executionSteps)), - - mu: sync.Mutex{}, - lastPickedStep: -1, - isDone: make(map[StepName]any), - doneSteps: 0, - } - - for i := len(executionSteps) - 1; i >= 0; i-- { - step := executionSteps[i] - for _, dep := range step.GetDependencies() { - if _, ok := g.succSteps[dep]; !ok { - g.succSteps[dep] = make([]Step, 0) - } - g.succSteps[dep] = append(g.succSteps[dep], step) - } - } - - g.topSort(executionSteps) - - return &g -} - -func (graph *graph) pickSteps() []Step { - graph.mu.Lock() - defer graph.mu.Unlock() - - pickedSteps := make([]Step, 0) - for graph.lastPickedStep+1 < len(graph.topSortOrder) { - step := graph.topSortOrder[graph.lastPickedStep+1] - - canPick := true - for _, dep := range step.GetDependencies() { - if _, has := graph.isDone[dep]; !has { - canPick = false - break - } - } - - if !canPick { - break - } - - pickedSteps = append(pickedSteps, step) - graph.lastPickedStep++ - } - return pickedSteps -} - -func (graph *graph) doneStep(stepName StepName) { - graph.mu.Lock() - defer graph.mu.Unlock() - - 
graph.isDone[stepName] = struct{}{} - graph.doneSteps++ -} - -func (graph *graph) isGraphDone() bool { - graph.mu.Lock() - defer graph.mu.Unlock() - - return graph.doneSteps == len(graph.topSortOrder) -} - -func (g *graph) topSort(executionSteps []Step) { - for i := len(executionSteps) - 1; i >= 0; i-- { - step := executionSteps[i] - - // temp: do not change the initial order - g.topSortOrder = append(g.topSortOrder, step) - - // if _, used := g.used[step.GetName()]; !used { - // g.dfs(step) - // } - } - slices.Reverse(g.topSortOrder) -} - -func (g *graph) dfs(step Step) { - g.used[step.GetName()] = struct{}{} - for _, succStep := range g.succSteps[step.GetName()] { - if _, used := g.used[succStep.GetName()]; !used { - g.dfs(succStep) - } - } - g.topSortOrder = append(g.topSortOrder, step) -} diff --git a/Exesh/internal/domain/execution/id.go b/Exesh/internal/domain/execution/id.go index 4a984bca..9b5b66a4 100644 --- a/Exesh/internal/domain/execution/id.go +++ b/Exesh/internal/domain/execution/id.go @@ -12,8 +12,8 @@ func newID() ID { return ID(uuid.New()) } -func (id ID) String() string { - uid := uuid.UUID(id) +func (id *ID) String() string { + uid := uuid.UUID(*id) return uid.String() } @@ -26,7 +26,7 @@ func (id *ID) FromString(idStr string) (err error) { return } -func (id ID) MarshalJSON() ([]byte, error) { +func (id *ID) MarshalJSON() ([]byte, error) { return json.Marshal(id.String()) } diff --git a/Exesh/internal/domain/execution/input.go b/Exesh/internal/domain/execution/input.go deleted file mode 100644 index fb7f914d..00000000 --- a/Exesh/internal/domain/execution/input.go +++ /dev/null @@ -1,28 +0,0 @@ -package execution - -type ( - Input interface { - GetType() InputType - GetFile() string - } - - InputDetails struct { - Type InputType `json:"type"` - File string `json:"file"` - } - - InputType string -) - -const ( - ArtifactInputType InputType = "artifact" - FilestorageBucketInputType InputType = "filestorage_bucket" -) - -func (input InputDetails) 
GetType() InputType { - return input.Type -} - -func (input InputDetails) GetFile() string { - return input.File -} diff --git a/Exesh/internal/domain/execution/input/input.go b/Exesh/internal/domain/execution/input/input.go new file mode 100644 index 00000000..0859cd04 --- /dev/null +++ b/Exesh/internal/domain/execution/input/input.go @@ -0,0 +1,25 @@ +package input + +import "exesh/internal/domain/execution/source" + +type ( + Input struct { + Type Type `json:"type"` + SourceID source.ID `json:"source"` + } + + Type string +) + +func NewInput(inputType Type, sourceID source.ID) Input { + return Input{ + Type: inputType, + SourceID: sourceID, + } +} + +const ( + Artifact Type = "artifact" + Inline Type = "inline" + FilestorageBucketFile Type = "filestorage_bucket_file" +) diff --git a/Exesh/internal/domain/execution/input/input_definition.go b/Exesh/internal/domain/execution/input/input_definition.go new file mode 100644 index 00000000..7647eedd --- /dev/null +++ b/Exesh/internal/domain/execution/input/input_definition.go @@ -0,0 +1,24 @@ +package input + +type ( + IDefinition interface { + GetType() DefinitionType + } + + DefinitionDetails struct { + Type DefinitionType `json:"type"` + } + + DefinitionType string +) + +const ( + ArtifactDefinition DefinitionType = "artifact" + InlineDefinition DefinitionType = "inline" + FilestorageBucketDefinition DefinitionType = "filestorage_bucket" + FilestorageBucketFileDefinition DefinitionType = "filestorage_bucket_file" +) + +func (def *DefinitionDetails) GetType() DefinitionType { + return def.Type +} diff --git a/Exesh/internal/domain/execution/input/inputs/artifact_input_definition.go b/Exesh/internal/domain/execution/input/inputs/artifact_input_definition.go new file mode 100644 index 00000000..5ead3216 --- /dev/null +++ b/Exesh/internal/domain/execution/input/inputs/artifact_input_definition.go @@ -0,0 +1,11 @@ +package inputs + +import ( + "exesh/internal/domain/execution/input" + 
"exesh/internal/domain/execution/job" +) + +type ArtifactInputDefinition struct { + input.DefinitionDetails + JobDefinitionName job.DefinitionName `json:"job"` +} diff --git a/Exesh/internal/domain/execution/input/inputs/filestorage_bucket_file_input_definition.go b/Exesh/internal/domain/execution/input/inputs/filestorage_bucket_file_input_definition.go new file mode 100644 index 00000000..bd778812 --- /dev/null +++ b/Exesh/internal/domain/execution/input/inputs/filestorage_bucket_file_input_definition.go @@ -0,0 +1,11 @@ +package inputs + +import ( + "exesh/internal/domain/execution/input" + "exesh/internal/domain/execution/source" +) + +type FilestorageBucketFileInputDefinition struct { + input.DefinitionDetails + SourceDefinitionName source.DefinitionName `json:"source"` +} diff --git a/Exesh/internal/domain/execution/input/inputs/filestorage_bucket_input_definition.go b/Exesh/internal/domain/execution/input/inputs/filestorage_bucket_input_definition.go new file mode 100644 index 00000000..af241082 --- /dev/null +++ b/Exesh/internal/domain/execution/input/inputs/filestorage_bucket_input_definition.go @@ -0,0 +1,12 @@ +package inputs + +import ( + "exesh/internal/domain/execution/input" + "exesh/internal/domain/execution/source" +) + +type FilestorageBucketInputDefinition struct { + input.DefinitionDetails + SourceDefinitionName source.DefinitionName `json:"source"` + File string `json:"file"` +} diff --git a/Exesh/internal/domain/execution/input/inputs/inline_input_definition.go b/Exesh/internal/domain/execution/input/inputs/inline_input_definition.go new file mode 100644 index 00000000..fdb6c02a --- /dev/null +++ b/Exesh/internal/domain/execution/input/inputs/inline_input_definition.go @@ -0,0 +1,11 @@ +package inputs + +import ( + "exesh/internal/domain/execution/input" + "exesh/internal/domain/execution/source" +) + +type InlineInputDefinition struct { + input.DefinitionDetails + SourceDefinitionName source.DefinitionName `json:"source"` +} diff --git 
a/Exesh/internal/domain/execution/input/inputs/input_definition.go b/Exesh/internal/domain/execution/input/inputs/input_definition.go new file mode 100644 index 00000000..5c8f8694 --- /dev/null +++ b/Exesh/internal/domain/execution/input/inputs/input_definition.go @@ -0,0 +1,53 @@ +package inputs + +import ( + "encoding/json" + "exesh/internal/domain/execution/input" + "fmt" +) + +type Definition struct { + input.IDefinition +} + +func (def *Definition) UnmarshalJSON(data []byte) error { + var details input.DefinitionDetails + if err := json.Unmarshal(data, &details); err != nil { + return fmt.Errorf("failed to unmarshal input definition details: %w", err) + } + + switch details.Type { + case input.ArtifactDefinition: + def.IDefinition = &ArtifactInputDefinition{} + case input.InlineDefinition: + def.IDefinition = &InlineInputDefinition{} + case input.FilestorageBucketDefinition: + def.IDefinition = &FilestorageBucketInputDefinition{} + case input.FilestorageBucketFileDefinition: + def.IDefinition = &FilestorageBucketFileInputDefinition{} + default: + return fmt.Errorf("unknown input definition type: %s", details.Type) + } + + if err := json.Unmarshal(data, def.IDefinition); err != nil { + return fmt.Errorf("failed to unmarshal %s input definition: %w", details.Type, err) + } + + return nil +} + +func (def *Definition) AsArtifact() *ArtifactInputDefinition { + return def.IDefinition.(*ArtifactInputDefinition) +} + +func (def *Definition) AsInline() *InlineInputDefinition { + return def.IDefinition.(*InlineInputDefinition) +} + +func (def *Definition) AsFilestorageBucket() *FilestorageBucketInputDefinition { + return def.IDefinition.(*FilestorageBucketInputDefinition) +} + +func (def *Definition) AsFilestorageBucketFile() *FilestorageBucketFileInputDefinition { + return def.IDefinition.(*FilestorageBucketFileInputDefinition) +} diff --git a/Exesh/internal/domain/execution/inputs/artifact_input.go b/Exesh/internal/domain/execution/inputs/artifact_input.go deleted 
file mode 100644 index 468acbf4..00000000 --- a/Exesh/internal/domain/execution/inputs/artifact_input.go +++ /dev/null @@ -1,20 +0,0 @@ -package inputs - -import "exesh/internal/domain/execution" - -type ArtifactInput struct { - execution.InputDetails - JobID execution.JobID `json:"job_id"` - WorkerID string `json:"worker_id"` -} - -func NewArtifactInput(file string, jobID execution.JobID, workerID string) ArtifactInput { - return ArtifactInput{ - InputDetails: execution.InputDetails{ - Type: execution.ArtifactInputType, - File: file, - }, - JobID: jobID, - WorkerID: workerID, - } -} diff --git a/Exesh/internal/domain/execution/inputs/filestorage_bucket_input.go b/Exesh/internal/domain/execution/inputs/filestorage_bucket_input.go deleted file mode 100644 index 7b9cbe0a..00000000 --- a/Exesh/internal/domain/execution/inputs/filestorage_bucket_input.go +++ /dev/null @@ -1,24 +0,0 @@ -package inputs - -import ( - "exesh/internal/domain/execution" - - "github.com/DIvanCode/filestorage/pkg/bucket" -) - -type FilestorageBucketInput struct { - execution.InputDetails - BucketID bucket.ID `json:"bucket_id"` - DownloadEndpoint string `json:"download_endpoint"` -} - -func NewFilestorageBucketInput(file string, bucketID bucket.ID, downloadEndpoint string) FilestorageBucketInput { - return FilestorageBucketInput{ - InputDetails: execution.InputDetails{ - Type: execution.FilestorageBucketInputType, - File: file, - }, - BucketID: bucketID, - DownloadEndpoint: downloadEndpoint, - } -} diff --git a/Exesh/internal/domain/execution/inputs/inputs.go b/Exesh/internal/domain/execution/inputs/inputs.go deleted file mode 100644 index a91c9d9d..00000000 --- a/Exesh/internal/domain/execution/inputs/inputs.go +++ /dev/null @@ -1,31 +0,0 @@ -package inputs - -import ( - "encoding/json" - "exesh/internal/domain/execution" - "fmt" -) - -func UnmarshalInputJSON(data []byte) (input execution.Input, err error) { - var details execution.InputDetails - if err = json.Unmarshal(data, &details); err != 
nil { - err = fmt.Errorf("failed to unmarshal input details: %w", err) - return - } - - switch details.Type { - case execution.ArtifactInputType: - input = &ArtifactInput{} - case execution.FilestorageBucketInputType: - input = &FilestorageBucketInput{} - default: - err = fmt.Errorf("unknown input type: %s", details.Type) - return - } - - if err = json.Unmarshal(data, input); err != nil { - err = fmt.Errorf("failed to unmarshal %s input: %w", details.Type, err) - return - } - return -} diff --git a/Exesh/internal/domain/execution/job.go b/Exesh/internal/domain/execution/job.go deleted file mode 100644 index 0c8e23a8..00000000 --- a/Exesh/internal/domain/execution/job.go +++ /dev/null @@ -1,75 +0,0 @@ -package execution - -import ( - "crypto/sha1" - "encoding/json" - "fmt" -) - -type ( - Job interface { - GetID() JobID - GetType() JobType - GetInputs() []Input - GetOutput() Output - } - - JobDetails struct { - ID JobID `json:"id"` - Type JobType `json:"type"` - } - - JobID [2 * sha1.Size]byte - - JobType string -) - -const ( - CompileCppJobType JobType = "compile_cpp" - CompileGoJobType JobType = "compile_go" - RunCppJobType JobType = "run_cpp" - RunPyJobType JobType = "run_py" - RunGoJobType JobType = "run_go" - CheckCppJobType JobType = "check_cpp" -) - -func (job JobDetails) GetID() JobID { - return job.ID -} - -func (job JobDetails) GetType() JobType { - return job.Type -} - -func (id JobID) String() string { - return string(id[:]) -} - -func (id *JobID) FromString(s string) error { - if len(s) != len(id) { - return fmt.Errorf("invalid hex string length") - } - for _, c := range s { - if '0' <= c && c <= '9' { - continue - } - if 'a' <= c && c <= 'f' { - continue - } - return fmt.Errorf("invalid hex string char: %c", c) - } - copy(id[:], s) - return nil -} - -func (id JobID) MarshalJSON() ([]byte, error) { - return json.Marshal(id.String()) -} - -func (id *JobID) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { 
- return fmt.Errorf("id should be a string, got %s", data) - } - return id.FromString(s) -} diff --git a/Exesh/internal/domain/execution/job/id.go b/Exesh/internal/domain/execution/job/id.go new file mode 100644 index 00000000..91b189d1 --- /dev/null +++ b/Exesh/internal/domain/execution/job/id.go @@ -0,0 +1,42 @@ +package job + +import ( + "crypto/sha1" + "encoding/json" + "fmt" +) + +type ID [2 * sha1.Size]byte + +func (id *ID) String() string { + return string(id[:]) +} + +func (id *ID) FromString(s string) error { + if len(s) != len(id) { + return fmt.Errorf("invalid hex string length") + } + for _, c := range s { + if '0' <= c && c <= '9' { + continue + } + if 'a' <= c && c <= 'f' { + continue + } + return fmt.Errorf("invalid hex string char: %c", c) + } + copy(id[:], s) + return nil +} + +func (id *ID) MarshalJSON() ([]byte, error) { + return json.Marshal(id.String()) +} + +func (id *ID) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return fmt.Errorf("id should be a string, got %s", data) + } + return id.FromString(s) +} diff --git a/Exesh/internal/domain/execution/job/job.go b/Exesh/internal/domain/execution/job/job.go new file mode 100644 index 00000000..5d8f2833 --- /dev/null +++ b/Exesh/internal/domain/execution/job/job.go @@ -0,0 +1,54 @@ +package job + +import ( + "exesh/internal/domain/execution/input" + "exesh/internal/domain/execution/output" +) + +type ( + IJob interface { + GetType() Type + GetID() ID + GetSuccessStatus() Status + GetInputs() []input.Input + GetOutput() *output.Output + GetDependencies() []ID + } + + Details struct { + Type Type `json:"type"` + ID ID `json:"id"` + SuccessStatus Status `json:"success_status"` + } + + Type string + Status string +) + +const ( + CompileCpp Type = "compile_cpp" + CompileGo Type = "compile_go" + RunCpp Type = "run_cpp" + RunPy Type = "run_py" + RunGo Type = "run_go" + CheckCpp Type = "check_cpp" + + StatusOK Status = "OK" + StatusCE Status = "CE" 
+ StatusRE Status = "RE" + StatusTL Status = "TL" + StatusML Status = "ML" + StatusWA Status = "WA" +) + +func (jb *Details) GetType() Type { + return jb.Type +} + +func (jb *Details) GetID() ID { + return jb.ID +} + +func (jb *Details) GetSuccessStatus() Status { + return jb.SuccessStatus +} diff --git a/Exesh/internal/domain/execution/job/job_definition.go b/Exesh/internal/domain/execution/job/job_definition.go new file mode 100644 index 00000000..2a2a50a2 --- /dev/null +++ b/Exesh/internal/domain/execution/job/job_definition.go @@ -0,0 +1,29 @@ +package job + +type ( + IDefinition interface { + GetType() Type + GetName() DefinitionName + GetSuccessStatus() Status + } + + DefinitionDetails struct { + Type Type `json:"type"` + Name DefinitionName `json:"name"` + SuccessStatus Status `json:"success_status"` + } + + DefinitionName string +) + +func (def *DefinitionDetails) GetType() Type { + return def.Type +} + +func (def *DefinitionDetails) GetName() DefinitionName { + return def.Name +} + +func (def *DefinitionDetails) GetSuccessStatus() Status { + return def.SuccessStatus +} diff --git a/Exesh/internal/domain/execution/job/jobs/check_cpp_job.go b/Exesh/internal/domain/execution/job/jobs/check_cpp_job.go new file mode 100644 index 00000000..0c9990c6 --- /dev/null +++ b/Exesh/internal/domain/execution/job/jobs/check_cpp_job.go @@ -0,0 +1,47 @@ +package jobs + +import ( + "exesh/internal/domain/execution/input" + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/output" +) + +type CheckCppJob struct { + job.Details + CompiledChecker input.Input `json:"compiled_checker"` + CorrectOutput input.Input `json:"correct_output"` + SuspectOutput input.Input `json:"suspect_output"` +} + +func NewCheckCppJob( + id job.ID, + successStatus job.Status, + compiledChecker input.Input, + correctOutput input.Input, + suspectOutput input.Input, +) Job { + return Job{ + &CheckCppJob{ + Details: job.Details{ + ID: id, + Type: job.CheckCpp, + SuccessStatus: 
successStatus, + }, + CompiledChecker: compiledChecker, + CorrectOutput: correctOutput, + SuspectOutput: suspectOutput, + }, + } +} + +func (jb *CheckCppJob) GetInputs() []input.Input { + return []input.Input{jb.CompiledChecker, jb.CorrectOutput, jb.SuspectOutput} +} + +func (jb *CheckCppJob) GetOutput() *output.Output { + return nil +} + +func (jb *CheckCppJob) GetDependencies() []job.ID { + return getDependencies(jb.GetInputs()) +} diff --git a/Exesh/internal/domain/execution/job/jobs/check_cpp_job_definition.go b/Exesh/internal/domain/execution/job/jobs/check_cpp_job_definition.go new file mode 100644 index 00000000..d2fe8039 --- /dev/null +++ b/Exesh/internal/domain/execution/job/jobs/check_cpp_job_definition.go @@ -0,0 +1,13 @@ +package jobs + +import ( + "exesh/internal/domain/execution/input/inputs" + "exesh/internal/domain/execution/job" +) + +type CheckCppJobDefinition struct { + job.DefinitionDetails + CompiledChecker inputs.Definition `json:"compiled_checker"` + CorrectOutput inputs.Definition `json:"correct_output"` + SuspectOutput inputs.Definition `json:"suspect_output"` +} diff --git a/Exesh/internal/domain/execution/job/jobs/compile_cpp_job.go b/Exesh/internal/domain/execution/job/jobs/compile_cpp_job.go new file mode 100644 index 00000000..4db1d2f1 --- /dev/null +++ b/Exesh/internal/domain/execution/job/jobs/compile_cpp_job.go @@ -0,0 +1,44 @@ +package jobs + +import ( + "exesh/internal/domain/execution/input" + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/output" +) + +type CompileCppJob struct { + job.Details + Code input.Input `json:"code"` + CompiledCode output.Output `json:"compiled_code"` +} + +func NewCompileCppJob( + id job.ID, + successStatus job.Status, + code input.Input, + compiledCode output.Output, +) Job { + return Job{ + &CompileCppJob{ + Details: job.Details{ + ID: id, + Type: job.CompileCpp, + SuccessStatus: successStatus, + }, + Code: code, + CompiledCode: compiledCode, + }, + } +} + +func (jb 
*CompileCppJob) GetInputs() []input.Input { + return []input.Input{jb.Code} +} + +func (jb *CompileCppJob) GetOutput() *output.Output { + return &jb.CompiledCode +} + +func (jb *CompileCppJob) GetDependencies() []job.ID { + return getDependencies(jb.GetInputs()) +} diff --git a/Exesh/internal/domain/execution/job/jobs/compile_cpp_job_definition.go b/Exesh/internal/domain/execution/job/jobs/compile_cpp_job_definition.go new file mode 100644 index 00000000..b5795a8e --- /dev/null +++ b/Exesh/internal/domain/execution/job/jobs/compile_cpp_job_definition.go @@ -0,0 +1,11 @@ +package jobs + +import ( + "exesh/internal/domain/execution/input/inputs" + "exesh/internal/domain/execution/job" +) + +type CompileCppJobDefinition struct { + job.DefinitionDetails + Code inputs.Definition `json:"code"` +} diff --git a/Exesh/internal/domain/execution/job/jobs/compile_go_job.go b/Exesh/internal/domain/execution/job/jobs/compile_go_job.go new file mode 100644 index 00000000..f6f85eb1 --- /dev/null +++ b/Exesh/internal/domain/execution/job/jobs/compile_go_job.go @@ -0,0 +1,44 @@ +package jobs + +import ( + "exesh/internal/domain/execution/input" + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/output" +) + +type CompileGoJob struct { + job.Details + Code input.Input `json:"code"` + CompiledCode output.Output `json:"compiled_code"` +} + +func NewCompileGoJob( + id job.ID, + successStatus job.Status, + code input.Input, + compiledCode output.Output, +) Job { + return Job{ + &CompileGoJob{ + Details: job.Details{ + ID: id, + Type: job.CompileGo, + SuccessStatus: successStatus, + }, + Code: code, + CompiledCode: compiledCode, + }, + } +} + +func (jb *CompileGoJob) GetInputs() []input.Input { + return []input.Input{jb.Code} +} + +func (jb *CompileGoJob) GetOutput() *output.Output { + return &jb.CompiledCode +} + +func (jb *CompileGoJob) GetDependencies() []job.ID { + return getDependencies(jb.GetInputs()) +} diff --git 
a/Exesh/internal/domain/execution/job/jobs/compile_go_job_definition.go b/Exesh/internal/domain/execution/job/jobs/compile_go_job_definition.go new file mode 100644 index 00000000..7a44fe67 --- /dev/null +++ b/Exesh/internal/domain/execution/job/jobs/compile_go_job_definition.go @@ -0,0 +1,11 @@ +package jobs + +import ( + "exesh/internal/domain/execution/input/inputs" + "exesh/internal/domain/execution/job" +) + +type CompileGoJobDefinition struct { + job.DefinitionDetails + Code inputs.Definition `json:"code"` +} diff --git a/Exesh/internal/domain/execution/job/jobs/job.go b/Exesh/internal/domain/execution/job/jobs/job.go new file mode 100644 index 00000000..6c1b06c7 --- /dev/null +++ b/Exesh/internal/domain/execution/job/jobs/job.go @@ -0,0 +1,79 @@ +package jobs + +import ( + "encoding/json" + "exesh/internal/domain/execution/input" + "exesh/internal/domain/execution/job" + "fmt" +) + +type Job struct { + job.IJob +} + +func (jb *Job) UnmarshalJSON(data []byte) error { + var details job.Details + if err := json.Unmarshal(data, &details); err != nil { + return fmt.Errorf("failed to unmarshal job details: %w", err) + } + + switch details.Type { + case job.CompileCpp: + jb.IJob = &CompileCppJob{} + case job.CompileGo: + jb.IJob = &CompileGoJob{} + case job.RunCpp: + jb.IJob = &RunCppJob{} + case job.RunGo: + jb.IJob = &RunGoJob{} + case job.RunPy: + jb.IJob = &RunPyJob{} + case job.CheckCpp: + jb.IJob = &CheckCppJob{} + default: + return fmt.Errorf("unknown job type: %s", details.Type) + } + + if err := json.Unmarshal(data, jb.IJob); err != nil { + return fmt.Errorf("failed to unmarshal %s job: %w", details.Type, err) + } + + return nil +} + +func (jb *Job) AsCompileCpp() *CompileCppJob { + return jb.IJob.(*CompileCppJob) +} + +func (jb *Job) AsCompileGo() *CompileGoJob { + return jb.IJob.(*CompileGoJob) +} + +func (jb *Job) AsRunCpp() *RunCppJob { + return jb.IJob.(*RunCppJob) +} + +func (jb *Job) AsRunGo() *RunGoJob { + return jb.IJob.(*RunGoJob) +} + +func (jb 
*Job) AsRunPy() *RunPyJob { + return jb.IJob.(*RunPyJob) +} + +func (jb *Job) AsCheckCpp() *CheckCppJob { + return jb.IJob.(*CheckCppJob) +} + +func getDependencies(ins []input.Input) []job.ID { + deps := make([]job.ID, 0) + for _, in := range ins { + if in.Type == input.Artifact { + var jobID job.ID + if err := jobID.FromString(in.SourceID.String()); err == nil { + deps = append(deps, jobID) + } + } + } + return deps +} diff --git a/Exesh/internal/domain/execution/job/jobs/job_definition.go b/Exesh/internal/domain/execution/job/jobs/job_definition.go new file mode 100644 index 00000000..b2004ab3 --- /dev/null +++ b/Exesh/internal/domain/execution/job/jobs/job_definition.go @@ -0,0 +1,65 @@ +package jobs + +import ( + "encoding/json" + "exesh/internal/domain/execution/job" + "fmt" +) + +type Definition struct { + job.IDefinition +} + +func (def *Definition) UnmarshalJSON(data []byte) error { + var details job.DefinitionDetails + if err := json.Unmarshal(data, &details); err != nil { + return fmt.Errorf("failed to unmarshal job definition details: %w", err) + } + + switch details.Type { + case job.CompileCpp: + def.IDefinition = &CompileCppJobDefinition{} + case job.CompileGo: + def.IDefinition = &CompileGoJobDefinition{} + case job.RunCpp: + def.IDefinition = &RunCppJobDefinition{} + case job.RunGo: + def.IDefinition = &RunGoJobDefinition{} + case job.RunPy: + def.IDefinition = &RunPyJobDefinition{} + case job.CheckCpp: + def.IDefinition = &CheckCppJobDefinition{} + default: + return fmt.Errorf("unknown job definition type: %s", details.Type) + } + + if err := json.Unmarshal(data, def.IDefinition); err != nil { + return fmt.Errorf("failed to unmarshal %s job definition: %w", details.Type, err) + } + + return nil +} + +func (def *Definition) AsCompileCpp() *CompileCppJobDefinition { + return def.IDefinition.(*CompileCppJobDefinition) +} + +func (def *Definition) AsCompileGo() *CompileGoJobDefinition { + return def.IDefinition.(*CompileGoJobDefinition) +} + +func 
(def *Definition) AsRunCpp() *RunCppJobDefinition { + return def.IDefinition.(*RunCppJobDefinition) +} + +func (def *Definition) AsRunGo() *RunGoJobDefinition { + return def.IDefinition.(*RunGoJobDefinition) +} + +func (def *Definition) AsRunPy() *RunPyJobDefinition { + return def.IDefinition.(*RunPyJobDefinition) +} + +func (def *Definition) AsCheckCpp() *CheckCppJobDefinition { + return def.IDefinition.(*CheckCppJobDefinition) +} diff --git a/Exesh/internal/domain/execution/job/jobs/run_cpp_job.go b/Exesh/internal/domain/execution/job/jobs/run_cpp_job.go new file mode 100644 index 00000000..8aa02409 --- /dev/null +++ b/Exesh/internal/domain/execution/job/jobs/run_cpp_job.go @@ -0,0 +1,56 @@ +package jobs + +import ( + "exesh/internal/domain/execution/input" + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/output" +) + +type RunCppJob struct { + job.Details + CompiledCode input.Input `json:"compiled_code"` + RunInput input.Input `json:"run_input"` + RunOutput output.Output `json:"run_output"` + TimeLimit int `json:"time_limit"` + MemoryLimit int `json:"memory_limit"` + ShowOutput bool `json:"show_output"` +} + +func NewRunCppJob( + id job.ID, + successStatus job.Status, + compiledCode input.Input, + runInput input.Input, + runOutput output.Output, + timeLimit int, + memoryLimit int, + showOutput bool, +) Job { + return Job{ + &RunCppJob{ + Details: job.Details{ + ID: id, + Type: job.RunCpp, + SuccessStatus: successStatus, + }, + CompiledCode: compiledCode, + RunInput: runInput, + RunOutput: runOutput, + TimeLimit: timeLimit, + MemoryLimit: memoryLimit, + ShowOutput: showOutput, + }, + } +} + +func (jb *RunCppJob) GetInputs() []input.Input { + return []input.Input{jb.CompiledCode, jb.RunInput} +} + +func (jb *RunCppJob) GetOutput() *output.Output { + return &jb.RunOutput +} + +func (jb *RunCppJob) GetDependencies() []job.ID { + return getDependencies(jb.GetInputs()) +} diff --git 
a/Exesh/internal/domain/execution/job/jobs/run_cpp_job_definition.go b/Exesh/internal/domain/execution/job/jobs/run_cpp_job_definition.go new file mode 100644 index 00000000..96f30baf --- /dev/null +++ b/Exesh/internal/domain/execution/job/jobs/run_cpp_job_definition.go @@ -0,0 +1,15 @@ +package jobs + +import ( + "exesh/internal/domain/execution/input/inputs" + "exesh/internal/domain/execution/job" +) + +type RunCppJobDefinition struct { + job.DefinitionDetails + CompiledCode inputs.Definition `json:"compiled_code"` + RunInput inputs.Definition `json:"input"` + TimeLimit int `json:"time_limit"` + MemoryLimit int `json:"memory_limit"` + ShowOutput bool `json:"show_output"` +} diff --git a/Exesh/internal/domain/execution/job/jobs/run_go_job.go b/Exesh/internal/domain/execution/job/jobs/run_go_job.go new file mode 100644 index 00000000..64b70032 --- /dev/null +++ b/Exesh/internal/domain/execution/job/jobs/run_go_job.go @@ -0,0 +1,56 @@ +package jobs + +import ( + "exesh/internal/domain/execution/input" + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/output" +) + +type RunGoJob struct { + job.Details + CompiledCode input.Input `json:"compiled_code"` + RunInput input.Input `json:"run_input"` + RunOutput output.Output `json:"run_output"` + TimeLimit int `json:"time_limit"` + MemoryLimit int `json:"memory_limit"` + ShowOutput bool `json:"show_output"` +} + +func NewRunGoJob( + id job.ID, + successStatus job.Status, + code input.Input, + runInput input.Input, + runOutput output.Output, + timeLimit int, + memoryLimit int, + showOutput bool, +) Job { + return Job{ + &RunGoJob{ + Details: job.Details{ + ID: id, + Type: job.RunGo, + SuccessStatus: successStatus, + }, + CompiledCode: code, + RunInput: runInput, + RunOutput: runOutput, + TimeLimit: timeLimit, + MemoryLimit: memoryLimit, + ShowOutput: showOutput, + }, + } +} + +func (jb *RunGoJob) GetInputs() []input.Input { + return []input.Input{jb.CompiledCode, jb.RunInput} +} + +func (jb *RunGoJob) 
GetOutput() *output.Output { + return &jb.RunOutput +} + +func (jb *RunGoJob) GetDependencies() []job.ID { + return getDependencies(jb.GetInputs()) +} diff --git a/Exesh/internal/domain/execution/job/jobs/run_go_job_definition.go b/Exesh/internal/domain/execution/job/jobs/run_go_job_definition.go new file mode 100644 index 00000000..b59c003f --- /dev/null +++ b/Exesh/internal/domain/execution/job/jobs/run_go_job_definition.go @@ -0,0 +1,15 @@ +package jobs + +import ( + "exesh/internal/domain/execution/input/inputs" + "exesh/internal/domain/execution/job" +) + +type RunGoJobDefinition struct { + job.DefinitionDetails + CompiledCode inputs.Definition `json:"compiled_code"` + RunInput inputs.Definition `json:"input"` + TimeLimit int `json:"time_limit"` + MemoryLimit int `json:"memory_limit"` + ShowOutput bool `json:"show_output"` +} diff --git a/Exesh/internal/domain/execution/job/jobs/run_py_job.go b/Exesh/internal/domain/execution/job/jobs/run_py_job.go new file mode 100644 index 00000000..6c3588c2 --- /dev/null +++ b/Exesh/internal/domain/execution/job/jobs/run_py_job.go @@ -0,0 +1,56 @@ +package jobs + +import ( + "exesh/internal/domain/execution/input" + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/output" +) + +type RunPyJob struct { + job.Details + Code input.Input `json:"code"` + RunInput input.Input `json:"run_input"` + RunOutput output.Output `json:"run_output"` + TimeLimit int `json:"time_limit"` + MemoryLimit int `json:"memory_limit"` + ShowOutput bool `json:"show_output"` +} + +func NewRunPyJob( + id job.ID, + successStatus job.Status, + code input.Input, + runInput input.Input, + runOutput output.Output, + timeLimit int, + memoryLimit int, + showOutput bool, +) Job { + return Job{ + &RunPyJob{ + Details: job.Details{ + ID: id, + Type: job.RunPy, + SuccessStatus: successStatus, + }, + Code: code, + RunInput: runInput, + RunOutput: runOutput, + TimeLimit: timeLimit, + MemoryLimit: memoryLimit, + ShowOutput: showOutput, + }, + } 
+} + +func (jb *RunPyJob) GetInputs() []input.Input { + return []input.Input{jb.Code, jb.RunInput} +} + +func (jb *RunPyJob) GetOutput() *output.Output { + return &jb.RunOutput +} + +func (jb *RunPyJob) GetDependencies() []job.ID { + return getDependencies(jb.GetInputs()) +} diff --git a/Exesh/internal/domain/execution/job/jobs/run_py_job_definition.go b/Exesh/internal/domain/execution/job/jobs/run_py_job_definition.go new file mode 100644 index 00000000..1092a696 --- /dev/null +++ b/Exesh/internal/domain/execution/job/jobs/run_py_job_definition.go @@ -0,0 +1,15 @@ +package jobs + +import ( + "exesh/internal/domain/execution/input/inputs" + "exesh/internal/domain/execution/job" +) + +type RunPyJobDefinition struct { + job.DefinitionDetails + Code inputs.Definition `json:"code"` + RunInput inputs.Definition `json:"input"` + TimeLimit int `json:"time_limit"` + MemoryLimit int `json:"memory_limit"` + ShowOutput bool `json:"show_output"` +} diff --git a/Exesh/internal/domain/execution/jobs/check_cpp_job.go b/Exesh/internal/domain/execution/jobs/check_cpp_job.go deleted file mode 100644 index 5b1955bf..00000000 --- a/Exesh/internal/domain/execution/jobs/check_cpp_job.go +++ /dev/null @@ -1,68 +0,0 @@ -package jobs - -import ( - "encoding/json" - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/inputs" - "fmt" -) - -type CheckCppJob struct { - execution.JobDetails - CompiledChecker execution.Input `json:"compiled_checker"` - CorrectOutput execution.Input `json:"correct_output"` - SuspectOutput execution.Input `json:"suspect_output"` -} - -func NewCheckCppJob( - id execution.JobID, - compiledChecker execution.Input, - correctOutput execution.Input, - suspectOutput execution.Input, -) CheckCppJob { - return CheckCppJob{ - JobDetails: execution.JobDetails{ - ID: id, - Type: execution.CheckCppJobType, - }, - CompiledChecker: compiledChecker, - CorrectOutput: correctOutput, - SuspectOutput: suspectOutput, - } -} - -func (job CheckCppJob) GetInputs() 
[]execution.Input { - return []execution.Input{job.CompiledChecker, job.CorrectOutput, job.SuspectOutput} -} - -func (job CheckCppJob) GetOutput() execution.Output { - return nil -} - -func (job *CheckCppJob) UnmarshalJSON(data []byte) error { - var err error - if err = json.Unmarshal(data, &job.JobDetails); err != nil { - return fmt.Errorf("failed to unmarshal details: %w", err) - } - - attributes := struct { - CompiledChecker json.RawMessage `json:"compiled_checker"` - CorrectOutput json.RawMessage `json:"correct_output"` - SuspectOutput json.RawMessage `json:"suspect_output"` - }{} - if err = json.Unmarshal(data, &attributes); err != nil { - return fmt.Errorf("failed to unmarshal %s job attributes: %w", job.Type, err) - } - - if job.CompiledChecker, err = inputs.UnmarshalInputJSON(attributes.CompiledChecker); err != nil { - return fmt.Errorf("failed to unmarshal compiled_checker input: %w", err) - } - if job.CorrectOutput, err = inputs.UnmarshalInputJSON(attributes.CorrectOutput); err != nil { - return fmt.Errorf("failed to unmarshal correct_output input: %w", err) - } - if job.SuspectOutput, err = inputs.UnmarshalInputJSON(attributes.SuspectOutput); err != nil { - return fmt.Errorf("failed to unmarshal suspect_output input: %w", err) - } - - return nil -} diff --git a/Exesh/internal/domain/execution/jobs/compile_cpp_job.go b/Exesh/internal/domain/execution/jobs/compile_cpp_job.go deleted file mode 100644 index 13cdb62a..00000000 --- a/Exesh/internal/domain/execution/jobs/compile_cpp_job.go +++ /dev/null @@ -1,58 +0,0 @@ -package jobs - -import ( - "encoding/json" - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/inputs" - "exesh/internal/domain/execution/outputs" - "fmt" -) - -type CompileCppJob struct { - execution.JobDetails - Code execution.Input `json:"code"` - CompiledCode execution.Output `json:"compiled_code"` -} - -func NewCompileCppJob(id execution.JobID, code execution.Input, compiledCode execution.Output) CompileCppJob { - return 
CompileCppJob{ - JobDetails: execution.JobDetails{ - ID: id, - Type: execution.CompileCppJobType, - }, - Code: code, - CompiledCode: compiledCode, - } -} - -func (job CompileCppJob) GetInputs() []execution.Input { - return []execution.Input{job.Code} -} - -func (job CompileCppJob) GetOutput() execution.Output { - return job.CompiledCode -} - -func (job *CompileCppJob) UnmarshalJSON(data []byte) error { - var err error - if err = json.Unmarshal(data, &job.JobDetails); err != nil { - return fmt.Errorf("failed to unmarshal details: %w", err) - } - - attributes := struct { - Code json.RawMessage `json:"code"` - CompiledCode json.RawMessage `json:"compiled_code"` - }{} - if err = json.Unmarshal(data, &attributes); err != nil { - return fmt.Errorf("failed to unmarshal %s job attributes: %w", job.Type, err) - } - - if job.Code, err = inputs.UnmarshalInputJSON(attributes.Code); err != nil { - return fmt.Errorf("failed to unmarshal code input: %w", err) - } - if job.CompiledCode, err = outputs.UnmarshalOutputJSON(attributes.CompiledCode); err != nil { - return fmt.Errorf("failed to unmarshal compiled_code output: %w", err) - } - - return nil -} diff --git a/Exesh/internal/domain/execution/jobs/compile_go_job.go b/Exesh/internal/domain/execution/jobs/compile_go_job.go deleted file mode 100644 index e6005687..00000000 --- a/Exesh/internal/domain/execution/jobs/compile_go_job.go +++ /dev/null @@ -1,58 +0,0 @@ -package jobs - -import ( - "encoding/json" - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/inputs" - "exesh/internal/domain/execution/outputs" - "fmt" -) - -type CompileGoJob struct { - execution.JobDetails - Code execution.Input `json:"code"` - CompiledCode execution.Output `json:"compiled_code"` -} - -func NewCompileGoJob(id execution.JobID, code execution.Input, compiledCode execution.Output) CompileGoJob { - return CompileGoJob{ - JobDetails: execution.JobDetails{ - ID: id, - Type: execution.CompileGoJobType, - }, - Code: code, - CompiledCode: 
compiledCode, - } -} - -func (job CompileGoJob) GetInputs() []execution.Input { - return []execution.Input{job.Code} -} - -func (job CompileGoJob) GetOutput() execution.Output { - return job.CompiledCode -} - -func (job *CompileGoJob) UnmarshalJSON(data []byte) error { - var err error - if err = json.Unmarshal(data, &job.JobDetails); err != nil { - return fmt.Errorf("failed to unmarshal details: %w", err) - } - - attributes := struct { - Code json.RawMessage `json:"code"` - CompiledCode json.RawMessage `json:"compiled_code"` - }{} - if err = json.Unmarshal(data, &attributes); err != nil { - return fmt.Errorf("failed to unmarshal %s job attributes: %w", job.Type, err) - } - - if job.Code, err = inputs.UnmarshalInputJSON(attributes.Code); err != nil { - return fmt.Errorf("failed to unmarshal code input: %w", err) - } - if job.CompiledCode, err = outputs.UnmarshalOutputJSON(attributes.CompiledCode); err != nil { - return fmt.Errorf("failed to unmarshal compiled_code output: %w", err) - } - - return nil -} diff --git a/Exesh/internal/domain/execution/jobs/jobs.go b/Exesh/internal/domain/execution/jobs/jobs.go deleted file mode 100644 index 276d7fbc..00000000 --- a/Exesh/internal/domain/execution/jobs/jobs.go +++ /dev/null @@ -1,59 +0,0 @@ -package jobs - -import ( - "encoding/json" - "exesh/internal/domain/execution" - "fmt" -) - -func UnmarshalJSON(data []byte) (job execution.Job, err error) { - var details execution.JobDetails - if err = json.Unmarshal(data, &details); err != nil { - err = fmt.Errorf("failed to unmarshal job etails: %w", err) - return job, err - } - - switch details.Type { - case execution.CompileCppJobType: - job = &CompileCppJob{} - case execution.CompileGoJobType: - job = &CompileGoJob{} - case execution.RunCppJobType: - job = &RunCppJob{} - case execution.RunGoJobType: - job = &RunGoJob{} - case execution.RunPyJobType: - job = &RunPyJob{} - case execution.CheckCppJobType: - job = &CheckCppJob{} - default: - err = fmt.Errorf("unknown job type: 
%s", details.Type) - return job, err - } - - if err = json.Unmarshal(data, job); err != nil { - err = fmt.Errorf("failed to unmarshal %s job: %w", details.Type, err) - return job, err - } - return job, err -} - -func UnmarshalJobsJSON(data []byte) (jobsArray []execution.Job, err error) { - var array []json.RawMessage - if err = json.Unmarshal(data, &array); err != nil { - err = fmt.Errorf("failed to unmarshal jobs array: %w", err) - return jobsArray, err - } - - jobsArray = make([]execution.Job, 0, len(array)) - for _, item := range array { - var job execution.Job - job, err = UnmarshalJSON(item) - if err != nil { - err = fmt.Errorf("failed to unmarshal job: %w", err) - return jobsArray, err - } - jobsArray = append(jobsArray, job) - } - return jobsArray, err -} diff --git a/Exesh/internal/domain/execution/jobs/run_cpp_job.go b/Exesh/internal/domain/execution/jobs/run_cpp_job.go deleted file mode 100644 index 7dbe276f..00000000 --- a/Exesh/internal/domain/execution/jobs/run_cpp_job.go +++ /dev/null @@ -1,83 +0,0 @@ -package jobs - -import ( - "encoding/json" - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/inputs" - "exesh/internal/domain/execution/outputs" - "fmt" -) - -type RunCppJob struct { - execution.JobDetails - CompiledCode execution.Input `json:"compiled_code"` - RunInput execution.Input `json:"run_input"` - RunOutput execution.Output `json:"run_output"` - TimeLimit int `json:"time_limit"` - MemoryLimit int `json:"memory_limit"` - ShowOutput bool `json:"show_output"` -} - -func NewRunCppJob( - id execution.JobID, - compiledCode execution.Input, - runInput execution.Input, - runOutput execution.Output, - timeLimit int, - memoryLimit int, - showOutput bool) RunCppJob { - return RunCppJob{ - JobDetails: execution.JobDetails{ - ID: id, - Type: execution.RunCppJobType, - }, - CompiledCode: compiledCode, - RunInput: runInput, - RunOutput: runOutput, - TimeLimit: timeLimit, - MemoryLimit: memoryLimit, - ShowOutput: showOutput, - } -} - -func 
(job RunCppJob) GetInputs() []execution.Input { - return []execution.Input{job.CompiledCode, job.RunInput} -} - -func (job RunCppJob) GetOutput() execution.Output { - return job.RunOutput -} - -func (job *RunCppJob) UnmarshalJSON(data []byte) error { - var err error - if err = json.Unmarshal(data, &job.JobDetails); err != nil { - return fmt.Errorf("failed to unmarshal details: %w", err) - } - - attributes := struct { - CompiledCode json.RawMessage `json:"compiled_code"` - RunInput json.RawMessage `json:"run_input"` - RunOutput json.RawMessage `json:"run_output"` - TimeLimit int `json:"time_limit"` - MemoryLimit int `json:"memory_limit"` - ShowOutput bool `json:"show_output"` - }{} - if err = json.Unmarshal(data, &attributes); err != nil { - return fmt.Errorf("failed to unmarshal %s job attributes: %w", job.Type, err) - } - - if job.CompiledCode, err = inputs.UnmarshalInputJSON(attributes.CompiledCode); err != nil { - return fmt.Errorf("failed to unmarshal compiled_code input: %w", err) - } - if job.RunInput, err = inputs.UnmarshalInputJSON(attributes.RunInput); err != nil { - return fmt.Errorf("failed to unmarshal run_input input: %w", err) - } - if job.RunOutput, err = outputs.UnmarshalOutputJSON(attributes.RunOutput); err != nil { - return fmt.Errorf("failed to unmarshal run_output output: %w", err) - } - job.TimeLimit = attributes.TimeLimit - job.MemoryLimit = attributes.MemoryLimit - job.ShowOutput = attributes.ShowOutput - - return nil -} diff --git a/Exesh/internal/domain/execution/jobs/run_go_job.go b/Exesh/internal/domain/execution/jobs/run_go_job.go deleted file mode 100644 index 4f89cefd..00000000 --- a/Exesh/internal/domain/execution/jobs/run_go_job.go +++ /dev/null @@ -1,84 +0,0 @@ -package jobs - -import ( - "encoding/json" - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/inputs" - "exesh/internal/domain/execution/outputs" - "fmt" -) - -type RunGoJob struct { - execution.JobDetails - CompiledCode execution.Input 
`json:"compiled_code"` - RunInput execution.Input `json:"run_input"` - RunOutput execution.Output `json:"run_output"` - TimeLimit int `json:"time_limit"` - MemoryLimit int `json:"memory_limit"` - ShowOutput bool `json:"show_output"` -} - -func NewRunGoJob( - id execution.JobID, - code execution.Input, - runInput execution.Input, - runOutput execution.Output, - timeLimit int, - memoryLimit int, - showOutput bool, -) RunGoJob { - return RunGoJob{ - JobDetails: execution.JobDetails{ - ID: id, - Type: execution.RunGoJobType, - }, - CompiledCode: code, - RunInput: runInput, - RunOutput: runOutput, - TimeLimit: timeLimit, - MemoryLimit: memoryLimit, - ShowOutput: showOutput, - } -} - -func (job RunGoJob) GetInputs() []execution.Input { - return []execution.Input{job.CompiledCode, job.RunInput} -} - -func (job RunGoJob) GetOutput() execution.Output { - return job.RunOutput -} - -func (job *RunGoJob) UnmarshalJSON(data []byte) error { - var err error - if err = json.Unmarshal(data, &job.JobDetails); err != nil { - return fmt.Errorf("failed to unmarshal details: %w", err) - } - - attributes := struct { - CompiledCode json.RawMessage `json:"compiled_code"` - RunInput json.RawMessage `json:"run_input"` - RunOutput json.RawMessage `json:"run_output"` - TimeLimit int `json:"time_limit"` - MemoryLimit int `json:"memory_limit"` - ShowOutput bool `json:"show_output"` - }{} - if err = json.Unmarshal(data, &attributes); err != nil { - return fmt.Errorf("failed to unmarshal %s job attributes: %w", job.Type, err) - } - - if job.CompiledCode, err = inputs.UnmarshalInputJSON(attributes.CompiledCode); err != nil { - return fmt.Errorf("failed to unmarshal code input: %w", err) - } - if job.RunInput, err = inputs.UnmarshalInputJSON(attributes.RunInput); err != nil { - return fmt.Errorf("failed to unmarshal run_input input: %w", err) - } - if job.RunOutput, err = outputs.UnmarshalOutputJSON(attributes.RunOutput); err != nil { - return fmt.Errorf("failed to unmarshal run_output output: %w", 
err) - } - job.TimeLimit = attributes.TimeLimit - job.MemoryLimit = attributes.MemoryLimit - job.ShowOutput = attributes.ShowOutput - - return nil -} diff --git a/Exesh/internal/domain/execution/jobs/run_py_job.go b/Exesh/internal/domain/execution/jobs/run_py_job.go deleted file mode 100644 index 35dfc8c7..00000000 --- a/Exesh/internal/domain/execution/jobs/run_py_job.go +++ /dev/null @@ -1,83 +0,0 @@ -package jobs - -import ( - "encoding/json" - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/inputs" - "exesh/internal/domain/execution/outputs" - "fmt" -) - -type RunPyJob struct { - execution.JobDetails - Code execution.Input `json:"code"` - RunInput execution.Input `json:"run_input"` - RunOutput execution.Output `json:"run_output"` - TimeLimit int `json:"time_limit"` - MemoryLimit int `json:"memory_limit"` - ShowOutput bool `json:"show_output"` -} - -func NewRunPyJob( - id execution.JobID, - code execution.Input, - runInput execution.Input, - runOutput execution.Output, - timeLimit int, - memoryLimit int, - showOutput bool) RunPyJob { - return RunPyJob{ - JobDetails: execution.JobDetails{ - ID: id, - Type: execution.RunPyJobType, - }, - Code: code, - RunInput: runInput, - RunOutput: runOutput, - TimeLimit: timeLimit, - MemoryLimit: memoryLimit, - ShowOutput: showOutput, - } -} - -func (job RunPyJob) GetInputs() []execution.Input { - return []execution.Input{job.Code, job.RunInput} -} - -func (job RunPyJob) GetOutput() execution.Output { - return job.RunOutput -} - -func (job *RunPyJob) UnmarshalJSON(data []byte) error { - var err error - if err = json.Unmarshal(data, &job.JobDetails); err != nil { - return fmt.Errorf("failed to unmarshal details: %w", err) - } - - attributes := struct { - Code json.RawMessage `json:"code"` - RunInput json.RawMessage `json:"run_input"` - RunOutput json.RawMessage `json:"run_output"` - TimeLimit int `json:"time_limit"` - MemoryLimit int `json:"memory_limit"` - ShowOutput bool `json:"show_output"` - }{} - if err 
= json.Unmarshal(data, &attributes); err != nil { - return fmt.Errorf("failed to unmarshal %s job attributes: %w", job.Type, err) - } - - if job.Code, err = inputs.UnmarshalInputJSON(attributes.Code); err != nil { - return fmt.Errorf("failed to unmarshal code input: %w", err) - } - if job.RunInput, err = inputs.UnmarshalInputJSON(attributes.RunInput); err != nil { - return fmt.Errorf("inputs to unmarshal run_input input: %w", err) - } - if job.RunOutput, err = outputs.UnmarshalOutputJSON(attributes.RunOutput); err != nil { - return fmt.Errorf("failed to unmarshal run_output output: %w", err) - } - job.TimeLimit = attributes.TimeLimit - job.MemoryLimit = attributes.MemoryLimit - job.ShowOutput = attributes.ShowOutput - - return nil -} diff --git a/Exesh/internal/domain/execution/jobs_graph.go b/Exesh/internal/domain/execution/jobs_graph.go new file mode 100644 index 00000000..f8337129 --- /dev/null +++ b/Exesh/internal/domain/execution/jobs_graph.go @@ -0,0 +1,82 @@ +package execution + +import ( + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/job/jobs" + "sync" +) + +type jobsGraph struct { + mu sync.Mutex + + succJobs map[job.ID][]jobs.Job + doneDeps map[job.ID]int + + toPick []jobs.Job + + totalJobs int + doneJobs int +} + +func newJobsGraph(jbs []jobs.Job) *jobsGraph { + g := jobsGraph{ + mu: sync.Mutex{}, + + succJobs: make(map[job.ID][]jobs.Job), + doneDeps: make(map[job.ID]int), + + toPick: make([]jobs.Job, 0), + + totalJobs: len(jbs), + doneJobs: 0, + } + + for _, jb := range jbs { + deps := jb.GetDependencies() + + for _, dep := range deps { + if _, ok := g.succJobs[dep]; !ok { + g.succJobs[dep] = make([]jobs.Job, 0) + } + g.succJobs[dep] = append(g.succJobs[dep], jb) + } + + g.doneDeps[jb.GetID()] = 0 + if len(deps) == 0 { + g.toPick = append(g.toPick, jb) + } + } + + return &g +} + +func (g *jobsGraph) pickJobs() []jobs.Job { + g.mu.Lock() + defer g.mu.Unlock() + + pickedJobs := make([]jobs.Job, 0, len(g.toPick)) + 
pickedJobs = append(pickedJobs, g.toPick...) + g.toPick = make([]jobs.Job, 0) + + return pickedJobs +} + +func (g *jobsGraph) doneJob(jobID job.ID) { + g.mu.Lock() + defer g.mu.Unlock() + + g.doneJobs++ + for _, succJob := range g.succJobs[jobID] { + g.doneDeps[succJob.GetID()]++ + if g.doneDeps[succJob.GetID()] == len(succJob.GetDependencies()) { + g.toPick = append(g.toPick, succJob) + } + } +} + +func (g *jobsGraph) isDone() bool { + g.mu.Lock() + defer g.mu.Unlock() + + return g.doneJobs == g.totalJobs +} diff --git a/Exesh/internal/domain/execution/message.go b/Exesh/internal/domain/execution/message.go deleted file mode 100644 index 7dabd5c8..00000000 --- a/Exesh/internal/domain/execution/message.go +++ /dev/null @@ -1,33 +0,0 @@ -package execution - -type ( - Message interface { - GetType() MessageType - GetExecutionID() ID - } - - MessageDetails struct { - ExecutionID ID `json:"execution_id"` - Type MessageType `json:"type"` - } - - MessageType string - - MessageStatus string -) - -const ( - StartExecutionMessage MessageType = "start" - CompileStepMessage MessageType = "compile" - RunStepMessage MessageType = "run" - CheckStepMessage MessageType = "check" - FinishExecutionMessage MessageType = "finish" -) - -func (m MessageDetails) GetType() MessageType { - return m.Type -} - -func (m MessageDetails) GetExecutionID() ID { - return m.ExecutionID -} diff --git a/Exesh/internal/domain/execution/message/message.go b/Exesh/internal/domain/execution/message/message.go new file mode 100644 index 00000000..d2427631 --- /dev/null +++ b/Exesh/internal/domain/execution/message/message.go @@ -0,0 +1,34 @@ +package message + +import "exesh/internal/domain/execution" + +type ( + IMessage interface { + GetType() Type + GetExecutionID() execution.ID + } + + Details struct { + ExecutionID execution.ID `json:"execution_id"` + Type Type `json:"type"` + } + + Type string + Status string +) + +const ( + StartExecution Type = "start" + CompileJob Type = "compile" + RunJob Type = "run" + CheckJob 
Type = "check" + FinishExecution Type = "finish" +) + +func (msg *Details) GetType() Type { + return msg.Type +} + +func (msg *Details) GetExecutionID() execution.ID { + return msg.ExecutionID +} diff --git a/Exesh/internal/domain/execution/message/messages/check_job_message.go b/Exesh/internal/domain/execution/message/messages/check_job_message.go new file mode 100644 index 00000000..2556ad49 --- /dev/null +++ b/Exesh/internal/domain/execution/message/messages/check_job_message.go @@ -0,0 +1,30 @@ +package messages + +import ( + "exesh/internal/domain/execution" + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/message" +) + +type CheckJobMessage struct { + message.Details + JobName job.DefinitionName `json:"job"` + CheckStatus job.Status `json:"status"` +} + +func NewCheckJobMessage( + executionID execution.ID, + jobName job.DefinitionName, + status job.Status, +) Message { + return Message{ + &CheckJobMessage{ + Details: message.Details{ + ExecutionID: executionID, + Type: message.CheckJob, + }, + JobName: jobName, + CheckStatus: status, + }, + } +} diff --git a/Exesh/internal/domain/execution/message/messages/compile_job_message.go b/Exesh/internal/domain/execution/message/messages/compile_job_message.go new file mode 100644 index 00000000..3205c1c7 --- /dev/null +++ b/Exesh/internal/domain/execution/message/messages/compile_job_message.go @@ -0,0 +1,48 @@ +package messages + +import ( + "exesh/internal/domain/execution" + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/message" +) + +type CompileJobMessage struct { + message.Details + JobName job.DefinitionName `json:"job"` + CompileStatus job.Status `json:"status"` + Error string `json:"error,omitempty"` +} + +func NewCompileJobMessageOk( + executionID execution.ID, + jobName job.DefinitionName, +) Message { + return Message{ + &CompileJobMessage{ + Details: message.Details{ + ExecutionID: executionID, + Type: message.CompileJob, + }, + JobName: jobName, + 
CompileStatus: job.StatusOK, + }, + } +} + +func NewCompileJobMessageError( + executionID execution.ID, + jobName job.DefinitionName, + err string, +) Message { + return Message{ + &CompileJobMessage{ + Details: message.Details{ + ExecutionID: executionID, + Type: message.CompileJob, + }, + JobName: jobName, + CompileStatus: job.StatusCE, + Error: err, + }, + } +} diff --git a/Exesh/internal/domain/execution/message/messages/finish_execution_message.go b/Exesh/internal/domain/execution/message/messages/finish_execution_message.go new file mode 100644 index 00000000..22a4c3d1 --- /dev/null +++ b/Exesh/internal/domain/execution/message/messages/finish_execution_message.go @@ -0,0 +1,34 @@ +package messages + +import ( + "exesh/internal/domain/execution" + "exesh/internal/domain/execution/message" +) + +type FinishExecutionMessage struct { + message.Details + Error string `json:"error,omitempty"` +} + +func NewFinishExecutionMessageOk(executionID execution.ID) Message { + return Message{ + &FinishExecutionMessage{ + Details: message.Details{ + ExecutionID: executionID, + Type: message.FinishExecution, + }, + }, + } +} + +func NewFinishExecutionMessageError(executionID execution.ID, error string) Message { + return Message{ + &FinishExecutionMessage{ + Details: message.Details{ + ExecutionID: executionID, + Type: message.FinishExecution, + }, + Error: error, + }, + } +} diff --git a/Exesh/internal/domain/execution/message/messages/message.go b/Exesh/internal/domain/execution/message/messages/message.go new file mode 100644 index 00000000..3c4f37c4 --- /dev/null +++ b/Exesh/internal/domain/execution/message/messages/message.go @@ -0,0 +1,39 @@ +package messages + +import ( + "encoding/json" + "exesh/internal/domain/execution/message" + "fmt" +) + +type Message struct { + message.IMessage +} + +func (msg *Message) UnmarshalJSON(data []byte) error { + var details message.Details + if err := json.Unmarshal(data, &details); err != nil { + return fmt.Errorf("failed to 
unmarshal message details: %w", err) + } + + switch details.Type { + case message.StartExecution: + msg.IMessage = &StartExecutionMessage{} + case message.CompileJob: + msg.IMessage = &CompileJobMessage{} + case message.RunJob: + msg.IMessage = &RunJobMessage{} + case message.CheckJob: + msg.IMessage = &CheckJobMessage{} + case message.FinishExecution: + msg.IMessage = &FinishExecutionMessage{} + default: + return fmt.Errorf("unknown message type: %s", details.Type) + } + + if err := json.Unmarshal(data, msg.IMessage); err != nil { + return fmt.Errorf("failed to unmarshal %s message: %w", details.Type, err) + } + + return nil +} diff --git a/Exesh/internal/domain/execution/message/messages/run_job_message.go b/Exesh/internal/domain/execution/message/messages/run_job_message.go new file mode 100644 index 00000000..56270d71 --- /dev/null +++ b/Exesh/internal/domain/execution/message/messages/run_job_message.go @@ -0,0 +1,49 @@ +package messages + +import ( + "exesh/internal/domain/execution" + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/message" +) + +type RunJobMessage struct { + message.Details + JobName job.DefinitionName `json:"job"` + RunStatus job.Status `json:"status"` + Output string `json:"output,omitempty"` +} + +func NewRunJobMessage( + executionID execution.ID, + jobName job.DefinitionName, + status job.Status, +) Message { + return Message{ + &RunJobMessage{ + Details: message.Details{ + ExecutionID: executionID, + Type: message.RunJob, + }, + JobName: jobName, + RunStatus: status, + }, + } +} + +func NewRunJobMessageWithOutput( + executionID execution.ID, + jobName job.DefinitionName, + output string, +) Message { + return Message{ + &RunJobMessage{ + Details: message.Details{ + ExecutionID: executionID, + Type: message.RunJob, + }, + JobName: jobName, + RunStatus: job.StatusOK, + Output: output, + }, + } +} diff --git a/Exesh/internal/domain/execution/message/messages/start_execution_message.go 
b/Exesh/internal/domain/execution/message/messages/start_execution_message.go new file mode 100644 index 00000000..07e826d8 --- /dev/null +++ b/Exesh/internal/domain/execution/message/messages/start_execution_message.go @@ -0,0 +1,21 @@ +package messages + +import ( + "exesh/internal/domain/execution" + "exesh/internal/domain/execution/message" +) + +type StartExecutionMessage struct { + message.Details +} + +func NewStartExecutionMessage(executionID execution.ID) Message { + return Message{ + &StartExecutionMessage{ + Details: message.Details{ + ExecutionID: executionID, + Type: message.StartExecution, + }, + }, + } +} diff --git a/Exesh/internal/domain/execution/messages/check_step_message.go b/Exesh/internal/domain/execution/messages/check_step_message.go deleted file mode 100644 index 3fa1221b..00000000 --- a/Exesh/internal/domain/execution/messages/check_step_message.go +++ /dev/null @@ -1,27 +0,0 @@ -package messages - -import ( - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/results" -) - -type CheckStepMessage struct { - execution.MessageDetails - StepName execution.StepName `json:"step_name"` - CheckStatus results.CheckStatus `json:"status"` -} - -func NewCheckStepMessage( - executionID execution.ID, - stepName execution.StepName, - status results.CheckStatus, -) CheckStepMessage { - return CheckStepMessage{ - MessageDetails: execution.MessageDetails{ - ExecutionID: executionID, - Type: execution.CheckStepMessage, - }, - StepName: stepName, - CheckStatus: status, - } -} diff --git a/Exesh/internal/domain/execution/messages/compile_step_message.go b/Exesh/internal/domain/execution/messages/compile_step_message.go deleted file mode 100644 index 9f1c9b40..00000000 --- a/Exesh/internal/domain/execution/messages/compile_step_message.go +++ /dev/null @@ -1,43 +0,0 @@ -package messages - -import ( - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/results" -) - -type CompileStepMessage struct { - execution.MessageDetails 
- StepName execution.StepName `json:"step_name"` - CompileStatus results.CompileStatus `json:"status"` - Error string `json:"error,omitempty"` -} - -func NewCompileStepMessage( - executionID execution.ID, - stepName execution.StepName, -) CompileStepMessage { - return CompileStepMessage{ - MessageDetails: execution.MessageDetails{ - ExecutionID: executionID, - Type: execution.CompileStepMessage, - }, - StepName: stepName, - CompileStatus: results.CompileStatusOK, - } -} - -func NewCompileStepMessageError( - executionID execution.ID, - stepName execution.StepName, - err string, -) CompileStepMessage { - return CompileStepMessage{ - MessageDetails: execution.MessageDetails{ - ExecutionID: executionID, - Type: execution.CompileStepMessage, - }, - StepName: stepName, - CompileStatus: results.CompileStatusCE, - Error: err, - } -} diff --git a/Exesh/internal/domain/execution/messages/finish_execution_message.go b/Exesh/internal/domain/execution/messages/finish_execution_message.go deleted file mode 100644 index f7031884..00000000 --- a/Exesh/internal/domain/execution/messages/finish_execution_message.go +++ /dev/null @@ -1,27 +0,0 @@ -package messages - -import "exesh/internal/domain/execution" - -type FinishExecutionMessage struct { - execution.MessageDetails - Error string `json:"error,omitempty"` -} - -func NewFinishExecutionMessage(executionID execution.ID) FinishExecutionMessage { - return FinishExecutionMessage{ - MessageDetails: execution.MessageDetails{ - ExecutionID: executionID, - Type: execution.FinishExecutionMessage, - }, - } -} - -func NewFinishExecutionMessageError(executionID execution.ID, error string) FinishExecutionMessage { - return FinishExecutionMessage{ - MessageDetails: execution.MessageDetails{ - ExecutionID: executionID, - Type: execution.FinishExecutionMessage, - }, - Error: error, - } -} diff --git a/Exesh/internal/domain/execution/messages/run_step_message.go b/Exesh/internal/domain/execution/messages/run_step_message.go deleted file mode 
100644 index c7b5361b..00000000 --- a/Exesh/internal/domain/execution/messages/run_step_message.go +++ /dev/null @@ -1,44 +0,0 @@ -package messages - -import ( - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/results" -) - -type RunStepMessage struct { - execution.MessageDetails - StepName execution.StepName `json:"step_name"` - RunStatus results.RunStatus `json:"status"` - Output string `json:"output,omitempty"` -} - -func NewRunStepMessage( - executionID execution.ID, - stepName execution.StepName, - status results.RunStatus, -) RunStepMessage { - return RunStepMessage{ - MessageDetails: execution.MessageDetails{ - ExecutionID: executionID, - Type: execution.RunStepMessage, - }, - StepName: stepName, - RunStatus: status, - } -} - -func NewRunStepMessageWithOutput( - executionID execution.ID, - stepName execution.StepName, - output string, -) RunStepMessage { - return RunStepMessage{ - MessageDetails: execution.MessageDetails{ - ExecutionID: executionID, - Type: execution.RunStepMessage, - }, - StepName: stepName, - RunStatus: results.RunStatusOK, - Output: output, - } -} diff --git a/Exesh/internal/domain/execution/messages/start_execution_message.go b/Exesh/internal/domain/execution/messages/start_execution_message.go deleted file mode 100644 index eb1647f9..00000000 --- a/Exesh/internal/domain/execution/messages/start_execution_message.go +++ /dev/null @@ -1,16 +0,0 @@ -package messages - -import "exesh/internal/domain/execution" - -type StartExecutionMessage struct { - execution.MessageDetails -} - -func NewStartExecutionMessage(executionID execution.ID) StartExecutionMessage { - return StartExecutionMessage{ - MessageDetails: execution.MessageDetails{ - ExecutionID: executionID, - Type: execution.StartExecutionMessage, - }, - } -} diff --git a/Exesh/internal/domain/execution/output.go b/Exesh/internal/domain/execution/output.go deleted file mode 100644 index 239fdfab..00000000 --- a/Exesh/internal/domain/execution/output.go +++ /dev/null 
@@ -1,27 +0,0 @@ -package execution - -type ( - Output interface { - GetType() OutputType - GetFile() string - } - - OutputDetails struct { - Type OutputType `json:"type"` - File string `json:"file"` - } - - OutputType string -) - -const ( - ArtifactOutputType OutputType = "artifact" -) - -func (output OutputDetails) GetType() OutputType { - return output.Type -} - -func (output OutputDetails) GetFile() string { - return output.File -} diff --git a/Exesh/internal/domain/execution/output/output.go b/Exesh/internal/domain/execution/output/output.go new file mode 100644 index 00000000..0cc464f9 --- /dev/null +++ b/Exesh/internal/domain/execution/output/output.go @@ -0,0 +1,11 @@ +package output + +type Output struct { + File string `json:"file"` +} + +func NewOutput(file string) Output { + return Output{ + File: file, + } +} diff --git a/Exesh/internal/domain/execution/outputs/artifact_output.go b/Exesh/internal/domain/execution/outputs/artifact_output.go deleted file mode 100644 index 4de54c48..00000000 --- a/Exesh/internal/domain/execution/outputs/artifact_output.go +++ /dev/null @@ -1,18 +0,0 @@ -package outputs - -import "exesh/internal/domain/execution" - -type ArtifactOutput struct { - execution.OutputDetails - JobID execution.JobID `json:"job_id"` -} - -func NewArtifactOutput(file string, jobID execution.JobID) ArtifactOutput { - return ArtifactOutput{ - OutputDetails: execution.OutputDetails{ - Type: execution.ArtifactOutputType, - File: file, - }, - JobID: jobID, - } -} diff --git a/Exesh/internal/domain/execution/outputs/outputs.go b/Exesh/internal/domain/execution/outputs/outputs.go deleted file mode 100644 index ed9ffe76..00000000 --- a/Exesh/internal/domain/execution/outputs/outputs.go +++ /dev/null @@ -1,29 +0,0 @@ -package outputs - -import ( - "encoding/json" - "exesh/internal/domain/execution" - "fmt" -) - -func UnmarshalOutputJSON(data []byte) (output execution.Output, err error) { - var details execution.OutputDetails - if err = json.Unmarshal(data, 
&details); err != nil { - err = fmt.Errorf("failed to unmarshal output details: %w", err) - return - } - - switch details.Type { - case execution.ArtifactOutputType: - output = &ArtifactOutput{} - default: - err = fmt.Errorf("unknown output type: %s", details.Type) - return - } - - if err = json.Unmarshal(data, output); err != nil { - err = fmt.Errorf("failed to unmarshal %s output: %w", details.Type, err) - return - } - return -} diff --git a/Exesh/internal/domain/execution/result.go b/Exesh/internal/domain/execution/result.go deleted file mode 100644 index bd0427b0..00000000 --- a/Exesh/internal/domain/execution/result.go +++ /dev/null @@ -1,54 +0,0 @@ -package execution - -import ( - "errors" - "time" -) - -type ( - Result interface { - GetJobID() JobID - GetType() ResultType - GetDoneAt() time.Time - GetError() error - ShouldFinishExecution() bool - } - - ResultDetails struct { - ID JobID `json:"id"` - Type ResultType `json:"type"` - DoneAt time.Time `json:"done_at"` - Error string `json:"error,omitempty"` - } - - ResultType string -) - -const ( - CompileResult ResultType = "compile" - RunResult ResultType = "run" - CheckResult ResultType = "check" -) - -func (r ResultDetails) GetJobID() JobID { - return r.ID -} - -func (r ResultDetails) GetType() ResultType { - return r.Type -} - -func (r ResultDetails) GetDoneAt() time.Time { - return r.DoneAt -} - -func (r ResultDetails) GetError() error { - if r.Error == "" { - return nil - } - return errors.New(r.Error) -} - -func (r ResultDetails) ShouldFinishExecution() bool { - panic("this panic would never happen!") -} diff --git a/Exesh/internal/domain/execution/result/result.go b/Exesh/internal/domain/execution/result/result.go new file mode 100644 index 00000000..ac3897d5 --- /dev/null +++ b/Exesh/internal/domain/execution/result/result.go @@ -0,0 +1,56 @@ +package result + +import ( + "errors" + "exesh/internal/domain/execution/job" + "time" +) + +type ( + IResult interface { + GetType() Type + GetJobID() job.ID + 
GetStatus() job.Status + GetDoneAt() time.Time + GetError() error + } + + Details struct { + Type Type `json:"type"` + ID job.ID `json:"id"` + Status job.Status `json:"status"` + DoneAt time.Time `json:"done_at"` + Error string `json:"error,omitempty"` + } + + Type string +) + +const ( + Compile Type = "compile" + Run Type = "run" + Check Type = "check" +) + +func (res *Details) GetType() Type { + return res.Type +} + +func (res *Details) GetJobID() job.ID { + return res.ID +} + +func (res *Details) GetStatus() job.Status { + return res.Status +} + +func (res *Details) GetDoneAt() time.Time { + return res.DoneAt +} + +func (res *Details) GetError() error { + if res.Error == "" { + return nil + } + return errors.New(res.Error) +} diff --git a/Exesh/internal/domain/execution/result/results/check_result.go b/Exesh/internal/domain/execution/result/results/check_result.go new file mode 100644 index 00000000..89df6442 --- /dev/null +++ b/Exesh/internal/domain/execution/result/results/check_result.go @@ -0,0 +1,50 @@ +package results + +import ( + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/result" + "time" +) + +type CheckResult struct { + result.Details +} + +func NewCheckResultOK(jobID job.ID) Result { + return Result{ + &CheckResult{ + Details: result.Details{ + Type: result.Check, + ID: jobID, + Status: job.StatusOK, + DoneAt: time.Now(), + }, + }, + } +} + +func NewCheckResultWA(jobID job.ID) Result { + return Result{ + &CheckResult{ + Details: result.Details{ + Type: result.Check, + ID: jobID, + Status: job.StatusWA, + DoneAt: time.Now(), + }, + }, + } +} + +func NewCheckResultErr(jobID job.ID, err string) Result { + return Result{ + &CheckResult{ + Details: result.Details{ + Type: result.Check, + ID: jobID, + DoneAt: time.Now(), + Error: err, + }, + }, + } +} diff --git a/Exesh/internal/domain/execution/result/results/compile_result.go b/Exesh/internal/domain/execution/result/results/compile_result.go new file mode 100644 index 
00000000..cea43d43 --- /dev/null +++ b/Exesh/internal/domain/execution/result/results/compile_result.go @@ -0,0 +1,52 @@ +package results + +import ( + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/result" + "time" +) + +type CompileResult struct { + result.Details + CompilationError string `json:"compilation_error"` +} + +func NewCompileResultOK(jobID job.ID) Result { + return Result{ + &CompileResult{ + Details: result.Details{ + Type: result.Compile, + ID: jobID, + Status: job.StatusOK, + DoneAt: time.Now(), + }, + }, + } +} + +func NewCompileResultCE(jobID job.ID, compilationError string) Result { + return Result{ + &CompileResult{ + Details: result.Details{ + Type: result.Compile, + ID: jobID, + Status: job.StatusCE, + DoneAt: time.Now(), + }, + CompilationError: compilationError, + }, + } +} + +func NewCompileResultErr(jobID job.ID, err string) Result { + return Result{ + &CompileResult{ + Details: result.Details{ + Type: result.Compile, + ID: jobID, + DoneAt: time.Now(), + Error: err, + }, + }, + } +} diff --git a/Exesh/internal/domain/execution/result/results/result.go b/Exesh/internal/domain/execution/result/results/result.go new file mode 100644 index 00000000..3a54601a --- /dev/null +++ b/Exesh/internal/domain/execution/result/results/result.go @@ -0,0 +1,47 @@ +package results + +import ( + "encoding/json" + "exesh/internal/domain/execution/result" + "fmt" +) + +type Result struct { + result.IResult +} + +func (res *Result) UnmarshalJSON(data []byte) error { + var details result.Details + if err := json.Unmarshal(data, &details); err != nil { + return fmt.Errorf("failed to unmarshal result details: %w", err) + } + + switch details.Type { + case result.Compile: + res.IResult = &CompileResult{} + case result.Run: + res.IResult = &RunResult{} + case result.Check: + res.IResult = &CheckResult{} + default: + return fmt.Errorf("unknown result type: %s", details.Type) + } + + if err := json.Unmarshal(data, res.IResult); err != nil { 
+ return fmt.Errorf("failed to unmarshal %s result: %w", details.Type, err) + } + + return nil +} + +func (res *Result) AsCompile() *CompileResult { + return res.IResult.(*CompileResult) +} + +func (res *Result) AsRun() *RunResult { + return res.IResult.(*RunResult) +} + +func (res *Result) AsCheck() *CheckResult { + return res.IResult.(*CheckResult) +} diff --git a/Exesh/internal/domain/execution/result/results/run_result.go b/Exesh/internal/domain/execution/result/results/run_result.go new file mode 100644 index 00000000..cb786779 --- /dev/null +++ b/Exesh/internal/domain/execution/result/results/run_result.go @@ -0,0 +1,98 @@ +package results + +import ( + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/result" + "time" +) + +type RunResult struct { + result.Details + HasOutput bool `json:"has_output"` + Output string `json:"output,omitempty"` +} + +func NewRunResultOK(jobID job.ID) Result { + return Result{ + &RunResult{ + Details: result.Details{ + Type: result.Run, + ID: jobID, + Status: job.StatusOK, + DoneAt: time.Now(), + }, + HasOutput: false, + }, + } +} + +func NewRunResultWithOutput(jobID job.ID, out string) Result { + return Result{ + &RunResult{ + Details: result.Details{ + Type: result.Run, + ID: jobID, + Status: job.StatusOK, + DoneAt: time.Now(), + }, + HasOutput: true, + Output: out, + }, + } +} + +func NewRunResultTL(jobID job.ID) Result { + return Result{ + &RunResult{ + Details: result.Details{ + Type: result.Run, + ID: jobID, + Status: job.StatusTL, + DoneAt: time.Now(), + }, + HasOutput: false, + }, + } +} + +func NewRunResultML(jobID job.ID) Result { + return Result{ + &RunResult{ + Details: result.Details{ + Type: result.Run, + ID: jobID, + Status: job.StatusML, + DoneAt: time.Now(), + }, + HasOutput: false, + }, + } +} + +func NewRunResultRE(jobID job.ID) Result { + return Result{ + &RunResult{ + Details: result.Details{ + Type: result.Run, + ID: jobID, + Status: job.StatusRE, + DoneAt: time.Now(), + }, + 
HasOutput: false, + }, + } +} + +func NewRunResultErr(jobID job.ID, err string) Result { + return Result{ + &RunResult{ + Details: result.Details{ + Type: result.Run, + ID: jobID, + DoneAt: time.Now(), + Error: err, + }, + HasOutput: false, + }, + } +} diff --git a/Exesh/internal/domain/execution/results/check_result.go b/Exesh/internal/domain/execution/results/check_result.go deleted file mode 100644 index fe5c00df..00000000 --- a/Exesh/internal/domain/execution/results/check_result.go +++ /dev/null @@ -1,21 +0,0 @@ -package results - -import "exesh/internal/domain/execution" - -type ( - CheckResult struct { - execution.ResultDetails - Status CheckStatus `json:"status"` - } - - CheckStatus string -) - -const ( - CheckStatusOK CheckStatus = "OK" - CheckStatusWA CheckStatus = "WA" -) - -func (r CheckResult) ShouldFinishExecution() bool { - return r.Status != CheckStatusOK -} diff --git a/Exesh/internal/domain/execution/results/compile_result.go b/Exesh/internal/domain/execution/results/compile_result.go deleted file mode 100644 index 64de487e..00000000 --- a/Exesh/internal/domain/execution/results/compile_result.go +++ /dev/null @@ -1,22 +0,0 @@ -package results - -import "exesh/internal/domain/execution" - -type ( - CompileResult struct { - execution.ResultDetails - Status CompileStatus `json:"status"` - CompilationError string `json:"compilation_error"` - } - - CompileStatus string -) - -const ( - CompileStatusOK CompileStatus = "OK" - CompileStatusCE CompileStatus = "CE" -) - -func (r CompileResult) ShouldFinishExecution() bool { - return r.Status != CompileStatusOK -} diff --git a/Exesh/internal/domain/execution/results/results.go b/Exesh/internal/domain/execution/results/results.go deleted file mode 100644 index 755dfc54..00000000 --- a/Exesh/internal/domain/execution/results/results.go +++ /dev/null @@ -1,53 +0,0 @@ -package results - -import ( - "encoding/json" - "exesh/internal/domain/execution" - "fmt" -) - -func UnmarshalResultJSON(data []byte) (result 
execution.Result, err error) { - var details execution.ResultDetails - if err = json.Unmarshal(data, &details); err != nil { - err = fmt.Errorf("failed to unmarshal details: %w", err) - return - } - - switch details.Type { - case execution.CompileResult: - result = &CompileResult{} - case execution.RunResult: - result = &RunResult{} - case execution.CheckResult: - result = &CheckResult{} - default: - err = fmt.Errorf("unknown result type: %s", details.Type) - return - } - - if err = json.Unmarshal(data, result); err != nil { - err = fmt.Errorf("failed to unmarshal %s result: %w", details.Type, err) - return - } - return -} - -func UnmarshalResultsJSON(data []byte) (resultsArray []execution.Result, err error) { - var array []json.RawMessage - if err = json.Unmarshal(data, &array); err != nil { - err = fmt.Errorf("failed to unmarshal array: %w", err) - return - } - - resultsArray = make([]execution.Result, 0, len(array)) - for _, item := range array { - var result execution.Result - result, err = UnmarshalResultJSON(item) - if err != nil { - err = fmt.Errorf("failed to unmarshal result: %w", err) - return - } - resultsArray = append(resultsArray, result) - } - return -} diff --git a/Exesh/internal/domain/execution/results/run_result.go b/Exesh/internal/domain/execution/results/run_result.go deleted file mode 100644 index 08e37ab1..00000000 --- a/Exesh/internal/domain/execution/results/run_result.go +++ /dev/null @@ -1,27 +0,0 @@ -package results - -import ( - "exesh/internal/domain/execution" -) - -type ( - RunResult struct { - execution.ResultDetails - Status RunStatus `json:"status"` - HasOutput bool `json:"has_output"` - Output string `json:"output,omitempty"` - } - - RunStatus string -) - -const ( - RunStatusOK RunStatus = "OK" - RunStatusRE RunStatus = "RE" - RunStatusTL RunStatus = "TL" - RunStatusML RunStatus = "ML" -) - -func (r RunResult) ShouldFinishExecution() bool { - return r.Status != RunStatusOK -} diff --git a/Exesh/internal/domain/execution/source.go 
b/Exesh/internal/domain/execution/source.go deleted file mode 100644 index a4029409..00000000 --- a/Exesh/internal/domain/execution/source.go +++ /dev/null @@ -1,23 +0,0 @@ -package execution - -type ( - Source interface { - GetType() SourceType - } - - SourceDetails struct { - Type SourceType `json:"type"` - } - - SourceType string -) - -const ( - OtherStepSourceType SourceType = "other_step" - InlineSourceType SourceType = "inline" - FilestorageBucketSourceType SourceType = "filestorage_bucket" -) - -func (Source SourceDetails) GetType() SourceType { - return Source.Type -} diff --git a/Exesh/internal/domain/execution/source/id.go b/Exesh/internal/domain/execution/source/id.go new file mode 100644 index 00000000..aee56290 --- /dev/null +++ b/Exesh/internal/domain/execution/source/id.go @@ -0,0 +1,42 @@ +package source + +import ( + "crypto/sha1" + "encoding/json" + "fmt" +) + +type ID [2 * sha1.Size]byte + +func (id *ID) String() string { + return string(id[:]) +} + +func (id *ID) FromString(s string) error { + if len(s) != len(id) { + return fmt.Errorf("invalid hex string length") + } + for _, c := range s { + if '0' <= c && c <= '9' { + continue + } + if 'a' <= c && c <= 'f' { + continue + } + return fmt.Errorf("invalid hex string char: %c", c) + } + copy(id[:], s) + return nil +} + +func (id *ID) MarshalJSON() ([]byte, error) { + return json.Marshal(id.String()) +} + +func (id *ID) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return fmt.Errorf("id should be a string, got %s", data) + } + return id.FromString(s) +} diff --git a/Exesh/internal/domain/execution/source/source.go b/Exesh/internal/domain/execution/source/source.go new file mode 100644 index 00000000..4f90eff5 --- /dev/null +++ b/Exesh/internal/domain/execution/source/source.go @@ -0,0 +1,28 @@ +package source + +type ( + ISource interface { + GetType() Type + GetID() ID + } + + Details struct { + Type Type `json:"type"` + ID ID `json:"id"` + 
} + + Type string +) + +const ( + Inline Type = "inline" + FilestorageBucketFile Type = "filestorage_bucket_file" +) + +func (src *Details) GetType() Type { + return src.Type +} + +func (src *Details) GetID() ID { + return src.ID +} diff --git a/Exesh/internal/domain/execution/source/source_definition.go b/Exesh/internal/domain/execution/source/source_definition.go new file mode 100644 index 00000000..2127410a --- /dev/null +++ b/Exesh/internal/domain/execution/source/source_definition.go @@ -0,0 +1,30 @@ +package source + +type ( + IDefinition interface { + GetType() DefinitionType + GetName() DefinitionName + } + + DefinitionDetails struct { + Type DefinitionType `json:"type"` + Name DefinitionName `json:"name"` + } + + DefinitionType string + DefinitionName string +) + +const ( + InlineDefinition DefinitionType = "inline" + FilestorageBucketDefinition DefinitionType = "filestorage_bucket" + FilestorageBucketFileDefinition DefinitionType = "filestorage_bucket_file" +) + +func (def *DefinitionDetails) GetType() DefinitionType { + return def.Type +} + +func (def *DefinitionDetails) GetName() DefinitionName { + return def.Name +} diff --git a/Exesh/internal/domain/execution/source/sources/filestorage_bucket_file_source.go b/Exesh/internal/domain/execution/source/sources/filestorage_bucket_file_source.go new file mode 100644 index 00000000..cc308e52 --- /dev/null +++ b/Exesh/internal/domain/execution/source/sources/filestorage_bucket_file_source.go @@ -0,0 +1,28 @@ +package sources + +import ( + "exesh/internal/domain/execution/source" + + "github.com/DIvanCode/filestorage/pkg/bucket" +) + +type FilestorageBucketFileSource struct { + source.Details + BucketID bucket.ID `json:"bucket_id"` + DownloadEndpoint string `json:"download_endpoint"` + File string `json:"file"` +} + +func NewFilestorageBucketFileSource(id source.ID, bucketID bucket.ID, downloadEndpoint string, file string) Source { + return Source{ + &FilestorageBucketFileSource{ + Details: source.Details{ + 
ID: id, + Type: source.FilestorageBucketFile, + }, + BucketID: bucketID, + DownloadEndpoint: downloadEndpoint, + File: file, + }, + } +} diff --git a/Exesh/internal/domain/execution/sources/filestorage_bucket_source.go b/Exesh/internal/domain/execution/source/sources/filestorage_bucket_file_source_definition.go similarity index 65% rename from Exesh/internal/domain/execution/sources/filestorage_bucket_source.go rename to Exesh/internal/domain/execution/source/sources/filestorage_bucket_file_source_definition.go index 1b48b05a..438f1d43 100644 --- a/Exesh/internal/domain/execution/sources/filestorage_bucket_source.go +++ b/Exesh/internal/domain/execution/source/sources/filestorage_bucket_file_source_definition.go @@ -1,13 +1,13 @@ package sources import ( - "exesh/internal/domain/execution" + "exesh/internal/domain/execution/source" "github.com/DIvanCode/filestorage/pkg/bucket" ) -type FilestorageBucketSource struct { - execution.SourceDetails +type FilestorageBucketFileSourceDefinition struct { + source.DefinitionDetails BucketID bucket.ID `json:"bucket_id"` DownloadEndpoint string `json:"download_endpoint"` File string `json:"file"` diff --git a/Exesh/internal/domain/execution/source/sources/filestorage_bucket_source_definition.go b/Exesh/internal/domain/execution/source/sources/filestorage_bucket_source_definition.go new file mode 100644 index 00000000..8d0066b5 --- /dev/null +++ b/Exesh/internal/domain/execution/source/sources/filestorage_bucket_source_definition.go @@ -0,0 +1,13 @@ +package sources + +import ( + "exesh/internal/domain/execution/source" + + "github.com/DIvanCode/filestorage/pkg/bucket" +) + +type FilestorageBucketSourceDefinition struct { + source.DefinitionDetails + BucketID bucket.ID `json:"bucket_id"` + DownloadEndpoint string `json:"download_endpoint"` +} diff --git a/Exesh/internal/domain/execution/source/sources/inline_source.go b/Exesh/internal/domain/execution/source/sources/inline_source.go new file mode 100644 index 00000000..52b4bda1 
--- /dev/null +++ b/Exesh/internal/domain/execution/source/sources/inline_source.go @@ -0,0 +1,22 @@ +package sources + +import ( + "exesh/internal/domain/execution/source" +) + +type InlineSource struct { + source.Details + Content string `json:"content"` +} + +func NewInlineSource(id source.ID, content string) Source { + return Source{ + &InlineSource{ + Details: source.Details{ + ID: id, + Type: source.Inline, + }, + Content: content, + }, + } +} diff --git a/Exesh/internal/domain/execution/source/sources/inline_source_definition.go b/Exesh/internal/domain/execution/source/sources/inline_source_definition.go new file mode 100644 index 00000000..5c5cdf15 --- /dev/null +++ b/Exesh/internal/domain/execution/source/sources/inline_source_definition.go @@ -0,0 +1,10 @@ +package sources + +import ( + "exesh/internal/domain/execution/source" +) + +type InlineSourceDefinition struct { + source.DefinitionDetails + Content string `json:"content"` +} diff --git a/Exesh/internal/domain/execution/source/sources/source.go b/Exesh/internal/domain/execution/source/sources/source.go new file mode 100644 index 00000000..33a2f8b9 --- /dev/null +++ b/Exesh/internal/domain/execution/source/sources/source.go @@ -0,0 +1,41 @@ +package sources + +import ( + "encoding/json" + "exesh/internal/domain/execution/source" + "fmt" +) + +type Source struct { + source.ISource +} + +func (src *Source) UnmarshalJSON(data []byte) error { + var details source.Details + if err := json.Unmarshal(data, &details); err != nil { + return fmt.Errorf("failed to unmarshal source details: %w", err) + } + + switch details.Type { + case source.Inline: + src.ISource = &InlineSource{} + case source.FilestorageBucketFile: + src.ISource = &FilestorageBucketFileSource{} + default: + return fmt.Errorf("unknown source type: %s", details.Type) + } + + if err := json.Unmarshal(data, src.ISource); err != nil { + return fmt.Errorf("failed to unmarshal %s source: %w", details.Type, err) + } + + return nil +} + +func (src 
*Source) AsInline() *InlineSource { + return src.ISource.(*InlineSource) +} + +func (src *Source) AsFilestorageBucketFile() *FilestorageBucketFileSource { + return src.ISource.(*FilestorageBucketFileSource) +} diff --git a/Exesh/internal/domain/execution/source/sources/source_definition.go b/Exesh/internal/domain/execution/source/sources/source_definition.go new file mode 100644 index 00000000..54b43ec9 --- /dev/null +++ b/Exesh/internal/domain/execution/source/sources/source_definition.go @@ -0,0 +1,47 @@ +package sources + +import ( + "encoding/json" + "exesh/internal/domain/execution/source" + "fmt" +) + +type Definition struct { + source.IDefinition +} + +func (def *Definition) UnmarshalJSON(data []byte) error { + var details source.DefinitionDetails + if err := json.Unmarshal(data, &details); err != nil { + return fmt.Errorf("failed to unmarshal source definition details: %w", err) + } + + switch details.Type { + case source.InlineDefinition: + def.IDefinition = &InlineSourceDefinition{} + case source.FilestorageBucketDefinition: + def.IDefinition = &FilestorageBucketSourceDefinition{} + case source.FilestorageBucketFileDefinition: + def.IDefinition = &FilestorageBucketFileSourceDefinition{} + default: + return fmt.Errorf("unknown source type: %s", details.Type) + } + + if err := json.Unmarshal(data, def.IDefinition); err != nil { + return fmt.Errorf("failed to unmarshal %s source: %w", details.Type, err) + } + + return nil +} + +func (def *Definition) AsInlineDefinition() *InlineSourceDefinition { + return def.IDefinition.(*InlineSourceDefinition) +} + +func (def *Definition) AsFilestorageBucketDefinition() *FilestorageBucketSourceDefinition { + return def.IDefinition.(*FilestorageBucketSourceDefinition) +} + +func (def *Definition) AsFilestorageBucketFileDefinition() *FilestorageBucketFileSourceDefinition { + return def.IDefinition.(*FilestorageBucketFileSourceDefinition) +} diff --git a/Exesh/internal/domain/execution/sources/inline_source.go 
b/Exesh/internal/domain/execution/sources/inline_source.go deleted file mode 100644 index 87021c82..00000000 --- a/Exesh/internal/domain/execution/sources/inline_source.go +++ /dev/null @@ -1,10 +0,0 @@ -package sources - -import ( - "exesh/internal/domain/execution" -) - -type InlineSource struct { - execution.SourceDetails - Content string `json:"content"` -} diff --git a/Exesh/internal/domain/execution/sources/other_step_source.go b/Exesh/internal/domain/execution/sources/other_step_source.go deleted file mode 100644 index 4f11e344..00000000 --- a/Exesh/internal/domain/execution/sources/other_step_source.go +++ /dev/null @@ -1,10 +0,0 @@ -package sources - -import ( - "exesh/internal/domain/execution" -) - -type OtherStepSource struct { - execution.SourceDetails - StepName execution.StepName `json:"step_name"` -} diff --git a/Exesh/internal/domain/execution/sources/sources.go b/Exesh/internal/domain/execution/sources/sources.go deleted file mode 100644 index a8cf1989..00000000 --- a/Exesh/internal/domain/execution/sources/sources.go +++ /dev/null @@ -1,33 +0,0 @@ -package sources - -import ( - "encoding/json" - "exesh/internal/domain/execution" - "fmt" -) - -func UnmarshalSourceJSON(data []byte) (Source execution.Source, err error) { - var details execution.SourceDetails - if err = json.Unmarshal(data, &details); err != nil { - err = fmt.Errorf("failed to unmarshal source details: %w", err) - return - } - - switch details.Type { - case execution.OtherStepSourceType: - Source = &OtherStepSource{} - case execution.InlineSourceType: - Source = &InlineSource{} - case execution.FilestorageBucketSourceType: - Source = &FilestorageBucketSource{} - default: - err = fmt.Errorf("unknown source type: %s", details.Type) - return - } - - if err = json.Unmarshal(data, Source); err != nil { - err = fmt.Errorf("failed to unmarshal %s source: %w", details.Type, err) - return - } - return -} diff --git a/Exesh/internal/domain/execution/stage.go 
b/Exesh/internal/domain/execution/stage.go new file mode 100644 index 00000000..44e22223 --- /dev/null +++ b/Exesh/internal/domain/execution/stage.go @@ -0,0 +1,18 @@ +package execution + +import ( + "exesh/internal/domain/execution/job/jobs" +) + +type ( + Stage struct { + Name StageName `json:"name"` + Deps []StageName `json:"deps"` + Jobs []jobs.Job `json:"jobs"` + } + + StageName string +) + +func (stage *Stage) BuildGraph() { +} diff --git a/Exesh/internal/domain/execution/stage_definition.go b/Exesh/internal/domain/execution/stage_definition.go new file mode 100644 index 00000000..263e987d --- /dev/null +++ b/Exesh/internal/domain/execution/stage_definition.go @@ -0,0 +1,9 @@ +package execution + +import "exesh/internal/domain/execution/job/jobs" + +type StageDefinition struct { + Name StageName `json:"name"` + Deps []StageName `json:"deps"` + Jobs []jobs.Definition `json:"jobs"` +} diff --git a/Exesh/internal/domain/execution/stages_graph.go b/Exesh/internal/domain/execution/stages_graph.go new file mode 100644 index 00000000..ac9c250d --- /dev/null +++ b/Exesh/internal/domain/execution/stages_graph.go @@ -0,0 +1,115 @@ +package execution + +import ( + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/job/jobs" + "sync" +) + +type stagesGraph struct { + mu sync.Mutex + + succStages map[StageName][]*Stage + doneDeps map[StageName]int + + toPick []*Stage + + jobGraphs map[StageName]*jobsGraph + stageByJobID map[job.ID]*Stage +} + +func newStagesGraph(stages []*Stage) *stagesGraph { + g := stagesGraph{ + mu: sync.Mutex{}, + + succStages: make(map[StageName][]*Stage), + doneDeps: make(map[StageName]int), + + toPick: make([]*Stage, 0), + + jobGraphs: make(map[StageName]*jobsGraph), + stageByJobID: make(map[job.ID]*Stage), + } + + for _, stage := range stages { + for _, dep := range stage.Deps { + if _, ok := g.succStages[dep]; !ok { + g.succStages[dep] = make([]*Stage, 0) + } + g.succStages[dep] = append(g.succStages[dep], stage) + } + + 
g.doneDeps[stage.Name] = 0 + if len(stage.Deps) == 0 { + g.toPick = append(g.toPick, stage) + } + + g.jobGraphs[stage.Name] = newJobsGraph(stage.Jobs) + for _, jb := range stage.Jobs { + g.stageByJobID[jb.GetID()] = stage + } + } + + return &g +} + +func (g *stagesGraph) pickJobs() []jobs.Job { + g.mu.Lock() + defer g.mu.Unlock() + + pickedJobs := make([]jobs.Job, 0) + for _, stage := range g.toPick { + pickedJobs = append(pickedJobs, g.jobGraphs[stage.Name].pickJobs()...) + } + + return pickedJobs +} + +func (g *stagesGraph) doneJob(jobID job.ID, jobStatus job.Status) { + g.mu.Lock() + defer g.mu.Unlock() + + stage := g.stageByJobID[jobID] + + var jb *jobs.Job + for i := range stage.Jobs { + if stage.Jobs[i].GetID() == jobID { + jb = &stage.Jobs[i] + break + } + } + if jb == nil { + return + } + + if jobStatus != jb.GetSuccessStatus() { + g.toPick = make([]*Stage, 0) + return + } + + g.jobGraphs[stage.Name].doneJob(jobID) + if g.jobGraphs[stage.Name].isDone() { + toPick := make([]*Stage, 0) + for i := range g.toPick { + if g.toPick[i].Name != stage.Name { + toPick = append(toPick, g.toPick[i]) + } + } + + for _, succStage := range g.succStages[stage.Name] { + g.doneDeps[succStage.Name]++ + if g.doneDeps[succStage.Name] == len(succStage.Deps) { + toPick = append(toPick, succStage) + } + } + + g.toPick = toPick + } +} + +func (g *stagesGraph) isDone() bool { + g.mu.Lock() + defer g.mu.Unlock() + + return len(g.toPick) == 0 +} diff --git a/Exesh/internal/domain/execution/step.go b/Exesh/internal/domain/execution/step.go deleted file mode 100644 index 06821589..00000000 --- a/Exesh/internal/domain/execution/step.go +++ /dev/null @@ -1,37 +0,0 @@ -package execution - -type ( - Step interface { - GetName() StepName - GetType() StepType - GetSources() []Source - GetDependencies() []StepName - GetAttributes() map[string]any - } - - StepDetails struct { - Name StepName `json:"name"` - Type StepType `json:"type"` - } - - StepName string - - StepType string -) - -const ( - 
CompileCppStepType StepType = "compile_cpp" - CompileGoStepType StepType = "compile_go" - RunCppStepType StepType = "run_cpp" - RunPyStepType StepType = "run_py" - RunGoStepType StepType = "run_go" - CheckCppStepType StepType = "check_cpp" -) - -func (step StepDetails) GetName() StepName { - return step.Name -} - -func (step StepDetails) GetType() StepType { - return step.Type -} diff --git a/Exesh/internal/domain/execution/steps/check_cpp_step.go b/Exesh/internal/domain/execution/steps/check_cpp_step.go deleted file mode 100644 index 5b51f9dc..00000000 --- a/Exesh/internal/domain/execution/steps/check_cpp_step.go +++ /dev/null @@ -1,55 +0,0 @@ -package steps - -import ( - "encoding/json" - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/sources" - "fmt" -) - -type CheckCppStep struct { - execution.StepDetails - CompiledChecker execution.Source `json:"compiled_checker"` - CorrectOutput execution.Source `json:"correct_output"` - SuspectOutput execution.Source `json:"suspect_output"` -} - -func (step CheckCppStep) GetSources() []execution.Source { - return []execution.Source{step.CompiledChecker, step.CorrectOutput, step.SuspectOutput} -} - -func (step CheckCppStep) GetDependencies() []execution.StepName { - return getDependencies(step) -} - -func (step CheckCppStep) GetAttributes() map[string]any { - return map[string]any{} -} - -func (step *CheckCppStep) UnmarshalJSON(data []byte) error { - var err error - if err = json.Unmarshal(data, &step.StepDetails); err != nil { - return fmt.Errorf("failed to unmarshal step details: %w", err) - } - - attributes := struct { - CompiledChecker json.RawMessage `json:"compiled_checker"` - CorrectOutput json.RawMessage `json:"correct_output"` - SuspectOutput json.RawMessage `json:"suspect_output"` - }{} - if err = json.Unmarshal(data, &attributes); err != nil { - return fmt.Errorf("failed to unmarshal %s step attributes: %w", step.Type, err) - } - - if step.CompiledChecker, err = 
sources.UnmarshalSourceJSON(attributes.CompiledChecker); err != nil { - return fmt.Errorf("failed to unmarshal compiled_checker source: %w", err) - } - if step.CorrectOutput, err = sources.UnmarshalSourceJSON(attributes.CorrectOutput); err != nil { - return fmt.Errorf("failed to unmarshal correct_output source: %w", err) - } - if step.SuspectOutput, err = sources.UnmarshalSourceJSON(attributes.SuspectOutput); err != nil { - return fmt.Errorf("failed to unmarshal suspect_output source: %w", err) - } - - return nil -} diff --git a/Exesh/internal/domain/execution/steps/compile_cpp_step.go b/Exesh/internal/domain/execution/steps/compile_cpp_step.go deleted file mode 100644 index 93491526..00000000 --- a/Exesh/internal/domain/execution/steps/compile_cpp_step.go +++ /dev/null @@ -1,45 +0,0 @@ -package steps - -import ( - "encoding/json" - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/sources" - "fmt" -) - -type CompileCppStep struct { - execution.StepDetails - Code execution.Source `json:"code"` -} - -func (step CompileCppStep) GetSources() []execution.Source { - return []execution.Source{step.Code} -} - -func (step CompileCppStep) GetDependencies() []execution.StepName { - return getDependencies(step) -} - -func (step CompileCppStep) GetAttributes() map[string]any { - return map[string]any{} -} - -func (step *CompileCppStep) UnmarshalJSON(data []byte) error { - var err error - if err = json.Unmarshal(data, &step.StepDetails); err != nil { - return fmt.Errorf("failed to unmarshal step details: %w", err) - } - - attributes := struct { - Code json.RawMessage `json:"code"` - }{} - if err = json.Unmarshal(data, &attributes); err != nil { - return fmt.Errorf("failed to unmarshal %s step attributes: %w", step.Type, err) - } - - if step.Code, err = sources.UnmarshalSourceJSON(attributes.Code); err != nil { - return fmt.Errorf("failed to unmarshal code source: %w", err) - } - - return nil -} diff --git 
a/Exesh/internal/domain/execution/steps/compile_go_step.go b/Exesh/internal/domain/execution/steps/compile_go_step.go deleted file mode 100644 index 252503b9..00000000 --- a/Exesh/internal/domain/execution/steps/compile_go_step.go +++ /dev/null @@ -1,45 +0,0 @@ -package steps - -import ( - "encoding/json" - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/sources" - "fmt" -) - -type CompileGoStep struct { - execution.StepDetails - Code execution.Source `json:"code"` -} - -func (step CompileGoStep) GetSources() []execution.Source { - return []execution.Source{step.Code} -} - -func (step CompileGoStep) GetDependencies() []execution.StepName { - return getDependencies(step) -} - -func (step CompileGoStep) GetAttributes() map[string]any { - return map[string]any{} -} - -func (step *CompileGoStep) UnmarshalJSON(data []byte) error { - var err error - if err = json.Unmarshal(data, &step.StepDetails); err != nil { - return fmt.Errorf("failed to unmarshal step details: %w", err) - } - - attributes := struct { - Code json.RawMessage `json:"code"` - }{} - if err = json.Unmarshal(data, &attributes); err != nil { - return fmt.Errorf("failed to unmarshal %s step attributes: %w", step.Type, err) - } - - if step.Code, err = sources.UnmarshalSourceJSON(attributes.Code); err != nil { - return fmt.Errorf("failed to unmarshal code source: %w", err) - } - - return nil -} diff --git a/Exesh/internal/domain/execution/steps/run_cpp_step.go b/Exesh/internal/domain/execution/steps/run_cpp_step.go deleted file mode 100644 index e068c511..00000000 --- a/Exesh/internal/domain/execution/steps/run_cpp_step.go +++ /dev/null @@ -1,63 +0,0 @@ -package steps - -import ( - "encoding/json" - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/sources" - "fmt" -) - -type RunCppStep struct { - execution.StepDetails - CompiledCode execution.Source `json:"compiled_code"` - RunInput execution.Source `json:"run_input"` - TimeLimit int `json:"time_limit"` - MemoryLimit 
int `json:"memory_limit"` - ShowOutput bool `json:"show_output"` -} - -func (step RunCppStep) GetSources() []execution.Source { - return []execution.Source{step.CompiledCode, step.RunInput} -} - -func (step RunCppStep) GetDependencies() []execution.StepName { - return getDependencies(step) -} - -func (step RunCppStep) GetAttributes() map[string]any { - return map[string]any{ - "time_limit": step.TimeLimit, - "memory_limit": step.MemoryLimit, - "show_output": step.ShowOutput, - } -} - -func (step *RunCppStep) UnmarshalJSON(data []byte) error { - var err error - if err = json.Unmarshal(data, &step.StepDetails); err != nil { - return fmt.Errorf("failed to unmarshal step details: %w", err) - } - - attributes := struct { - CompiledCode json.RawMessage `json:"compiled_code"` - RunInput json.RawMessage `json:"run_input"` - TimeLimit int `json:"time_limit"` - MemoryLimit int `json:"memory_limit"` - ShowOutput bool `json:"show_output"` - }{} - if err = json.Unmarshal(data, &attributes); err != nil { - return fmt.Errorf("failed to unmarshal %s step attributes: %w", step.Type, err) - } - - if step.CompiledCode, err = sources.UnmarshalSourceJSON(attributes.CompiledCode); err != nil { - return fmt.Errorf("failed to unmarshal compiled_code source: %w", err) - } - if step.RunInput, err = sources.UnmarshalSourceJSON(attributes.RunInput); err != nil { - return fmt.Errorf("failed to unmarshal run_input source: %w", err) - } - step.TimeLimit = attributes.TimeLimit - step.MemoryLimit = attributes.MemoryLimit - step.ShowOutput = attributes.ShowOutput - - return nil -} diff --git a/Exesh/internal/domain/execution/steps/run_go_step.go b/Exesh/internal/domain/execution/steps/run_go_step.go deleted file mode 100644 index 58f0f56a..00000000 --- a/Exesh/internal/domain/execution/steps/run_go_step.go +++ /dev/null @@ -1,63 +0,0 @@ -package steps - -import ( - "encoding/json" - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/sources" - "fmt" -) - -type RunGoStep struct { - 
execution.StepDetails - CompiledCode execution.Source `json:"compiled_code"` - RunInput execution.Source `json:"run_input"` - TimeLimit int `json:"time_limit"` - MemoryLimit int `json:"memory_limit"` - ShowOutput bool `json:"show_output"` -} - -func (step RunGoStep) GetSources() []execution.Source { - return []execution.Source{step.CompiledCode, step.RunInput} -} - -func (step RunGoStep) GetDependencies() []execution.StepName { - return getDependencies(step) -} - -func (step RunGoStep) GetAttributes() map[string]any { - return map[string]any{ - "time_limit": step.TimeLimit, - "memory_limit": step.MemoryLimit, - "show_output": step.ShowOutput, - } -} - -func (step *RunGoStep) UnmarshalJSON(data []byte) error { - var err error - if err = json.Unmarshal(data, &step.StepDetails); err != nil { - return fmt.Errorf("failed to unmarshal step details: %w", err) - } - - attributes := struct { - CompiledCode json.RawMessage `json:"compiled_code"` - RunInput json.RawMessage `json:"run_input"` - TimeLimit int `json:"time_limit"` - MemoryLimit int `json:"memory_limit"` - ShowOutput bool `json:"show_output"` - }{} - if err = json.Unmarshal(data, &attributes); err != nil { - return fmt.Errorf("failed to unmarshal %s step attributes: %w", step.Type, err) - } - - if step.CompiledCode, err = sources.UnmarshalSourceJSON(attributes.CompiledCode); err != nil { - return fmt.Errorf("failed to unmarshal compiled_code source: %w", err) - } - if step.RunInput, err = sources.UnmarshalSourceJSON(attributes.RunInput); err != nil { - return fmt.Errorf("failed to unmarshal run_input source: %w", err) - } - step.TimeLimit = attributes.TimeLimit - step.MemoryLimit = attributes.MemoryLimit - step.ShowOutput = attributes.ShowOutput - - return nil -} diff --git a/Exesh/internal/domain/execution/steps/run_py_step.go b/Exesh/internal/domain/execution/steps/run_py_step.go deleted file mode 100644 index cc1a6e6a..00000000 --- a/Exesh/internal/domain/execution/steps/run_py_step.go +++ /dev/null @@ -1,63 
+0,0 @@ -package steps - -import ( - "encoding/json" - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/sources" - "fmt" -) - -type RunPyStep struct { - execution.StepDetails - Code execution.Source `json:"code"` - RunInput execution.Source `json:"run_input"` - TimeLimit int `json:"time_limit"` - MemoryLimit int `json:"memory_limit"` - ShowOutput bool `json:"show_output"` -} - -func (step RunPyStep) GetSources() []execution.Source { - return []execution.Source{step.Code, step.RunInput} -} - -func (step RunPyStep) GetDependencies() []execution.StepName { - return getDependencies(step) -} - -func (step RunPyStep) GetAttributes() map[string]any { - return map[string]any{ - "time_limit": step.TimeLimit, - "memory_limit": step.MemoryLimit, - "show_output": step.ShowOutput, - } -} - -func (step *RunPyStep) UnmarshalJSON(data []byte) error { - var err error - if err = json.Unmarshal(data, &step.StepDetails); err != nil { - return fmt.Errorf("failed to unmarshal step details: %w", err) - } - - attributes := struct { - Code json.RawMessage `json:"code"` - RunInput json.RawMessage `json:"run_input"` - TimeLimit int `json:"time_limit"` - MemoryLimit int `json:"memory_limit"` - ShowOutput bool `json:"show_output"` - }{} - if err = json.Unmarshal(data, &attributes); err != nil { - return fmt.Errorf("failed to unmarshal %s step attributes: %w", step.Type, err) - } - - if step.Code, err = sources.UnmarshalSourceJSON(attributes.Code); err != nil { - return fmt.Errorf("failed to unmarshal code source: %w", err) - } - if step.RunInput, err = sources.UnmarshalSourceJSON(attributes.RunInput); err != nil { - return fmt.Errorf("failed to unmarshal run_input source: %w", err) - } - step.TimeLimit = attributes.TimeLimit - step.MemoryLimit = attributes.MemoryLimit - step.ShowOutput = attributes.ShowOutput - - return nil -} diff --git a/Exesh/internal/domain/execution/steps/steps.go b/Exesh/internal/domain/execution/steps/steps.go deleted file mode 100644 index 
e649642c..00000000 --- a/Exesh/internal/domain/execution/steps/steps.go +++ /dev/null @@ -1,74 +0,0 @@ -package steps - -import ( - "encoding/json" - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/sources" - "fmt" -) - -func UnmarshalStepJSON(data []byte) (step execution.Step, err error) { - var details execution.StepDetails - if err = json.Unmarshal(data, &details); err != nil { - err = fmt.Errorf("failed to unmarshal step details: %w", err) - return step, err - } - - switch details.Type { - case execution.CompileCppStepType: - step = &CompileCppStep{} - case execution.CompileGoStepType: - step = &CompileGoStep{} - case execution.RunCppStepType: - step = &RunCppStep{} - case execution.RunGoStepType: - step = &RunGoStep{} - case execution.RunPyStepType: - step = &RunPyStep{} - case execution.CheckCppStepType: - step = &CheckCppStep{} - default: - err = fmt.Errorf("unknown step type: %s", details.Type) - return step, err - } - - if err = json.Unmarshal(data, step); err != nil { - err = fmt.Errorf("failed to unmarshal %s step: %w", details.Type, err) - return step, err - } - return step, err -} - -func UnmarshalStepsJSON(data []byte) (stepsArray []execution.Step, err error) { - var array []json.RawMessage - if err = json.Unmarshal(data, &array); err != nil { - err = fmt.Errorf("failed to unmarshal steps array: %w", err) - return stepsArray, err - } - - stepsArray = make([]execution.Step, 0, len(array)) - for _, item := range array { - var step execution.Step - step, err = UnmarshalStepJSON(item) - if err != nil { - err = fmt.Errorf("failed to unmarshal step: %w", err) - return stepsArray, err - } - stepsArray = append(stepsArray, step) - } - return stepsArray, err -} - -func getDependencies(step execution.Step) []execution.StepName { - dependencies := make(map[execution.StepName]any) - for _, source := range step.GetSources() { - if otherStepSource, ok := source.(*sources.OtherStepSource); ok { - dependencies[otherStepSource.StepName] = 
struct{}{} - } - } - result := make([]execution.StepName, 0, len(dependencies)) - for stepName := range dependencies { - result = append(result, stepName) - } - return result -} diff --git a/Exesh/internal/executor/executors/check_cpp_job_executor.go b/Exesh/internal/executor/executors/check_cpp_job_executor.go index a1536915..a323c0b9 100644 --- a/Exesh/internal/executor/executors/check_cpp_job_executor.go +++ b/Exesh/internal/executor/executors/check_cpp_job_executor.go @@ -3,91 +3,65 @@ package executors import ( "bytes" "context" + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/job/jobs" + "exesh/internal/domain/execution/result/results" + "exesh/internal/runtime" "fmt" "io" "log/slog" - "time" - - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/jobs" - "exesh/internal/domain/execution/results" - "exesh/internal/runtime" ) type CheckCppJobExecutor struct { log *slog.Logger - inputProvider inputProvider + sourceProvider sourceProvider outputProvider outputProvider runtime runtime.Runtime } -func NewCheckCppJobExecutor(log *slog.Logger, inputProvider inputProvider, outputProvider outputProvider, rt runtime.Runtime) *CheckCppJobExecutor { +func NewCheckCppJobExecutor( + log *slog.Logger, + sourceProvider sourceProvider, + outputProvider outputProvider, + runtime runtime.Runtime, +) *CheckCppJobExecutor { return &CheckCppJobExecutor{ log: log, - inputProvider: inputProvider, + sourceProvider: sourceProvider, outputProvider: outputProvider, - runtime: rt, + runtime: runtime, } } -func (e *CheckCppJobExecutor) SupportsType(jobType execution.JobType) bool { - return jobType == execution.CheckCppJobType +func (e *CheckCppJobExecutor) SupportsType(jobType job.Type) bool { + return jobType == job.CheckCpp } -func (e *CheckCppJobExecutor) Execute(ctx context.Context, job execution.Job) execution.Result { - errorResult := func(err error) execution.Result { - return results.CheckResult{ - ResultDetails: execution.ResultDetails{ 
- ID: job.GetID(), - Type: execution.CheckResult, - DoneAt: time.Now(), - Error: err.Error(), - }, - } - } - - okResult := func() execution.Result { - return results.CheckResult{ - ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.CheckResult, - DoneAt: time.Now(), - }, - Status: results.CheckStatusOK, - } - } - - wrongAnswerResult := func() execution.Result { - return results.CheckResult{ - ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.CheckResult, - DoneAt: time.Now(), - }, - Status: results.CheckStatusWA, - } +func (e *CheckCppJobExecutor) Execute(ctx context.Context, jb jobs.Job) results.Result { + errorResult := func(err error) results.Result { + return results.NewCheckResultErr(jb.GetID(), err.Error()) } - if job.GetType() != execution.CheckCppJobType { - return errorResult(fmt.Errorf("unsupported job type %s for %s executor", job.GetType(), execution.CheckCppJobType)) + if jb.GetType() != job.CheckCpp { + return errorResult(fmt.Errorf("unsupported job type %s for %s executor", jb.GetType(), job.CheckCpp)) } - checkCppJob := job.(*jobs.CheckCppJob) + checkCppJob := jb.AsCheckCpp() - compiledChecker, unlock, err := e.inputProvider.Locate(ctx, checkCppJob.CompiledChecker) + compiledChecker, unlock, err := e.sourceProvider.Locate(ctx, checkCppJob.CompiledChecker.SourceID) if err != nil { return errorResult(fmt.Errorf("failed to locate compiled_checker input: %w", err)) } defer unlock() - correctOutput, unlock, err := e.inputProvider.Locate(ctx, checkCppJob.CorrectOutput) + correctOutput, unlock, err := e.sourceProvider.Locate(ctx, checkCppJob.CorrectOutput.SourceID) if err != nil { - return errorResult(fmt.Errorf("failed to read correct_output input: %w", err)) + return errorResult(fmt.Errorf("failed to locate correct_output input: %w", err)) } defer unlock() - suspectOutput, unlock, err := e.inputProvider.Locate(ctx, checkCppJob.SuspectOutput) + suspectOutput, unlock, err := e.sourceProvider.Locate(ctx, 
checkCppJob.SuspectOutput.SourceID) if err != nil { - return errorResult(fmt.Errorf("failed to read suspect_output input: %w", err)) + return errorResult(fmt.Errorf("failed to locate suspect_output input: %w", err)) } defer unlock() @@ -118,11 +92,11 @@ func (e *CheckCppJobExecutor) Execute(ctx context.Context, job execution.Job) ex return errorResult(fmt.Errorf("failed to read check_verdict output: %w", err)) } - if string(checkVerdictOutput) == string(results.CheckStatusOK) { - return okResult() + if string(checkVerdictOutput) == string(job.StatusOK) { + return results.NewCheckResultOK(jb.GetID()) } - if string(checkVerdictOutput) == string(results.CheckStatusWA) { - return wrongAnswerResult() + if string(checkVerdictOutput) == string(job.StatusWA) { + return results.NewCheckResultWA(jb.GetID()) } return errorResult(fmt.Errorf("failed to parse check_verdict output: %s", string(checkVerdictOutput))) } diff --git a/Exesh/internal/executor/executors/compile_cpp_job_executor.go b/Exesh/internal/executor/executors/compile_cpp_job_executor.go index 1815f6cf..00d15a04 100644 --- a/Exesh/internal/executor/executors/compile_cpp_job_executor.go +++ b/Exesh/internal/executor/executors/compile_cpp_job_executor.go @@ -3,83 +3,51 @@ package executors import ( "bytes" "context" + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/job/jobs" + "exesh/internal/domain/execution/result/results" + "exesh/internal/runtime" "fmt" "log/slog" - "time" - - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/jobs" - "exesh/internal/domain/execution/results" - "exesh/internal/runtime" ) type CompileCppJobExecutor struct { log *slog.Logger - inputProvider inputProvider + sourceProvider sourceProvider outputProvider outputProvider runtime runtime.Runtime } -func NewCompileCppJobExecutor(log *slog.Logger, inputProvider inputProvider, outputProvider outputProvider, rt runtime.Runtime) *CompileCppJobExecutor { +func NewCompileCppJobExecutor(log 
*slog.Logger, sourceProvider sourceProvider, outputProvider outputProvider, rt runtime.Runtime) *CompileCppJobExecutor { return &CompileCppJobExecutor{ log: log, - inputProvider: inputProvider, + sourceProvider: sourceProvider, outputProvider: outputProvider, runtime: rt, } } -func (e *CompileCppJobExecutor) SupportsType(jobType execution.JobType) bool { - return jobType == execution.CompileCppJobType +func (e *CompileCppJobExecutor) SupportsType(jobType job.Type) bool { + return jobType == job.CompileCpp } -func (e *CompileCppJobExecutor) Execute(ctx context.Context, job execution.Job) execution.Result { - errorResult := func(err error) execution.Result { - return results.CompileResult{ - ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.CompileResult, - DoneAt: time.Now(), - Error: err.Error(), - }, - } - } - - compilationErrorResult := func(compilationError string) execution.Result { - return results.CompileResult{ - ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.CompileResult, - DoneAt: time.Now(), - }, - Status: results.CompileStatusCE, - CompilationError: compilationError, - } - } - - okResult := func() execution.Result { - return results.CompileResult{ - ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.CompileResult, - DoneAt: time.Now(), - }, - Status: results.CompileStatusOK, - } +func (e *CompileCppJobExecutor) Execute(ctx context.Context, jb jobs.Job) results.Result { + errorResult := func(err error) results.Result { + return results.NewCompileResultErr(jb.GetID(), err.Error()) } - if job.GetType() != execution.CompileCppJobType { - return errorResult(fmt.Errorf("unsupported job type %s for %s executor", job.GetType(), execution.CompileCppJobType)) + if jb.GetType() != job.CompileCpp { + return errorResult(fmt.Errorf("unsupported job type %s for %s executor", jb.GetType(), job.CompileCpp)) } - compileCppJob := job.(*jobs.CompileCppJob) + compileCppJob := 
jb.AsCompileCpp() - code, unlock, err := e.inputProvider.Locate(ctx, compileCppJob.Code) + code, unlock, err := e.sourceProvider.Locate(ctx, compileCppJob.Code.SourceID) if err != nil { return errorResult(fmt.Errorf("failed to locate code input: %w", err)) } defer unlock() - compiledCode, commitOutput, abortOutput, err := e.outputProvider.Reserve(ctx, compileCppJob.CompiledCode) + compiledCode, commitOutput, abortOutput, err := e.outputProvider.Reserve(ctx, jb.GetID(), compileCppJob.CompiledCode.File) if err != nil { return errorResult(fmt.Errorf("failed to locate compiled_code output: %w", err)) } @@ -110,7 +78,7 @@ func (e *CompileCppJobExecutor) Execute(ctx context.Context, job execution.Job) }) if err != nil { e.log.Error("execute g++ in runtime error", slog.Any("err", err)) - return compilationErrorResult(stderr.String()) + return results.NewCompileResultCE(jb.GetID(), stderr.String()) } e.log.Info("command ok") @@ -119,5 +87,5 @@ func (e *CompileCppJobExecutor) Execute(ctx context.Context, job execution.Job) return errorResult(commitErr) } - return okResult() + return results.NewCompileResultOK(jb.GetID()) } diff --git a/Exesh/internal/executor/executors/compile_go_job_executor.go b/Exesh/internal/executor/executors/compile_go_job_executor.go index 8f8b9a70..e21231d6 100644 --- a/Exesh/internal/executor/executors/compile_go_job_executor.go +++ b/Exesh/internal/executor/executors/compile_go_job_executor.go @@ -3,83 +3,50 @@ package executors import ( "bytes" "context" + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/job/jobs" + "exesh/internal/domain/execution/result/results" + "exesh/internal/runtime" "fmt" "log/slog" - "time" - - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/jobs" - "exesh/internal/domain/execution/results" - "exesh/internal/runtime" ) type CompileGoJobExecutor struct { log *slog.Logger - inputProvider inputProvider + sourceProvider sourceProvider outputProvider outputProvider runtime 
runtime.Runtime } -func NewCompileGoJobExecutor(log *slog.Logger, inputProvider inputProvider, outputProvider outputProvider, rt runtime.Runtime) *CompileGoJobExecutor { +func NewCompileGoJobExecutor(log *slog.Logger, sourceProvider sourceProvider, outputProvider outputProvider, rt runtime.Runtime) *CompileGoJobExecutor { return &CompileGoJobExecutor{ log: log, - inputProvider: inputProvider, + sourceProvider: sourceProvider, outputProvider: outputProvider, runtime: rt, } } -func (e *CompileGoJobExecutor) SupportsType(jobType execution.JobType) bool { - return jobType == execution.CompileGoJobType +func (e *CompileGoJobExecutor) SupportsType(jobType job.Type) bool { + return jobType == job.CompileGo } -func (e *CompileGoJobExecutor) Execute(ctx context.Context, job execution.Job) execution.Result { - errorResult := func(err error) execution.Result { - return results.CompileResult{ - ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.CompileResult, - DoneAt: time.Now(), - Error: err.Error(), - }, - } +func (e *CompileGoJobExecutor) Execute(ctx context.Context, jb jobs.Job) results.Result { + errorResult := func(err error) results.Result { + return results.NewCompileResultErr(jb.GetID(), err.Error()) } - - compilationErrorResult := func(compilationError string) execution.Result { - return results.CompileResult{ - ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.CompileResult, - DoneAt: time.Now(), - }, - Status: results.CompileStatusCE, - CompilationError: compilationError, - } - } - - okResult := func() execution.Result { - return results.CompileResult{ - ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.CompileResult, - DoneAt: time.Now(), - }, - Status: results.CompileStatusOK, - } - } - - if job.GetType() != execution.CompileGoJobType { - return errorResult(fmt.Errorf("unsupported job type %s for %s executor", job.GetType(), execution.CompileGoJobType)) + if jb.GetType() != 
job.CompileGo { + return errorResult(fmt.Errorf("unsupported job type %s for %s executor", jb.GetType(), job.CompileGo)) } - compileGoJob := job.(*jobs.CompileGoJob) + compileGoJob := jb.AsCompileGo() - code, unlock, err := e.inputProvider.Locate(ctx, compileGoJob.Code) + code, unlock, err := e.sourceProvider.Locate(ctx, compileGoJob.Code.SourceID) if err != nil { return errorResult(fmt.Errorf("failed to locate code input: %w", err)) } defer unlock() - compiledCode, commitOutput, abortOutput, err := e.outputProvider.Reserve(ctx, compileGoJob.CompiledCode) + compiledCode, commitOutput, abortOutput, err := e.outputProvider.Reserve(ctx, jb.GetID(), compileGoJob.CompiledCode.File) if err != nil { return errorResult(fmt.Errorf("failed to locate compiled_code output: %w", err)) } @@ -110,7 +77,7 @@ func (e *CompileGoJobExecutor) Execute(ctx context.Context, job execution.Job) e }) if err != nil { e.log.Error("execute go build in runtime error", slog.Any("err", err)) - return compilationErrorResult(stderr.String()) + return results.NewCompileResultCE(jb.GetID(), stderr.String()) } e.log.Info("command ok") @@ -119,5 +86,5 @@ func (e *CompileGoJobExecutor) Execute(ctx context.Context, job execution.Job) e }) if err != nil { return errorResult(commitErr) } - return okResult() + return results.NewCompileResultOK(jb.GetID()) } diff --git a/Exesh/internal/executor/executors/executors.go b/Exesh/internal/executor/executors/executors.go index 4d53a36e..2e9b1a12 100644 --- a/Exesh/internal/executor/executors/executors.go +++ b/Exesh/internal/executor/executors/executors.go @@ -2,19 +2,20 @@ package executors import ( "context" - "exesh/internal/domain/execution" + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/source" "io" ) type ( - inputProvider interface { - Locate(context.Context, execution.Input) (path string, unlock func(), err error) - Read(context.Context, execution.Input) (r io.Reader, unlock func(), err error) + sourceProvider interface { 
Locate(context.Context, source.ID) (path string, unlock func(), err error) + Read(context.Context, source.ID) (r io.Reader, unlock func(), err error) } outputProvider interface { - Reserve(context.Context, execution.Output) (path string, commit, abort func() error, err error) - Create(context.Context, execution.Output) (w io.Writer, commit, abort func() error, err error) - Read(context.Context, execution.Output) (r io.Reader, unlock func(), err error) + Reserve(context.Context, job.ID, string) (path string, commit, abort func() error, err error) + Read(context.Context, job.ID, string) (r io.Reader, unlock func(), err error) + Create(context.Context, job.ID, string) (w io.Writer, commit, abort func() error, err error) } ) diff --git a/Exesh/internal/executor/executors/run_cpp_job_executor.go b/Exesh/internal/executor/executors/run_cpp_job_executor.go index b272ed68..b263a32a 100644 --- a/Exesh/internal/executor/executors/run_cpp_job_executor.go +++ b/Exesh/internal/executor/executors/run_cpp_job_executor.go @@ -4,9 +4,9 @@ import ( "bytes" "context" "errors" - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/jobs" - "exesh/internal/domain/execution/results" + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/job/jobs" + "exesh/internal/domain/execution/result/results" "exesh/internal/runtime" "fmt" "io" @@ -16,112 +16,47 @@ import ( type RunCppJobExecutor struct { log *slog.Logger - inputProvider inputProvider + sourceProvider sourceProvider outputProvider outputProvider runtime runtime.Runtime } -func NewRunCppJobExecutor(log *slog.Logger, inputProvider inputProvider, outputProvider outputProvider, rt runtime.Runtime) *RunCppJobExecutor { +func NewRunCppJobExecutor(log *slog.Logger, sourceProvider sourceProvider, outputProvider outputProvider, rt runtime.Runtime) *RunCppJobExecutor { return &RunCppJobExecutor{ log: log, - inputProvider: inputProvider, + sourceProvider: sourceProvider, outputProvider: outputProvider, 
runtime: rt, } } -func (e *RunCppJobExecutor) SupportsType(jobType execution.JobType) bool { - return jobType == execution.RunCppJobType +func (e *RunCppJobExecutor) SupportsType(jobType job.Type) bool { + return jobType == job.RunCpp } -func (e *RunCppJobExecutor) Execute(ctx context.Context, job execution.Job) execution.Result { - errorResult := func(err error) execution.Result { - return results.RunResult{ - ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.RunResult, - DoneAt: time.Now(), - Error: err.Error(), - }, - } - } - - runtimeErrorResult := func() execution.Result { - return results.RunResult{ - ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.RunResult, - DoneAt: time.Now(), - }, - Status: results.RunStatusRE, - } - } - - okResult := func() execution.Result { - return results.RunResult{ - ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.RunResult, - DoneAt: time.Now(), - }, - Status: results.RunStatusOK, - } - } - - okResultWithOutput := func(output string) execution.Result { - return results.RunResult{ - ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.RunResult, - DoneAt: time.Now(), - }, - Status: results.RunStatusOK, - HasOutput: true, - Output: output, - } - } - - tlResult := func() execution.Result { - return results.RunResult{ - ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.RunResult, - DoneAt: time.Now(), - }, - Status: results.RunStatusTL, - HasOutput: false, - } - } - - mlResult := func() execution.Result { - return results.RunResult{ - ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.RunResult, - DoneAt: time.Now(), - }, - Status: results.RunStatusML, - } +func (e *RunCppJobExecutor) Execute(ctx context.Context, jb jobs.Job) results.Result { + errorResult := func(err error) results.Result { + return results.NewRunResultErr(jb.GetID(), err.Error()) } - if job.GetType() != 
execution.RunCppJobType { - return errorResult(fmt.Errorf("unsupported job type %s for %s executor", job.GetType(), execution.RunCppJobType)) + if jb.GetType() != job.RunCpp { + return errorResult(fmt.Errorf("unsupported job type %s for %s executor", jb.GetType(), job.RunCpp)) } - runCppJob := job.(*jobs.RunCppJob) + runCppJob := jb.AsRunCpp() - compiledCode, unlock, err := e.inputProvider.Locate(ctx, runCppJob.CompiledCode) + compiledCode, unlock, err := e.sourceProvider.Locate(ctx, runCppJob.CompiledCode.SourceID) if err != nil { return errorResult(fmt.Errorf("failed to locate compiled_code input: %w", err)) } defer unlock() - runInput, unlock, err := e.inputProvider.Read(ctx, runCppJob.RunInput) + runInput, unlock, err := e.sourceProvider.Read(ctx, runCppJob.RunInput.SourceID) if err != nil { return errorResult(fmt.Errorf("failed to read run_input input: %w", err)) } defer unlock() - runOutput, commitOutput, abortOutput, err := e.outputProvider.Create(ctx, runCppJob.RunOutput) + runOutput, commitOutput, abortOutput, err := e.outputProvider.Create(ctx, jb.GetID(), runCppJob.RunOutput.File) if err != nil { return errorResult(fmt.Errorf("failed to create run_output output: %w", err)) } @@ -156,12 +91,12 @@ func (e *RunCppJobExecutor) Execute(ctx context.Context, job execution.Job) exec if err != nil { e.log.Error("execute binary in runtime error", slog.Any("err", err)) if errors.Is(err, runtime.ErrTimeout) { - return tlResult() + return results.NewRunResultTL(jb.GetID()) } if errors.Is(err, runtime.ErrOutOfMemory) { - return mlResult() + return results.NewRunResultML(jb.GetID()) } - return runtimeErrorResult() + return results.NewRunResultRE(jb.GetID()) } e.log.Info("command ok") @@ -171,20 +106,20 @@ func (e *RunCppJobExecutor) Execute(ctx context.Context, job execution.Job) exec } if !runCppJob.ShowOutput { - return okResult() + return results.NewRunResultOK(jb.GetID()) } - runOutputReader, unlock, err := e.outputProvider.Read(ctx, runCppJob.RunOutput) + 
runOutputReader, unlock, err := e.outputProvider.Read(ctx, jb.GetID(), runCppJob.RunOutput.File) if err != nil { return errorResult(fmt.Errorf("failed to open run output: %w", err)) } defer unlock() - output, err := io.ReadAll(runOutputReader) + out, err := io.ReadAll(runOutputReader) if err != nil { return errorResult(fmt.Errorf("failed to read run output: %w", err)) } - return okResultWithOutput(string(output)) + return results.NewRunResultWithOutput(jb.GetID(), string(out)) } diff --git a/Exesh/internal/executor/executors/run_go_job_executor.go b/Exesh/internal/executor/executors/run_go_job_executor.go index 1480e028..62f2d2d5 100644 --- a/Exesh/internal/executor/executors/run_go_job_executor.go +++ b/Exesh/internal/executor/executors/run_go_job_executor.go @@ -4,125 +4,60 @@ import ( "bytes" "context" "errors" + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/job/jobs" "fmt" "io" "log/slog" "time" - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/jobs" - "exesh/internal/domain/execution/results" + "exesh/internal/domain/execution/result/results" "exesh/internal/runtime" ) type RunGoJobExecutor struct { log *slog.Logger - inputProvider inputProvider + sourceProvider sourceProvider outputProvider outputProvider runtime runtime.Runtime } -func NewRunGoJobExecutor(log *slog.Logger, inputProvider inputProvider, outputProvider outputProvider, rt runtime.Runtime) *RunGoJobExecutor { +func NewRunGoJobExecutor(log *slog.Logger, sourceProvider sourceProvider, outputProvider outputProvider, rt runtime.Runtime) *RunGoJobExecutor { return &RunGoJobExecutor{ log: log, - inputProvider: inputProvider, + sourceProvider: sourceProvider, outputProvider: outputProvider, runtime: rt, } } -func (e *RunGoJobExecutor) SupportsType(jobType execution.JobType) bool { - return jobType == execution.RunGoJobType +func (e *RunGoJobExecutor) SupportsType(jobType job.Type) bool { + return jobType == job.RunGo } -func (e *RunGoJobExecutor) 
Execute(ctx context.Context, job execution.Job) execution.Result { - errorResult := func(err error) execution.Result { - return results.RunResult{ - ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.RunResult, - DoneAt: time.Now(), - Error: err.Error(), - }, - } +func (e *RunGoJobExecutor) Execute(ctx context.Context, jb jobs.Job) results.Result { + errorResult := func(err error) results.Result { + return results.NewRunResultErr(jb.GetID(), err.Error()) } - runtimeErrorResult := func() execution.Result { - return results.RunResult{ - ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.RunResult, - DoneAt: time.Now(), - }, - Status: results.RunStatusRE, - } + if jb.GetType() != job.RunGo { + return errorResult(fmt.Errorf("unsupported job type %s for %s executor", jb.GetType(), job.RunGo)) } + runGoJob := jb.AsRunGo() - okResult := func() execution.Result { - return results.RunResult{ - ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.RunResult, - DoneAt: time.Now(), - }, - Status: results.RunStatusOK, - } - } - - okResultWithOutput := func(output string) execution.Result { - return results.RunResult{ - ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.RunResult, - DoneAt: time.Now(), - }, - Status: results.RunStatusOK, - HasOutput: true, - Output: output, - } - } - - tlResult := func() execution.Result { - return results.RunResult{ - ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.RunResult, - DoneAt: time.Now(), - }, - Status: results.RunStatusTL, - HasOutput: false, - } - } - - mlResult := func() execution.Result { - return results.RunResult{ - ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.RunResult, - DoneAt: time.Now(), - }, - Status: results.RunStatusML, - } - } - - if job.GetType() != execution.RunGoJobType { - return errorResult(fmt.Errorf("unsupported job type %s for %s executor", 
job.GetType(), execution.RunGoJobType)) - } - runGoJob := job.(*jobs.RunGoJob) - - compiledCode, unlock, err := e.inputProvider.Locate(ctx, runGoJob.CompiledCode) + compiledCode, unlock, err := e.sourceProvider.Locate(ctx, runGoJob.CompiledCode.SourceID) if err != nil { return errorResult(fmt.Errorf("failed to locate compiled_code input: %w", err)) } defer unlock() - runInput, unlock, err := e.inputProvider.Read(ctx, runGoJob.RunInput) + runInput, unlock, err := e.sourceProvider.Read(ctx, runGoJob.RunInput.SourceID) if err != nil { return errorResult(fmt.Errorf("failed to read run_input input: %w", err)) } defer unlock() - runOutput, commitOutput, abortOutput, err := e.outputProvider.Create(ctx, runGoJob.RunOutput) + runOutput, commitOutput, abortOutput, err := e.outputProvider.Create(ctx, jb.GetID(), runGoJob.RunOutput.File) if err != nil { return errorResult(fmt.Errorf("failed to create run_output output: %w", err)) } @@ -157,12 +92,12 @@ func (e *RunGoJobExecutor) Execute(ctx context.Context, job execution.Job) execu if err != nil { e.log.Error("execute binary in runtime error", slog.Any("err", err)) if errors.Is(err, runtime.ErrTimeout) { - return tlResult() + return results.NewRunResultTL(jb.GetID()) } if errors.Is(err, runtime.ErrOutOfMemory) { - return mlResult() + return results.NewRunResultML(jb.GetID()) } - return runtimeErrorResult() + return results.NewRunResultRE(jb.GetID()) } e.log.Info("command ok") @@ -171,24 +106,20 @@ func (e *RunGoJobExecutor) Execute(ctx context.Context, job execution.Job) execu return errorResult(fmt.Errorf("failed to commit output creation: %w", err)) } - if err != nil { - return runtimeErrorResult() - } - if !runGoJob.ShowOutput { - return okResult() + return results.NewRunResultOK(jb.GetID()) } - runOutputReader, unlock, err := e.outputProvider.Read(ctx, runGoJob.RunOutput) + runOutputReader, unlock, err := e.outputProvider.Read(ctx, jb.GetID(), runGoJob.RunOutput.File) if err != nil { return errorResult(fmt.Errorf("failed 
to open run output: %w", err)) } defer unlock() - output, err := io.ReadAll(runOutputReader) + out, err := io.ReadAll(runOutputReader) if err != nil { return errorResult(fmt.Errorf("failed to read run output: %w", err)) } - return okResultWithOutput(string(output)) + return results.NewRunResultWithOutput(jb.GetID(), string(out)) } diff --git a/Exesh/internal/executor/executors/run_py_job_executor.go b/Exesh/internal/executor/executors/run_py_job_executor.go index 72e6315c..f05ee62a 100644 --- a/Exesh/internal/executor/executors/run_py_job_executor.go +++ b/Exesh/internal/executor/executors/run_py_job_executor.go @@ -4,9 +4,9 @@ import ( "bytes" "context" "errors" - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/jobs" - "exesh/internal/domain/execution/results" + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/job/jobs" + "exesh/internal/domain/execution/result/results" "exesh/internal/runtime" "fmt" "io" @@ -16,112 +16,47 @@ import ( type RunPyJobExecutor struct { log *slog.Logger - inputProvider inputProvider + sourceProvider sourceProvider outputProvider outputProvider runtime runtime.Runtime } -func NewRunPyJobExecutor(log *slog.Logger, inputProvider inputProvider, outputProvider outputProvider, rt runtime.Runtime) *RunPyJobExecutor { +func NewRunPyJobExecutor(log *slog.Logger, sourceProvider sourceProvider, outputProvider outputProvider, rt runtime.Runtime) *RunPyJobExecutor { return &RunPyJobExecutor{ log: log, - inputProvider: inputProvider, + sourceProvider: sourceProvider, outputProvider: outputProvider, runtime: rt, } } -func (e *RunPyJobExecutor) SupportsType(jobType execution.JobType) bool { - return jobType == execution.RunPyJobType +func (e *RunPyJobExecutor) SupportsType(jobType job.Type) bool { + return jobType == job.RunPy } -func (e *RunPyJobExecutor) Execute(ctx context.Context, job execution.Job) execution.Result { - errorResult := func(err error) execution.Result { - return results.RunResult{ - 
ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.RunResult, - DoneAt: time.Now(), - Error: err.Error(), - }, - } - } - - runtimeErrorResult := func() execution.Result { - return results.RunResult{ - ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.RunResult, - DoneAt: time.Now(), - }, - Status: results.RunStatusRE, - } - } - - okResult := func() execution.Result { - return results.RunResult{ - ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.RunResult, - DoneAt: time.Now(), - }, - Status: results.RunStatusOK, - } - } - - okResultWithOutput := func(output string) execution.Result { - return results.RunResult{ - ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.RunResult, - DoneAt: time.Now(), - }, - Status: results.RunStatusOK, - HasOutput: true, - Output: output, - } - } - - tlResult := func() execution.Result { - return results.RunResult{ - ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.RunResult, - DoneAt: time.Now(), - }, - Status: results.RunStatusTL, - HasOutput: false, - } - } - - mlResult := func() execution.Result { - return results.RunResult{ - ResultDetails: execution.ResultDetails{ - ID: job.GetID(), - Type: execution.RunResult, - DoneAt: time.Now(), - }, - Status: results.RunStatusML, - } +func (e *RunPyJobExecutor) Execute(ctx context.Context, jb jobs.Job) results.Result { + errorResult := func(err error) results.Result { + return results.NewRunResultErr(jb.GetID(), err.Error()) } - if job.GetType() != execution.RunPyJobType { - return errorResult(fmt.Errorf("unsupported job type %s for %s executor", job.GetType(), execution.RunPyJobType)) + if jb.GetType() != job.RunPy { + return errorResult(fmt.Errorf("unsupported job type %s for %s executor", jb.GetType(), job.RunPy)) } - runPyJob := job.(*jobs.RunPyJob) + runPyJob := jb.AsRunPy() - code, unlock, err := e.inputProvider.Locate(ctx, runPyJob.Code) + code, unlock, 
err := e.sourceProvider.Locate(ctx, runPyJob.Code.SourceID) if err != nil { return errorResult(fmt.Errorf("failed to locate code input: %w", err)) } defer unlock() - runInput, unlock, err := e.inputProvider.Read(ctx, runPyJob.RunInput) + runInput, unlock, err := e.sourceProvider.Read(ctx, runPyJob.RunInput.SourceID) if err != nil { return errorResult(fmt.Errorf("failed to read run_input input: %w", err)) } defer unlock() - runOutput, commitOutput, abortOutput, err := e.outputProvider.Create(ctx, runPyJob.RunOutput) + runOutput, commitOutput, abortOutput, err := e.outputProvider.Create(ctx, jb.GetID(), runPyJob.RunOutput.File) if err != nil { return errorResult(fmt.Errorf("failed to create run_output output: %w", err)) } @@ -156,12 +91,12 @@ func (e *RunPyJobExecutor) Execute(ctx context.Context, job execution.Job) execu if err != nil { e.log.Error("execute binary in runtime error", slog.Any("err", err)) if errors.Is(err, runtime.ErrTimeout) { - return tlResult() + return results.NewRunResultTL(jb.GetID()) } if errors.Is(err, runtime.ErrOutOfMemory) { - return mlResult() + return results.NewRunResultML(jb.GetID()) } - return runtimeErrorResult() + return results.NewRunResultRE(jb.GetID()) } e.log.Info("command ok") @@ -171,19 +106,19 @@ func (e *RunPyJobExecutor) Execute(ctx context.Context, job execution.Job) execu } if !runPyJob.ShowOutput { - return okResult() + return results.NewRunResultOK(jb.GetID()) } - runOutputReader, unlock, err := e.outputProvider.Read(ctx, runPyJob.RunOutput) + runOutputReader, unlock, err := e.outputProvider.Read(ctx, jb.GetID(), runPyJob.RunOutput.File) if err != nil { return errorResult(fmt.Errorf("failed to open run output: %w", err)) } defer unlock() - output, err := io.ReadAll(runOutputReader) + out, err := io.ReadAll(runOutputReader) if err != nil { return errorResult(fmt.Errorf("failed to read run output: %w", err)) } - return okResultWithOutput(string(output)) + return results.NewRunResultWithOutput(jb.GetID(), string(out)) } 
diff --git a/Exesh/internal/executor/job_executor.go b/Exesh/internal/executor/job_executor.go index dc13fb4d..474e3160 100644 --- a/Exesh/internal/executor/job_executor.go +++ b/Exesh/internal/executor/job_executor.go @@ -2,9 +2,9 @@ package executor import ( "context" - "exesh/internal/domain/execution" - "fmt" - "time" + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/job/jobs" + "exesh/internal/domain/execution/result/results" ) type ( @@ -13,8 +13,8 @@ type ( } jobExecutor interface { - SupportsType(execution.JobType) bool - Execute(context.Context, execution.Job) execution.Result + SupportsType(job.Type) bool + Execute(context.Context, jobs.Job) results.Result } ) @@ -22,15 +22,13 @@ func NewJobExecutor(executors ...jobExecutor) *JobExecutor { return &JobExecutor{executors: executors} } -func (e *JobExecutor) Execute(ctx context.Context, job execution.Job) execution.Result { +func (e *JobExecutor) Execute(ctx context.Context, jb jobs.Job) results.Result { + var res results.Result for _, executor := range e.executors { - if executor.SupportsType(job.GetType()) { - return executor.Execute(ctx, job) + if executor.SupportsType(jb.GetType()) { + res = executor.Execute(ctx, jb) + break } } - return execution.ResultDetails{ - ID: job.GetID(), - DoneAt: time.Now(), - Error: fmt.Errorf("executor for %s job not found", job.GetType()).Error(), - } + return res } diff --git a/Exesh/internal/factory/execution_factory.go b/Exesh/internal/factory/execution_factory.go new file mode 100644 index 00000000..03cc6daf --- /dev/null +++ b/Exesh/internal/factory/execution_factory.go @@ -0,0 +1,348 @@ +package factory + +import ( + "context" + "crypto/sha1" + "exesh/internal/config" + "exesh/internal/domain/execution" + "exesh/internal/domain/execution/input" + "exesh/internal/domain/execution/input/inputs" + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/job/jobs" + "exesh/internal/domain/execution/output" + 
"exesh/internal/domain/execution/source" + "exesh/internal/domain/execution/source/sources" + "fmt" + "github.com/DIvanCode/filestorage/pkg/bucket" + "time" +) + +type ( + ExecutionFactory struct { + cfg config.JobFactoryConfig + filestorage filestorage + } + + filestorage interface { + DownloadBucket(context.Context, bucket.ID, time.Duration, string) error + DownloadFile(context.Context, bucket.ID, string, time.Duration, string) error + } +) + +func NewExecutionFactory( + cfg config.JobFactoryConfig, + filestorage filestorage, +) *ExecutionFactory { + return &ExecutionFactory{ + cfg: cfg, + filestorage: filestorage, + } +} + +func (f *ExecutionFactory) Create(ctx context.Context, def execution.Definition) (*execution.Execution, error) { + ex := execution.NewExecution(def) + + for _, srcDef := range def.Sources { + if err := f.saveSource(ctx, ex, srcDef); err != nil { + return nil, fmt.Errorf("failed to save source '%s': %w", srcDef.GetName(), err) + } + } + + for _, stageDef := range def.Stages { + stage, err := f.createStage(ex, stageDef) + if err != nil { + return nil, fmt.Errorf("failed to create stage '%s': %w", stageDef.Name, err) + } + + ex.Stages = append(ex.Stages, stage) + } + + ex.BuildGraph() + + return ex, nil +} + +func (f *ExecutionFactory) saveSource(ctx context.Context, ex *execution.Execution, def sources.Definition) error { + switch def.GetType() { + case source.InlineDefinition: + break + case source.FilestorageBucketDefinition: + typedSrc := def.AsFilestorageBucketDefinition() + + bucketID := typedSrc.BucketID + ttl := f.cfg.SourceTTL.FilestorageBucket + downloadEndpoint := typedSrc.DownloadEndpoint + + if err := f.filestorage.DownloadBucket(ctx, bucketID, ttl, downloadEndpoint); err != nil { + return fmt.Errorf("failed to download bucket %s: %w", bucketID, err) + } + case source.FilestorageBucketFileDefinition: + typedSrc := def.AsFilestorageBucketFileDefinition() + + bucketID := typedSrc.BucketID + file := typedSrc.File + ttl := 
f.cfg.SourceTTL.FilestorageBucket + downloadEndpoint := typedSrc.DownloadEndpoint + + if err := f.filestorage.DownloadFile(ctx, bucketID, file, ttl, downloadEndpoint); err != nil { + return fmt.Errorf("failed to download file %s: %w", bucketID, err) + } + default: + return fmt.Errorf("unknown source definition type '%s'", def.GetType()) + } + + ex.SourceDefinitionByName[def.GetName()] = def + + return nil +} + +func (f *ExecutionFactory) createStage(ex *execution.Execution, def execution.StageDefinition) (*execution.Stage, error) { + stage := execution.Stage{ + Name: def.Name, + Deps: def.Deps, + Jobs: make([]jobs.Job, len(def.Jobs)), + } + + for _, jobDef := range def.Jobs { + jb, err := f.createJob(ex, jobDef) + if err != nil { + return nil, fmt.Errorf("failed to create job '%s': %w", jobDef.GetName(), err) + } + + stage.Jobs = append(stage.Jobs, jb) + ex.JobByName[jobDef.GetName()] = jb + } + + stage.BuildGraph() + + return &stage, nil +} + +func (f *ExecutionFactory) createJob(ex *execution.Execution, def jobs.Definition) (jobs.Job, error) { + var jb jobs.Job + + id, err := f.calculateJobID(ex.ID.String(), string(def.GetName())) + if err != nil { + return jb, fmt.Errorf("failed to calculate job '%s' id: %w", def.GetName(), err) + } + + successStatus := def.GetSuccessStatus() + + switch def.GetType() { + case job.CompileCpp: + typedDef := def.AsCompileCpp() + + code, err := f.createInput(ex, typedDef.Code) + if err != nil { + return jb, fmt.Errorf("failed to create code source: %w", err) + } + compiledCode := output.NewOutput(f.cfg.Output.CompiledBinary) + + jb = jobs.NewCompileCppJob(id, successStatus, code, compiledCode) + case job.CompileGo: + typedDef := def.AsCompileGo() + + code, err := f.createInput(ex, typedDef.Code) + if err != nil { + return jb, fmt.Errorf("failed to create code source: %w", err) + } + compiledCode := output.NewOutput(f.cfg.Output.CompiledBinary) + + jb = jobs.NewCompileGoJob(id, successStatus, code, compiledCode) + case job.RunCpp: + 
typedDef := def.AsRunCpp() + + compiledCode, err := f.createInput(ex, typedDef.CompiledCode) + if err != nil { + return jb, fmt.Errorf("failed to create compiled_code source: %w", err) + } + runInput, err := f.createInput(ex, typedDef.RunInput) + if err != nil { + return jb, fmt.Errorf("failed to create run_input source: %w", err) + } + runOutput := output.NewOutput(f.cfg.Output.RunOutput) + timeLimit := typedDef.TimeLimit + memoryLimit := typedDef.MemoryLimit + showOutput := typedDef.ShowOutput + + jb = jobs.NewRunCppJob(id, successStatus, compiledCode, runInput, runOutput, timeLimit, memoryLimit, showOutput) + case job.RunGo: + typedDef := def.AsRunGo() + + compiledCode, err := f.createInput(ex, typedDef.CompiledCode) + if err != nil { + return jb, fmt.Errorf("failed to create compiled_code source: %w", err) + } + runInput, err := f.createInput(ex, typedDef.RunInput) + if err != nil { + return jb, fmt.Errorf("failed to create run_input source: %w", err) + } + runOutput := output.NewOutput(f.cfg.Output.RunOutput) + timeLimit := typedDef.TimeLimit + memoryLimit := typedDef.MemoryLimit + showOutput := typedDef.ShowOutput + + jb = jobs.NewRunGoJob(id, successStatus, compiledCode, runInput, runOutput, timeLimit, memoryLimit, showOutput) + case job.RunPy: + typedDef := def.AsRunPy() + + code, err := f.createInput(ex, typedDef.Code) + if err != nil { + return jb, fmt.Errorf("failed to create code source: %w", err) + } + runInput, err := f.createInput(ex, typedDef.RunInput) + if err != nil { + return jb, fmt.Errorf("failed to create run_input source: %w", err) + } + runOutput := output.NewOutput(f.cfg.Output.RunOutput) + timeLimit := typedDef.TimeLimit + memoryLimit := typedDef.MemoryLimit + showOutput := typedDef.ShowOutput + + jb = jobs.NewRunPyJob(id, successStatus, code, runInput, runOutput, timeLimit, memoryLimit, showOutput) + case job.CheckCpp: + typedDef := def.AsCheckCpp() + + compiledChecker, err := f.createInput(ex, typedDef.CompiledChecker) + if err != nil { 
+ return jb, fmt.Errorf("failed to create compiled_checker source: %w", err) + } + correctOutput, err := f.createInput(ex, typedDef.CorrectOutput) + if err != nil { + return jb, fmt.Errorf("failed to create correct_output source: %w", err) + } + suspectOutput, err := f.createInput(ex, typedDef.SuspectOutput) + if err != nil { + return jb, fmt.Errorf("failed to create suspect_output source: %w", err) + } + + jb = jobs.NewCheckCppJob(id, successStatus, compiledChecker, correctOutput, suspectOutput) + default: + return jb, fmt.Errorf("unknown job type %s", def.GetType()) + } + + out := jb.GetOutput() + if out != nil { + ex.OutputByJob[jb.GetID()] = *out + } + + return jb, nil +} + +func (f *ExecutionFactory) createInput(ex *execution.Execution, def inputs.Definition) (input.Input, error) { + var in input.Input + + switch def.GetType() { + case input.InlineDefinition: + typedDef := def.AsInline() + + srcDef, ok := ex.SourceDefinitionByName[typedDef.SourceDefinitionName] + if !ok { + return in, fmt.Errorf("failed to find source definition '%s'", typedDef.SourceDefinitionName) + } + typedSrcDef := srcDef.AsInlineDefinition() + + sourceID, err := f.calculateSourceID(ex.ID.String(), string(srcDef.GetName())) + if err != nil { + return in, fmt.Errorf("failed to calculate source id: %w", err) + } + + src := sources.NewInlineSource(sourceID, typedSrcDef.Content) + ex.SourceByID[src.GetID()] = src + + in = input.NewInput(input.Inline, src.GetID()) + case input.FilestorageBucketDefinition: + typedDef := def.AsFilestorageBucket() + + srcDef, ok := ex.SourceDefinitionByName[typedDef.SourceDefinitionName] + if !ok { + return in, fmt.Errorf("failed to find source definition '%s'", typedDef.SourceDefinitionName) + } + typedSrcDef := srcDef.AsFilestorageBucketDefinition() + + sourceID, err := f.calculateSourceID(ex.ID.String(), string(srcDef.GetName())) + if err != nil { + return in, fmt.Errorf("failed to calculate source id: %w", err) + } + + bucketID := typedSrcDef.BucketID + 
downloadEndpoint := f.cfg.FilestorageEndpoint + file := typedDef.File + + src := sources.NewFilestorageBucketFileSource(sourceID, bucketID, downloadEndpoint, file) + ex.SourceByID[src.GetID()] = src + + in = input.NewInput(input.FilestorageBucketFile, src.GetID()) + case input.FilestorageBucketFileDefinition: + typedDef := def.AsFilestorageBucketFile() + + srcDef, ok := ex.SourceDefinitionByName[typedDef.SourceDefinitionName] + if !ok { + return in, fmt.Errorf("failed to find source definition '%s'", typedDef.SourceDefinitionName) + } + typedSrcDef := srcDef.AsFilestorageBucketFileDefinition() + + sourceID, err := f.calculateSourceID(ex.ID.String(), string(srcDef.GetName())) + if err != nil { + return in, fmt.Errorf("failed to calculate source id: %w", err) + } + + bucketID := typedSrcDef.BucketID + downloadEndpoint := f.cfg.FilestorageEndpoint + file := typedSrcDef.File + + src := sources.NewFilestorageBucketFileSource(sourceID, bucketID, downloadEndpoint, file) + ex.SourceByID[src.GetID()] = src + + in = input.NewInput(input.FilestorageBucketFile, src.GetID()) + case input.ArtifactDefinition: + typedDef := def.AsArtifact() + + jb, ok := ex.JobByName[typedDef.JobDefinitionName] + if !ok { + return in, fmt.Errorf("failed to find job '%s'", typedDef.JobDefinitionName) + } + + jobID := jb.GetID() + sourceID, err := f.calculateSourceID(ex.ID.String(), jobID.String()) + if err != nil { + return in, fmt.Errorf("failed to calculate source id: %w", err) + } + + in = input.NewInput(input.Artifact, sourceID) + default: + return in, fmt.Errorf("unknown input type: %s", def.GetType()) + } + + return in, nil +} + +func (f *ExecutionFactory) calculateSourceID(vars ...string) (source.ID, error) { + var id source.ID + + hash := sha1.New() + for _, v := range vars { + hash.Write([]byte(v)) + } + + if err := id.FromString(fmt.Sprintf("%x", hash.Sum(nil))); err != nil { + return id, err + } + + return id, nil +} + +func (f *ExecutionFactory) calculateJobID(vars ...string) (job.ID, 
error) { + var id job.ID + + hash := sha1.New() + for _, v := range vars { + hash.Write([]byte(v)) + } + + if err := id.FromString(fmt.Sprintf("%x", hash.Sum(nil))); err != nil { + return id, err + } + + return id, nil +} diff --git a/Exesh/internal/factory/job_factory.go b/Exesh/internal/factory/job_factory.go deleted file mode 100644 index 6c4216ba..00000000 --- a/Exesh/internal/factory/job_factory.go +++ /dev/null @@ -1,271 +0,0 @@ -package factory - -import ( - "context" - "crypto/sha1" - "encoding/json" - "exesh/internal/config" - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/inputs" - "exesh/internal/domain/execution/jobs" - "exesh/internal/domain/execution/outputs" - "exesh/internal/domain/execution/sources" - "exesh/internal/domain/execution/steps" - "fmt" - "io" - "log/slog" - - "github.com/google/uuid" -) - -type ( - JobFactory struct { - log *slog.Logger - cfg config.JobFactoryConfig - - artifactRegistry artifactRegistry - inputProvider inputProvider - } - - artifactRegistry interface { - GetWorker(execution.JobID) (string, error) - } - - inputProvider interface { - Create(context.Context, execution.Input) (w io.Writer, commit, abort func() error, err error) - Locate(context.Context, execution.Input) (path string, unlock func(), err error) - Read(context.Context, execution.Input) (r io.Reader, unlock func(), err error) - } -) - -func NewJobFactory( - log *slog.Logger, - cfg config.JobFactoryConfig, - artifactRegistry artifactRegistry, - inputProvider inputProvider, -) *JobFactory { - return &JobFactory{ - log: log, - cfg: cfg, - - artifactRegistry: artifactRegistry, - inputProvider: inputProvider, - } -} - -func (f *JobFactory) Create(ctx context.Context, execCtx *execution.Context, step execution.Step) (execution.Job, error) { - switch step.GetType() { - case execution.CompileCppStepType: - typedStep := step.(*steps.CompileCppStep) - - code, err := f.createInput(ctx, execCtx, typedStep.Code) - if err != nil { - return nil, 
fmt.Errorf("failed to create code source: %w", err) - } - - id, err := f.calculateID(ctx, []execution.Input{code}, map[string]any{}) - if err != nil { - return nil, fmt.Errorf("failed to calculate job id for step '%s': %w", step.GetName(), err) - } - - compiledCode := outputs.NewArtifactOutput(f.cfg.Output.CompiledCpp, id) - - return jobs.NewCompileCppJob(id, code, compiledCode), nil - case execution.RunCppStepType: - typedStep := step.(*steps.RunCppStep) - - compiledCode, err := f.createInput(ctx, execCtx, typedStep.CompiledCode) - if err != nil { - return nil, fmt.Errorf("failed to create compiled_code source: %w", err) - } - runSource, err := f.createInput(ctx, execCtx, typedStep.RunInput) - if err != nil { - return nil, fmt.Errorf("failed to create run_input source: %w", err) - } - - id, err := f.calculateID(ctx, []execution.Input{compiledCode, runSource}, step.GetAttributes()) - if err != nil { - return nil, fmt.Errorf("failed to calculate job id for step '%s': %w", step.GetName(), err) - } - - runOutput := outputs.NewArtifactOutput(f.cfg.Output.RunOutput, id) - - return jobs.NewRunCppJob(id, compiledCode, runSource, runOutput, typedStep.TimeLimit, typedStep.MemoryLimit, typedStep.ShowOutput), nil - case execution.RunPyStepType: - typedStep := step.(*steps.RunPyStep) - - code, err := f.createInput(ctx, execCtx, typedStep.Code) - if err != nil { - return nil, fmt.Errorf("failed to create code source: %w", err) - } - runSource, err := f.createInput(ctx, execCtx, typedStep.RunInput) - if err != nil { - return nil, fmt.Errorf("failed to create run_input source: %w", err) - } - - id, err := f.calculateID(ctx, []execution.Input{code, runSource}, step.GetAttributes()) - if err != nil { - return nil, fmt.Errorf("failed to calculate job id for step '%s': %w", step.GetName(), err) - } - - runOutput := outputs.NewArtifactOutput(f.cfg.Output.RunOutput, id) - - return jobs.NewRunPyJob(id, code, runSource, runOutput, typedStep.TimeLimit, typedStep.MemoryLimit, 
typedStep.ShowOutput), nil - case execution.CompileGoStepType: - typedStep := step.(*steps.CompileGoStep) - - code, err := f.createInput(ctx, execCtx, typedStep.Code) - if err != nil { - return nil, fmt.Errorf("failed to create code source: %w", err) - } - - id, err := f.calculateID(ctx, []execution.Input{code}, map[string]any{}) - if err != nil { - return nil, fmt.Errorf("failed to calculate job id for step '%s': %w", step.GetName(), err) - } - - compiledCode := outputs.NewArtifactOutput(f.cfg.Output.CompiledCpp, id) - - return jobs.NewCompileGoJob(id, code, compiledCode), nil - case execution.RunGoStepType: - typedStep := step.(*steps.RunGoStep) - - code, err := f.createInput(ctx, execCtx, typedStep.CompiledCode) - if err != nil { - return nil, fmt.Errorf("failed to create code source: %w", err) - } - runSource, err := f.createInput(ctx, execCtx, typedStep.RunInput) - if err != nil { - return nil, fmt.Errorf("failed to create run_input source: %w", err) - } - - id, err := f.calculateID(ctx, []execution.Input{code, runSource}, step.GetAttributes()) - if err != nil { - return nil, fmt.Errorf("failed to calculate job id for step '%s': %w", step.GetName(), err) - } - - runOutput := outputs.NewArtifactOutput(f.cfg.Output.RunOutput, id) - - return jobs.NewRunGoJob(id, code, runSource, runOutput, typedStep.TimeLimit, typedStep.MemoryLimit, typedStep.ShowOutput), nil - case execution.CheckCppStepType: - typedStep := step.(*steps.CheckCppStep) - - compiledChecker, err := f.createInput(ctx, execCtx, typedStep.CompiledChecker) - if err != nil { - return nil, fmt.Errorf("failed to create compiled_checker source: %w", err) - } - correctOutput, err := f.createInput(ctx, execCtx, typedStep.CorrectOutput) - if err != nil { - return nil, fmt.Errorf("failed to create correct_output source: %w", err) - } - suspectOutput, err := f.createInput(ctx, execCtx, typedStep.SuspectOutput) - if err != nil { - return nil, fmt.Errorf("failed to create suspect_output source: %w", err) - } - - 
id, err := f.calculateID(ctx, []execution.Input{compiledChecker, correctOutput, suspectOutput}, step.GetAttributes()) - if err != nil { - return nil, fmt.Errorf("failed to calculate job id for step '%s': %w", step.GetName(), err) - } - - return jobs.NewCheckCppJob(id, compiledChecker, correctOutput, suspectOutput), nil - default: - return nil, fmt.Errorf("unknown step type %s", step.GetType()) - } -} - -func (f *JobFactory) createInput(ctx context.Context, execCtx *execution.Context, source execution.Source) (input execution.Input, err error) { - switch source.GetType() { - case execution.OtherStepSourceType: - typedSource := source.(*sources.OtherStepSource) - otherJob, ok := execCtx.GetJobForStep(typedSource.StepName) - if !ok { - return nil, fmt.Errorf("failed to get job id for step %s", typedSource.StepName) - } - otherJobOutput := otherJob.GetOutput() - if otherJobOutput == nil { - return nil, fmt.Errorf("failed to get dep job output for step %s", typedSource.StepName) - } - workerID, err := f.artifactRegistry.GetWorker(otherJob.GetID()) - if err != nil { - return nil, fmt.Errorf("failed to find worker for job %s: %w", otherJob.GetID().String(), err) - } - input = inputs.NewArtifactInput(otherJobOutput.GetFile(), otherJob.GetID(), workerID) - case execution.InlineSourceType: - typedSource := source.(*sources.InlineSource) - input = inputs.NewFilestorageBucketInput(uuid.New().String(), execCtx.InlineSourcesBucketID, f.cfg.FilestorageEndpoint) - w, commit, abort, err := f.inputProvider.Create(ctx, input) - if err != nil { - return nil, fmt.Errorf("failed to save inline source to filestorage: %w", err) - } - if _, err = w.Write([]byte(typedSource.Content)); err != nil { - _ = abort() - return nil, fmt.Errorf("failed to write inline source to filestorage: %w", err) - } - if err = commit(); err != nil { - _ = abort() - return nil, fmt.Errorf("failed to commit filestorage input creation: %w", err) - } - case execution.FilestorageBucketSourceType: - typedSource := 
source.(*sources.FilestorageBucketSource) - input = inputs.NewFilestorageBucketInput(typedSource.File, typedSource.BucketID, typedSource.DownloadEndpoint) - _, unlock, err := f.inputProvider.Locate(ctx, input) - if err != nil { - return nil, fmt.Errorf("failed to locate filestorage bucket input: %w", err) - } - unlock() - input = inputs.NewFilestorageBucketInput(typedSource.File, typedSource.BucketID, f.cfg.FilestorageEndpoint) - default: - err = fmt.Errorf("unknown source type %s: %w", source.GetType(), err) - } - return input, err -} - -func (f *JobFactory) calculateID( - ctx context.Context, - inputs []execution.Input, - attributes map[string]any, -) (id execution.JobID, err error) { - hash := sha1.New() - for _, input := range inputs { - var r io.Reader - var unlock func() - r, unlock, err = f.inputProvider.Read(ctx, input) - if err != nil { - err = fmt.Errorf("failed to read %s input: %w", input.GetType(), err) - return id, err - } - var content []byte - if content, err = io.ReadAll(r); err != nil { - unlock() - err = fmt.Errorf("failed to read %s input's content: %w", input.GetType(), err) - return id, err - } - unlock() - if _, err = hash.Write(content); err != nil { - err = fmt.Errorf("failed to write %s input to hash: %w", input.GetType(), err) - return id, err - } - } - - bytes, err := json.Marshal(attributes) - if err != nil { - err = fmt.Errorf("failed to marshal attributes: %w", err) - return id, err - } - hash.Write(bytes) - - // temp: add random string to make all ids different - rand, err := uuid.NewUUID() - if err != nil { - err = fmt.Errorf("failed to generate random string") - return id, err - } - hash.Write([]byte(rand.String())) - - if err = id.FromString(fmt.Sprintf("%x", hash.Sum(nil))); err != nil { - return id, err - } - - return id, err -} diff --git a/Exesh/internal/factory/message_factory.go b/Exesh/internal/factory/message_factory.go index dbb2b74a..7488d54e 100644 --- a/Exesh/internal/factory/message_factory.go +++ 
b/Exesh/internal/factory/message_factory.go @@ -2,56 +2,62 @@ package factory import ( "exesh/internal/domain/execution" - "exesh/internal/domain/execution/messages" - "exesh/internal/domain/execution/results" + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/message/messages" + "exesh/internal/domain/execution/result" + "exesh/internal/domain/execution/result/results" "fmt" - "log/slog" ) -type MessageFactory struct { - log *slog.Logger -} +type MessageFactory struct{} -func NewMessageFactory(log *slog.Logger) *MessageFactory { - return &MessageFactory{ - log: log, - } +func NewMessageFactory() *MessageFactory { + return &MessageFactory{} } -func (f *MessageFactory) CreateExecutionStarted(execCtx *execution.Context) execution.Message { - return messages.NewStartExecutionMessage(execCtx.ExecutionID) +func (f *MessageFactory) CreateExecutionStarted(executionID execution.ID) messages.Message { + return messages.NewStartExecutionMessage(executionID) } -func (f *MessageFactory) CreateForStep(execCtx *execution.Context, step execution.Step, result execution.Result) (execution.Message, error) { - switch result.GetType() { - case execution.CompileResult: - typedResult := result.(*results.CompileResult) - if typedResult.Status == results.CompileStatusOK { - return messages.NewCompileStepMessage(execCtx.ExecutionID, step.GetName()), nil - } else if typedResult.Status == results.CompileStatusCE { - return messages.NewCompileStepMessageError(execCtx.ExecutionID, step.GetName(), typedResult.CompilationError), nil - } else { - return nil, fmt.Errorf("unknown compile status: %s", typedResult.Status) +func (f *MessageFactory) CreateForJob( + executionID execution.ID, + jobName job.DefinitionName, + res results.Result, +) (messages.Message, error) { + var msg messages.Message + + switch res.GetType() { + case result.Compile: + typedRes := res.AsCompile() + switch typedRes.Status { + case job.StatusOK: + msg = messages.NewCompileJobMessageOk(executionID, 
jobName) + case job.StatusCE: + msg = messages.NewCompileJobMessageError(executionID, jobName, typedRes.CompilationError) + default: + return msg, fmt.Errorf("unknown compile status: %s", typedRes.Status) } - case execution.RunResult: - typedResult := result.(*results.RunResult) - if typedResult.Status == results.RunStatusOK && typedResult.HasOutput { - return messages.NewRunStepMessageWithOutput(execCtx.ExecutionID, step.GetName(), typedResult.Output), nil + case result.Run: + typedRes := res.AsRun() + if !typedRes.HasOutput { + msg = messages.NewRunJobMessage(executionID, jobName, typedRes.Status) } else { - return messages.NewRunStepMessage(execCtx.ExecutionID, step.GetName(), typedResult.Status), nil + msg = messages.NewRunJobMessageWithOutput(executionID, jobName, typedRes.Output) } - case execution.CheckResult: - typedResult := result.(*results.CheckResult) - return messages.NewCheckStepMessage(execCtx.ExecutionID, step.GetName(), typedResult.Status), nil + case result.Check: + typedRes := res.AsCheck() + msg = messages.NewCheckJobMessage(executionID, jobName, typedRes.Status) default: - return nil, fmt.Errorf("unknown result type %s", result.GetType()) + return msg, fmt.Errorf("unknown result type %s", res.GetType()) } + + return msg, nil } -func (f *MessageFactory) CreateExecutionFinished(execCtx *execution.Context) execution.Message { - return messages.NewFinishExecutionMessage(execCtx.ExecutionID) +func (f *MessageFactory) CreateExecutionFinished(executionID execution.ID) messages.Message { + return messages.NewFinishExecutionMessageOk(executionID) } -func (f *MessageFactory) CreateExecutionFinishedError(execCtx *execution.Context, err string) execution.Message { - return messages.NewFinishExecutionMessageError(execCtx.ExecutionID, err) +func (f *MessageFactory) CreateExecutionFinishedError(executionID execution.ID, err string) messages.Message { + return messages.NewFinishExecutionMessageError(executionID, err) } diff --git 
a/Exesh/internal/provider/providers/adapter/filestorage_adapter.go b/Exesh/internal/provider/adapter/filestorage_adapter.go similarity index 61% rename from Exesh/internal/provider/providers/adapter/filestorage_adapter.go rename to Exesh/internal/provider/adapter/filestorage_adapter.go index 49400a2b..9203b8f6 100644 --- a/Exesh/internal/provider/providers/adapter/filestorage_adapter.go +++ b/Exesh/internal/provider/adapter/filestorage_adapter.go @@ -3,7 +3,6 @@ package adapter import ( "context" "errors" - "exesh/internal/provider" "fmt" "io" "os" @@ -34,7 +33,52 @@ func NewFilestorageAdapter(filestorage filestorage) *FilestorageAdapter { } } -func (a *FilestorageAdapter) Reserve( +// DownloadBucket +// the bucket will be downloaded +// if the bucket already exists, the ttl will be extended +func (a *FilestorageAdapter) DownloadBucket( + ctx context.Context, + bucketID bucket.ID, + ttl time.Duration, + downloadEndpoint string, +) error { + return a.filestorage.DownloadBucket(ctx, downloadEndpoint, bucketID, ttl) +} + +// DownloadFile +// the file will be downloaded +// if the bucket does not exist, it will be created with ttl +// if the file already exists, nothing will happen +func (a *FilestorageAdapter) DownloadFile( + ctx context.Context, + bucketID bucket.ID, + file string, + ttl time.Duration, + downloadEndpoint string, +) error { + err := a.filestorage.DownloadFile(ctx, downloadEndpoint, bucketID, file) + if err == nil { + return nil + } + + // the bucket does not exist, so create it + _, commit, _, err := a.filestorage.ReserveBucket(ctx, bucketID, ttl) + if err != nil { + return fmt.Errorf("failed to reserve bucket: %w", err) + } + if err := commit(); err != nil { + return fmt.Errorf("failed to commit bucket: %w", err) + } + + return a.filestorage.DownloadFile(ctx, downloadEndpoint, bucketID, file) +} + +// ReserveFile +// the file will be reserved in bucket +// it needs for producing files +// if the bucket does not exist, it will be created +// if the 
file already exists, then the ErrSourceAlreadyExists will be returned +func (a *FilestorageAdapter) ReserveFile( ctx context.Context, bucketID bucket.ID, file string, @@ -45,7 +89,6 @@ func (a *FilestorageAdapter) Reserve( if err != nil && errors.Is(err, errs.ErrBucketAlreadyExists) { path, commitArtifact, abortArtifact, err = a.filestorage.ReserveFile(ctx, bucketID, file) if err != nil && errors.Is(err, errs.ErrFileAlreadyExists) { - err = provider.ErrInputAlreadyExists return } } @@ -72,14 +115,20 @@ func (a *FilestorageAdapter) Reserve( return filepath.Join(path, file), commit, abort, nil } -func (a *FilestorageAdapter) Create( +// CreateFile +// the file will be reserved in bucket +// it needs for producing files using writer +// if the bucket does not exist, it will be created +// if the file already exists, then the ErrSourceAlreadyExists will be returned +func (a *FilestorageAdapter) CreateFile( ctx context.Context, bucketID bucket.ID, file string, ttl time.Duration, ) (w io.Writer, commit, abort func() error, err error) { - path, commitReserve, abortReserve, err := a.Reserve(ctx, bucketID, file, ttl) + path, commitReserve, abortReserve, err := a.ReserveFile(ctx, bucketID, file, ttl) if err != nil { + err = fmt.Errorf("failed to reserve file: %w", err) return } @@ -119,59 +168,27 @@ func (a *FilestorageAdapter) Create( return } -func (a *FilestorageAdapter) Locate( +// LocateFile +// returns real file path +// if the file does not exist, the error will be returned +func (a *FilestorageAdapter) LocateFile( ctx context.Context, bucketID bucket.ID, file string, - ttl time.Duration, - downloadEndpoint string, ) (path string, unlock func(), err error) { - if path, unlock, err = a.filestorage.GetFile(ctx, bucketID, file); err != nil { - if err = a.filestorage.DownloadFile(ctx, downloadEndpoint, bucketID, file); err != nil { - if !errors.Is(err, errs.ErrBucketNotFound) { - err = fmt.Errorf("failed to download file: %w", err) - return - } - - var commit, abort 
func() error - _, commit, abort, err = a.filestorage.ReserveBucket(ctx, bucketID, ttl) - if err != nil && !errors.Is(err, errs.ErrBucketAlreadyExists) { - err = fmt.Errorf("failed to reserve bucket: %w", err) - return - } - if err == nil { - if err = commit(); err != nil { - _ = abort() - err = fmt.Errorf("failed to commit bucket: %w", err) - return - } - } - - err = a.filestorage.DownloadFile(ctx, downloadEndpoint, bucketID, file) - } - if err != nil { - err = fmt.Errorf("failed to download file: %w", err) - return - } - - path, unlock, err = a.filestorage.GetFile(ctx, bucketID, file) - } - if err != nil { - err = fmt.Errorf("failed to get file from bucket: %w", err) - return - } - + path, unlock, err = a.filestorage.GetFile(ctx, bucketID, file) return filepath.Join(path, file), unlock, nil } -func (a *FilestorageAdapter) Read( +// ReadFile +// returns reader to file +// if the file does not exist, the error will be returned +func (a *FilestorageAdapter) ReadFile( ctx context.Context, bucketID bucket.ID, file string, - ttl time.Duration, - downloadEndpoint string, ) (r io.Reader, unlock func(), err error) { - path, unlock, err := a.Locate(ctx, bucketID, file, ttl, downloadEndpoint) + path, unlock, err := a.LocateFile(ctx, bucketID, file) if err != nil { return } diff --git a/Exesh/internal/provider/input_provider.go b/Exesh/internal/provider/input_provider.go deleted file mode 100644 index 1d48efa1..00000000 --- a/Exesh/internal/provider/input_provider.go +++ /dev/null @@ -1,61 +0,0 @@ -package provider - -import ( - "context" - "errors" - "exesh/internal/domain/execution" - "fmt" - "io" -) - -type ( - InputProvider struct { - providers []inputProvider - } - - inputProvider interface { - SupportsType(execution.InputType) bool - Reserve(context.Context, execution.Input) (path string, commit, abort func() error, err error) - Create(context.Context, execution.Input) (w io.Writer, commit, abort func() error, err error) - Locate(context.Context, execution.Input) (path 
string, unlock func(), err error) - Read(context.Context, execution.Input) (r io.Reader, unlock func(), err error) - } -) - -var ( - ErrInputAlreadyExists = errors.New("input already exists") -) - -func NewInputProvider(providers ...inputProvider) *InputProvider { - return &InputProvider{providers: providers} -} - -func (p *InputProvider) Create(ctx context.Context, input execution.Input) (w io.Writer, commit, abort func() error, err error) { - for _, provider := range p.providers { - if provider.SupportsType(input.GetType()) { - return provider.Create(ctx, input) - } - } - err = fmt.Errorf("provider for %s input not found", input.GetType()) - return -} - -func (p *InputProvider) Locate(ctx context.Context, input execution.Input) (path string, unlock func(), err error) { - for _, provider := range p.providers { - if provider.SupportsType(input.GetType()) { - return provider.Locate(ctx, input) - } - } - err = fmt.Errorf("provider for %s input not found", input.GetType()) - return -} - -func (p *InputProvider) Read(ctx context.Context, input execution.Input) (r io.Reader, unlock func(), err error) { - for _, provider := range p.providers { - if provider.SupportsType(input.GetType()) { - return provider.Read(ctx, input) - } - } - err = fmt.Errorf("provider for %s input not found", input.GetType()) - return -} diff --git a/Exesh/internal/provider/output_provider.go b/Exesh/internal/provider/output_provider.go index a98a332d..912f40ef 100644 --- a/Exesh/internal/provider/output_provider.go +++ b/Exesh/internal/provider/output_provider.go @@ -2,54 +2,57 @@ package provider import ( "context" - "exesh/internal/domain/execution" + "exesh/internal/config" + "exesh/internal/domain/execution/job" "fmt" + "github.com/DIvanCode/filestorage/pkg/bucket" "io" ) -type ( - OutputProvider struct { - providers []outputProvider - } +type OutputProvider struct { + cfg config.OutputProviderConfig + filestorage filestorage +} - outputProvider interface { - 
SupportsType(execution.OutputType) bool - Reserve(context.Context, execution.Output) (path string, commit, abort func() error, err error) - Create(context.Context, execution.Output) (w io.Writer, commit, abort func() error, err error) - Read(context.Context, execution.Output) (r io.Reader, unlock func(), err error) +func NewOutputProvider(cfg config.OutputProviderConfig, filestorage filestorage) *OutputProvider { + return &OutputProvider{ + cfg: cfg, + filestorage: filestorage, } -) - -func NewOutputProvider(providers ...outputProvider) *OutputProvider { - return &OutputProvider{providers: providers} } -func (p *OutputProvider) Reserve(ctx context.Context, output execution.Output) (path string, commit, abort func() error, err error) { - for _, provider := range p.providers { - if provider.SupportsType(output.GetType()) { - return provider.Reserve(ctx, output) - } +func (p *OutputProvider) Reserve(ctx context.Context, jobID job.ID, file string) (path string, commit, abort func() error, err error) { + var bucketID bucket.ID + if err = bucketID.FromString(jobID.String()); err != nil { + err = fmt.Errorf("failed to create bucket id: %w", err) + return } - err = fmt.Errorf("provider for %s iutput not found", output.GetType()) - return + + ttl := p.cfg.ArtifactTTL + return p.filestorage.ReserveFile(ctx, bucketID, file, ttl) } -func (p *OutputProvider) Create(ctx context.Context, output execution.Output) (w io.Writer, commit, abort func() error, err error) { - for _, provider := range p.providers { - if provider.SupportsType(output.GetType()) { - return provider.Create(ctx, output) - } +func (p *OutputProvider) Read(ctx context.Context, jobID job.ID, file string) (r io.Reader, unlock func(), err error) { + var bucketID bucket.ID + if err = bucketID.FromString(jobID.String()); err != nil { + err = fmt.Errorf("failed to create bucket id: %w", err) + return } - err = fmt.Errorf("provider for %s iutput not found", output.GetType()) - return + + return 
p.filestorage.ReadFile(ctx, bucketID, file) } -func (p *OutputProvider) Read(ctx context.Context, output execution.Output) (r io.Reader, unlock func(), err error) { - for _, provider := range p.providers { - if provider.SupportsType(output.GetType()) { - return provider.Read(ctx, output) - } +func (p *OutputProvider) Create( + ctx context.Context, + jobID job.ID, + file string, +) (w io.Writer, commit, abort func() error, err error) { + var bucketID bucket.ID + if err = bucketID.FromString(jobID.String()); err != nil { + err = fmt.Errorf("failed to create bucket id: %w", err) + return } - err = fmt.Errorf("provider for %s output not found", output.GetType()) - return + + ttl := p.cfg.ArtifactTTL + return p.filestorage.CreateFile(ctx, bucketID, file, ttl) } diff --git a/Exesh/internal/provider/provider.go b/Exesh/internal/provider/provider.go new file mode 100644 index 00000000..181778c6 --- /dev/null +++ b/Exesh/internal/provider/provider.go @@ -0,0 +1,17 @@ +package provider + +import ( + "context" + "github.com/DIvanCode/filestorage/pkg/bucket" + "io" + "time" +) + +type filestorage interface { + DownloadBucket(context.Context, bucket.ID, time.Duration, string) error + DownloadFile(context.Context, bucket.ID, string, time.Duration, string) error + CreateFile(context.Context, bucket.ID, string, time.Duration) (io.Writer, func() error, func() error, error) + ReserveFile(context.Context, bucket.ID, string, time.Duration) (string, func() error, func() error, error) + ReadFile(context.Context, bucket.ID, string) (io.Reader, func(), error) + LocateFile(context.Context, bucket.ID, string) (string, func(), error) +} diff --git a/Exesh/internal/provider/providers/artifact_source_input_provider.go b/Exesh/internal/provider/providers/artifact_source_input_provider.go deleted file mode 100644 index be31c559..00000000 --- a/Exesh/internal/provider/providers/artifact_source_input_provider.go +++ /dev/null @@ -1,133 +0,0 @@ -package providers - -import ( - "context" - 
"exesh/internal/domain/execution" - "exesh/internal/domain/execution/inputs" - "fmt" - "io" - "time" - - "github.com/DIvanCode/filestorage/pkg/bucket" -) - -type ( - ArtifactInputProvider struct { - artifactStorageAdapter artifactInputStorageAdapter - artifactTTL time.Duration - } - - artifactInputStorageAdapter interface { - Reserve(ctx context.Context, bucketID bucket.ID, file string, ttl time.Duration) ( - path string, commit, abort func() error, err error) - Create(ctx context.Context, bucketID bucket.ID, file string, ttl time.Duration) ( - w io.Writer, commit, abort func() error, err error) - Locate(ctx context.Context, bucketID bucket.ID, file string, ttl time.Duration, downloadEndpoint string) ( - path string, unlock func(), err error) - Read(ctx context.Context, bucketID bucket.ID, file string, ttl time.Duration, downloadEndpoint string) ( - r io.Reader, unlock func(), err error) - } -) - -func NewArtifactInputProvider(artifactStorageAdapter artifactInputStorageAdapter, artifactTTL time.Duration) *ArtifactInputProvider { - return &ArtifactInputProvider{ - artifactStorageAdapter: artifactStorageAdapter, - artifactTTL: artifactTTL, - } -} - -func (p *ArtifactInputProvider) SupportsType(inputType execution.InputType) bool { - return inputType == execution.ArtifactInputType -} - -func (p *ArtifactInputProvider) Reserve(ctx context.Context, input execution.Input) (path string, commit, abort func() error, err error) { - if input.GetType() != execution.ArtifactInputType { - err = fmt.Errorf("unsupported input type %s for %s provider", input.GetType(), execution.ArtifactInputType) - return - } - var typedInput inputs.ArtifactInput - if _, ok := input.(inputs.ArtifactInput); ok { - typedInput = input.(inputs.ArtifactInput) - } else { - typedInput = *input.(*inputs.ArtifactInput) - } - - var bucketID bucket.ID - bucketID, err = p.getBucket(typedInput) - if err != nil { - return - } - - return p.artifactStorageAdapter.Reserve(ctx, bucketID, typedInput.File, 
p.artifactTTL) -} - -func (p *ArtifactInputProvider) Create(ctx context.Context, input execution.Input) (w io.Writer, commit, abort func() error, err error) { - if input.GetType() != execution.ArtifactInputType { - err = fmt.Errorf("unsupported input type %s for %s provider", input.GetType(), execution.ArtifactInputType) - return - } - var typedInput inputs.ArtifactInput - if _, ok := input.(inputs.ArtifactInput); ok { - typedInput = input.(inputs.ArtifactInput) - } else { - typedInput = *input.(*inputs.ArtifactInput) - } - - var bucketID bucket.ID - bucketID, err = p.getBucket(typedInput) - if err != nil { - return - } - - return p.artifactStorageAdapter.Create(ctx, bucketID, typedInput.File, p.artifactTTL) -} - -func (p *ArtifactInputProvider) Locate(ctx context.Context, input execution.Input) (path string, unlock func(), err error) { - if input.GetType() != execution.ArtifactInputType { - err = fmt.Errorf("unsupported input type %s for %s provider", input.GetType(), execution.ArtifactInputType) - return - } - var typedInput inputs.ArtifactInput - if _, ok := input.(inputs.ArtifactInput); ok { - typedInput = input.(inputs.ArtifactInput) - } else { - typedInput = *input.(*inputs.ArtifactInput) - } - - var bucketID bucket.ID - bucketID, err = p.getBucket(typedInput) - if err != nil { - return - } - - return p.artifactStorageAdapter.Locate(ctx, bucketID, typedInput.File, p.artifactTTL, typedInput.WorkerID) -} - -func (p *ArtifactInputProvider) Read(ctx context.Context, input execution.Input) (r io.Reader, unlock func(), err error) { - if input.GetType() != execution.ArtifactInputType { - err = fmt.Errorf("unsupported input type %s for %s provider", input.GetType(), execution.ArtifactInputType) - return - } - var typedInput inputs.ArtifactInput - if _, ok := input.(inputs.ArtifactInput); ok { - typedInput = input.(inputs.ArtifactInput) - } else { - typedInput = *input.(*inputs.ArtifactInput) - } - - var bucketID bucket.ID - bucketID, err = p.getBucket(typedInput) - 
if err != nil { - return - } - - return p.artifactStorageAdapter.Read(ctx, bucketID, typedInput.File, p.artifactTTL, typedInput.WorkerID) -} - -func (p *ArtifactInputProvider) getBucket(input inputs.ArtifactInput) (bucketID bucket.ID, err error) { - if err = bucketID.FromString(input.JobID.String()); err != nil { - err = fmt.Errorf("failed to convert job id to bucket id: %w", err) - return - } - return -} diff --git a/Exesh/internal/provider/providers/artifact_source_output_provider.go b/Exesh/internal/provider/providers/artifact_source_output_provider.go deleted file mode 100644 index 32c50f08..00000000 --- a/Exesh/internal/provider/providers/artifact_source_output_provider.go +++ /dev/null @@ -1,110 +0,0 @@ -package providers - -import ( - "context" - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/outputs" - "fmt" - "io" - "time" - - "github.com/DIvanCode/filestorage/pkg/bucket" -) - -type ( - ArtifactOutputProvider struct { - artifactStorageAdapter artifactOutputStorageAdapter - artifactTTL time.Duration - } - - artifactOutputStorageAdapter interface { - Reserve(ctx context.Context, bucketID bucket.ID, file string, ttl time.Duration) ( - path string, commit, abort func() error, err error) - Create(ctx context.Context, bucketID bucket.ID, file string, ttl time.Duration) ( - w io.Writer, commit, abort func() error, err error) - Read(ctx context.Context, bucketID bucket.ID, file string, ttl time.Duration, downloadEndpoint string) ( - r io.Reader, unlock func(), err error) - } -) - -func NewArtifactOutputProvider(artifactStorageAdapter artifactOutputStorageAdapter, artifactTTL time.Duration) *ArtifactOutputProvider { - return &ArtifactOutputProvider{ - artifactStorageAdapter: artifactStorageAdapter, - artifactTTL: artifactTTL, - } -} - -func (p *ArtifactOutputProvider) SupportsType(outputType execution.OutputType) bool { - return outputType == execution.ArtifactOutputType -} - -func (p *ArtifactOutputProvider) Reserve(ctx context.Context, 
output execution.Output) (path string, commit, abort func() error, err error) { - if output.GetType() != execution.ArtifactOutputType { - err = fmt.Errorf("unsupported output type %s for %s provider", output.GetType(), execution.ArtifactOutputType) - return - } - var typedOutput outputs.ArtifactOutput - if _, ok := output.(outputs.ArtifactOutput); ok { - typedOutput = output.(outputs.ArtifactOutput) - } else { - typedOutput = *output.(*outputs.ArtifactOutput) - } - - var bucketID bucket.ID - bucketID, err = p.getBucket(typedOutput) - if err != nil { - return - } - - return p.artifactStorageAdapter.Reserve(ctx, bucketID, typedOutput.File, p.artifactTTL) -} - -func (p *ArtifactOutputProvider) Create(ctx context.Context, output execution.Output) (w io.Writer, commit, abort func() error, err error) { - if output.GetType() != execution.ArtifactOutputType { - err = fmt.Errorf("unsupported output type %s for %s provider", output.GetType(), execution.ArtifactOutputType) - return - } - var typedOutput outputs.ArtifactOutput - if _, ok := output.(outputs.ArtifactOutput); ok { - typedOutput = output.(outputs.ArtifactOutput) - } else { - typedOutput = *output.(*outputs.ArtifactOutput) - } - - var bucketID bucket.ID - bucketID, err = p.getBucket(typedOutput) - if err != nil { - return - } - - return p.artifactStorageAdapter.Create(ctx, bucketID, typedOutput.File, p.artifactTTL) -} - -func (p *ArtifactOutputProvider) getBucket(output outputs.ArtifactOutput) (bucketID bucket.ID, err error) { - if err = bucketID.FromString(output.JobID.String()); err != nil { - err = fmt.Errorf("failed to convert job id to bucket id: %w", err) - return - } - return -} - -func (p *ArtifactOutputProvider) Read(ctx context.Context, output execution.Output) (r io.Reader, unlock func(), err error) { - if output.GetType() != execution.ArtifactOutputType { - err = fmt.Errorf("unsupported output type %s for %s provider", output.GetType(), execution.ArtifactOutputType) - return - } - var typedOutput 
outputs.ArtifactOutput - if _, ok := output.(outputs.ArtifactOutput); ok { - typedOutput = output.(outputs.ArtifactOutput) - } else { - typedOutput = *output.(*outputs.ArtifactOutput) - } - - var bucketID bucket.ID - bucketID, err = p.getBucket(typedOutput) - if err != nil { - return - } - - return p.artifactStorageAdapter.Read(ctx, bucketID, typedOutput.File, p.artifactTTL, "") -} diff --git a/Exesh/internal/provider/providers/filestorage_bucket_input_provider.go b/Exesh/internal/provider/providers/filestorage_bucket_input_provider.go deleted file mode 100644 index b3e778f6..00000000 --- a/Exesh/internal/provider/providers/filestorage_bucket_input_provider.go +++ /dev/null @@ -1,101 +0,0 @@ -package providers - -import ( - "context" - "exesh/internal/domain/execution" - "exesh/internal/domain/execution/inputs" - "fmt" - "io" - "time" - - "github.com/DIvanCode/filestorage/pkg/bucket" -) - -type ( - FilestorageBucketInputProvider struct { - filestorageAdapter filestorageAdapter - artifactTTL time.Duration - } - - filestorageAdapter interface { - Reserve(ctx context.Context, bucketID bucket.ID, file string, ttl time.Duration) ( - path string, commit, abort func() error, err error) - Create(ctx context.Context, bucketID bucket.ID, file string, ttl time.Duration) ( - w io.Writer, commit, abort func() error, err error) - Locate(ctx context.Context, bucketID bucket.ID, file string, ttl time.Duration, downloadEndpoint string) ( - path string, unlock func(), err error) - Read(ctx context.Context, bucketID bucket.ID, file string, ttl time.Duration, downloadEndpoint string) ( - r io.Reader, unlock func(), err error) - } -) - -func NewFilestorageBucketInputProvider(filestorageAdapter filestorageAdapter, artifactTTL time.Duration) *FilestorageBucketInputProvider { - return &FilestorageBucketInputProvider{ - filestorageAdapter: filestorageAdapter, - artifactTTL: artifactTTL, - } -} - -func (p *FilestorageBucketInputProvider) SupportsType(inputType execution.InputType) bool { - 
return inputType == execution.FilestorageBucketInputType -} - -func (p *FilestorageBucketInputProvider) Reserve(ctx context.Context, input execution.Input) (path string, commit, abort func() error, err error) { - if input.GetType() != execution.FilestorageBucketInputType { - err = fmt.Errorf("unsupported input type %s for %s provider", input.GetType(), execution.FilestorageBucketInputType) - return - } - var typedInput inputs.FilestorageBucketInput - if _, ok := input.(inputs.FilestorageBucketInput); ok { - typedInput = input.(inputs.FilestorageBucketInput) - } else { - typedInput = *input.(*inputs.FilestorageBucketInput) - } - - return p.filestorageAdapter.Reserve(ctx, typedInput.BucketID, typedInput.File, p.artifactTTL) -} - -func (p *FilestorageBucketInputProvider) Create(ctx context.Context, input execution.Input) (w io.Writer, commit, abort func() error, err error) { - if input.GetType() != execution.FilestorageBucketInputType { - err = fmt.Errorf("unsupported input type %s for %s provider", input.GetType(), execution.FilestorageBucketInputType) - return - } - var typedInput inputs.FilestorageBucketInput - if _, ok := input.(inputs.FilestorageBucketInput); ok { - typedInput = input.(inputs.FilestorageBucketInput) - } else { - typedInput = *input.(*inputs.FilestorageBucketInput) - } - - return p.filestorageAdapter.Create(ctx, typedInput.BucketID, typedInput.File, p.artifactTTL) -} - -func (p *FilestorageBucketInputProvider) Locate(ctx context.Context, input execution.Input) (path string, unlock func(), err error) { - if input.GetType() != execution.FilestorageBucketInputType { - err = fmt.Errorf("unsupported input type %s for %s provider", input.GetType(), execution.FilestorageBucketInputType) - return - } - var typedInput inputs.FilestorageBucketInput - if _, ok := input.(inputs.FilestorageBucketInput); ok { - typedInput = input.(inputs.FilestorageBucketInput) - } else { - typedInput = *input.(*inputs.FilestorageBucketInput) - } - - return 
p.filestorageAdapter.Locate(ctx, typedInput.BucketID, typedInput.File, p.artifactTTL, typedInput.DownloadEndpoint) -} - -func (p *FilestorageBucketInputProvider) Read(ctx context.Context, input execution.Input) (r io.Reader, unlock func(), err error) { - if input.GetType() != execution.FilestorageBucketInputType { - err = fmt.Errorf("unsupported input type %s for %s provider", input.GetType(), execution.FilestorageBucketInputType) - return - } - var typedInput inputs.FilestorageBucketInput - if _, ok := input.(inputs.FilestorageBucketInput); ok { - typedInput = input.(inputs.FilestorageBucketInput) - } else { - typedInput = *input.(*inputs.FilestorageBucketInput) - } - - return p.filestorageAdapter.Read(ctx, typedInput.BucketID, typedInput.File, p.artifactTTL, typedInput.DownloadEndpoint) -} diff --git a/Exesh/internal/provider/source_provider.go b/Exesh/internal/provider/source_provider.go new file mode 100644 index 00000000..0ca9b541 --- /dev/null +++ b/Exesh/internal/provider/source_provider.go @@ -0,0 +1,133 @@ +package provider + +import ( + "context" + "exesh/internal/config" + "exesh/internal/domain/execution/source" + "exesh/internal/domain/execution/source/sources" + "fmt" + "github.com/DIvanCode/filestorage/pkg/bucket" + "io" + "sync" +) + +type SourceProvider struct { + cfg config.SourceProviderConfig + filestorage filestorage + + mu sync.Mutex + srcs map[source.ID]string +} + +func NewSourceProvider(cfg config.SourceProviderConfig, filestorage filestorage) *SourceProvider { + return &SourceProvider{ + cfg: cfg, + filestorage: filestorage, + + mu: sync.Mutex{}, + srcs: make(map[source.ID]string), + } +} + +func (p *SourceProvider) SaveSource(ctx context.Context, src sources.Source) error { + switch src.GetType() { + case source.Inline: + typedSrc := src.AsInline() + + sourceID := src.GetID() + var bucketID bucket.ID + if err := bucketID.FromString(sourceID.String()); err != nil { + return fmt.Errorf("failed to calculate bucket id for inline input: %w", 
err) + } + file := bucketID.String() + bucketTTL := p.cfg.FilestorageBucketTTL + + w, commit, abort, err := p.filestorage.CreateFile(ctx, bucketID, file, bucketTTL) + if err != nil { + return fmt.Errorf("failed to create file: %w", err) + } + + if _, err := w.Write([]byte(typedSrc.Content)); err != nil { + _ = abort() + return fmt.Errorf("failed to write content: %w", err) + } + + if err := commit(); err != nil { + _ = abort() + return fmt.Errorf("failed to commit file creation: %w", err) + } + + p.saveSource(src.GetID(), file) + case source.FilestorageBucketFile: + typedSrc := src.AsFilestorageBucketFile() + + bucketID := typedSrc.BucketID + file := typedSrc.File + ttl := p.cfg.FilestorageBucketTTL + downloadEndpoint := typedSrc.DownloadEndpoint + + if err := p.filestorage.DownloadFile(ctx, bucketID, file, ttl, downloadEndpoint); err != nil { + return fmt.Errorf("failed to download file %s: %w", bucketID, err) + } + + p.saveSource(src.GetID(), file) + default: + return fmt.Errorf("unknown source type '%s'", src.GetType()) + } + + return nil +} + +func (p *SourceProvider) saveSource(sourceID source.ID, file string) { + p.mu.Lock() + defer p.mu.Unlock() + + p.srcs[sourceID] = file +} + +func (p *SourceProvider) RemoveSource(ctx context.Context, src sources.Source) { + p.mu.Lock() + defer p.mu.Unlock() + + delete(p.srcs, src.GetID()) +} + +func (p *SourceProvider) getSourceFile(ctx context.Context, sourceID source.ID) (string, bool) { + p.mu.Lock() + defer p.mu.Unlock() + + file, ok := p.srcs[sourceID] + return file, ok +} + +func (p *SourceProvider) Locate(ctx context.Context, sourceID source.ID) (path string, unlock func(), err error) { + file, ok := p.getSourceFile(ctx, sourceID) + if !ok { + err = fmt.Errorf("source %s not found", sourceID.String()) + return + } + + var bucketID bucket.ID + if err = bucketID.FromString(sourceID.String()); err != nil { + err = fmt.Errorf("failed to calculate bucket id: %w", err) + return + } + + return 
p.filestorage.LocateFile(ctx, bucketID, file) +} + +func (p *SourceProvider) Read(ctx context.Context, sourceID source.ID) (r io.Reader, unlock func(), err error) { + file, ok := p.getSourceFile(ctx, sourceID) + if !ok { + err = fmt.Errorf("source %s not found", sourceID.String()) + return + } + + var bucketID bucket.ID + if err = bucketID.FromString(sourceID.String()); err != nil { + err = fmt.Errorf("failed to calculate bucket id: %w", err) + return + } + + return p.filestorage.ReadFile(ctx, bucketID, file) +} diff --git a/Exesh/internal/registry/artifact_registry.go b/Exesh/internal/registry/artifact_registry.go index 928181ce..0480b644 100644 --- a/Exesh/internal/registry/artifact_registry.go +++ b/Exesh/internal/registry/artifact_registry.go @@ -2,7 +2,7 @@ package registry import ( "exesh/internal/config" - "exesh/internal/domain/execution" + "exesh/internal/domain/execution/job" "fmt" "log/slog" "math/rand/v2" @@ -18,7 +18,7 @@ type ( workerPool workerPool mu sync.Mutex - workerArtifacts map[string]map[execution.JobID]time.Time + workerArtifacts map[string]map[job.ID]time.Time } workerPool interface { @@ -33,11 +33,11 @@ func NewArtifactRegistry(log *slog.Logger, cfg config.ArtifactRegistryConfig, wo workerPool: workerPool, - workerArtifacts: make(map[string]map[execution.JobID]time.Time), + workerArtifacts: make(map[string]map[job.ID]time.Time), } } -func (r *ArtifactRegistry) GetWorker(jobID execution.JobID) (workerID string, err error) { +func (r *ArtifactRegistry) GetWorker(jobID job.ID) (workerID string, err error) { r.mu.Lock() defer r.mu.Unlock() @@ -61,13 +61,13 @@ func (r *ArtifactRegistry) GetWorker(jobID execution.JobID) (workerID string, er return } -func (r *ArtifactRegistry) PutArtifact(workerID string, jobID execution.JobID) { +func (r *ArtifactRegistry) PutArtifact(workerID string, jobID job.ID) { r.mu.Lock() defer r.mu.Unlock() artifacts := r.workerArtifacts[workerID] if artifacts == nil { - artifacts = make(map[execution.JobID]time.Time) + 
artifacts = make(map[job.ID]time.Time) } trashTime := time.Now().Add(r.cfg.ArtifactTTL) artifacts[jobID] = trashTime diff --git a/Exesh/internal/scheduler/execution_scheduler.go b/Exesh/internal/scheduler/execution_scheduler.go index 80cc9048..5aa0dad1 100644 --- a/Exesh/internal/scheduler/execution_scheduler.go +++ b/Exesh/internal/scheduler/execution_scheduler.go @@ -5,7 +5,14 @@ import ( "errors" "exesh/internal/config" "exesh/internal/domain/execution" + "exesh/internal/domain/execution/input" + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/job/jobs" + "exesh/internal/domain/execution/message/messages" + "exesh/internal/domain/execution/result/results" + "exesh/internal/domain/execution/source/sources" "fmt" + "github.com/DIvanCode/filestorage/pkg/bucket" "log/slog" "sync/atomic" "time" @@ -21,7 +28,9 @@ type ( unitOfWork unitOfWork executionStorage executionStorage - jobFactory jobFactory + executionFactory executionFactory + artifactRegistry artifactRegistry + jobScheduler jobScheduler messageFactory messageFactory @@ -37,28 +46,32 @@ type ( } executionStorage interface { - GetExecutionForUpdate(context.Context, execution.ID) (*execution.Execution, error) - GetExecutionForSchedule(context.Context, time.Time) (*execution.Execution, error) - SaveExecution(context.Context, execution.Execution) error + GetExecutionForUpdate(context.Context, execution.ID) (*execution.Definition, error) + GetExecutionForSchedule(context.Context, time.Time) (*execution.Definition, error) + SaveExecution(context.Context, execution.Definition) error + } + + executionFactory interface { + Create(context.Context, execution.Definition) (*execution.Execution, error) } - jobFactory interface { - Create(context.Context, *execution.Context, execution.Step) (execution.Job, error) + artifactRegistry interface { + GetWorker(job.ID) (workerID string, err error) } jobScheduler interface { - Schedule(context.Context, execution.Job, jobCallback) + 
Schedule(context.Context, jobs.Job, []sources.Source, jobCallback) } messageFactory interface { - CreateExecutionStarted(*execution.Context) execution.Message - CreateForStep(*execution.Context, execution.Step, execution.Result) (execution.Message, error) - CreateExecutionFinished(*execution.Context) execution.Message - CreateExecutionFinishedError(*execution.Context, string) execution.Message + CreateExecutionStarted(execution.ID) messages.Message + CreateForJob(execution.ID, job.DefinitionName, results.Result) (messages.Message, error) + CreateExecutionFinished(execution.ID) messages.Message + CreateExecutionFinishedError(execution.ID, string) messages.Message } messageSender interface { - Send(context.Context, execution.Message) error + Send(context.Context, messages.Message) error } ) @@ -67,7 +80,8 @@ func NewExecutionScheduler( cfg config.ExecutionSchedulerConfig, unitOfWork unitOfWork, executionStorage executionStorage, - jobFactory jobFactory, + executionFactory executionFactory, + artifactRegistry artifactRegistry, jobScheduler jobScheduler, messageFactory messageFactory, messageSender messageSender, @@ -79,7 +93,9 @@ func NewExecutionScheduler( unitOfWork: unitOfWork, executionStorage: executionStorage, - jobFactory: jobFactory, + executionFactory: executionFactory, + artifactRegistry: artifactRegistry, + jobScheduler: jobScheduler, messageFactory: messageFactory, @@ -105,25 +121,17 @@ func (s *ExecutionScheduler) RegisterMetrics(r prometheus.Registerer) error { } func (s *ExecutionScheduler) Start(ctx context.Context) { - go func() { - err := s.runExecutionScheduler(ctx) - if errors.Is(err, context.Canceled) { - err = nil - } - if err != nil { - s.log.Error("execution scheduler exited with error", slog.Any("error", err)) - } - }() + go s.runExecutionScheduler(ctx) } -func (s *ExecutionScheduler) runExecutionScheduler(ctx context.Context) error { +func (s *ExecutionScheduler) runExecutionScheduler(ctx context.Context) { for { timer := 
time.NewTicker(s.cfg.ExecutionsInterval) select { case <-ctx.Done(): s.log.Info("exit execution scheduler") - return ctx.Err() + return case <-timer.C: break } @@ -137,28 +145,29 @@ func (s *ExecutionScheduler) runExecutionScheduler(ctx context.Context) error { s.changeNowExecutions(+1) if err := s.unitOfWork.Do(ctx, func(ctx context.Context) error { - e, err := s.executionStorage.GetExecutionForSchedule(ctx, time.Now().Add(-s.cfg.ExecutionRetryAfter)) + def, err := s.executionStorage.GetExecutionForSchedule(ctx, time.Now().Add(-s.cfg.ExecutionRetryAfter)) if err != nil { return fmt.Errorf("failed to get execution for schedule from storage: %w", err) } - if e == nil { + if def == nil { s.changeNowExecutions(-1) s.log.Debug("no executions to schedule") return nil } - e.SetScheduled(time.Now()) - - execCtx, err := e.BuildContext() + ex, err := s.executionFactory.Create(ctx, *def) if err != nil { - return fmt.Errorf("failed to build execution context: %w", err) + return fmt.Errorf("failed to create execution: %w", err) } - if err = s.scheduleExecution(ctx, &execCtx); err != nil { + + ex.SetScheduled(time.Now()) + + if err = s.scheduleExecution(ctx, ex); err != nil { return fmt.Errorf("failed to schedule execution: %w", err) } - if err = s.executionStorage.SaveExecution(ctx, *e); err != nil { - return fmt.Errorf("failed to update execution in storage %s: %w", e.ID.String(), err) + if err = s.executionStorage.SaveExecution(ctx, ex.Definition); err != nil { + return fmt.Errorf("failed to update execution in storage %s: %w", def.ID.String(), err) } return nil @@ -169,16 +178,16 @@ func (s *ExecutionScheduler) runExecutionScheduler(ctx context.Context) error { } } -func (s *ExecutionScheduler) scheduleExecution(ctx context.Context, execCtx *execution.Context) error { - s.log.Info("schedule execution", slog.String("execution_id", execCtx.ExecutionID.String())) +func (s *ExecutionScheduler) scheduleExecution(ctx context.Context, ex *execution.Execution) error { + 
s.log.Info("schedule execution", slog.String("execution_id", ex.ID.String())) - msg := s.messageFactory.CreateExecutionStarted(execCtx) + msg := s.messageFactory.CreateExecutionStarted(ex.ID) if err := s.messageSender.Send(ctx, msg); err != nil { return fmt.Errorf("failed to send execution started message: %w", err) } - for _, step := range execCtx.PickSteps() { - if err := s.scheduleStep(ctx, execCtx, step); err != nil { + for _, jb := range ex.PickJobs() { + if err := s.scheduleJob(ctx, ex, jb); err != nil { return err } } @@ -186,72 +195,102 @@ func (s *ExecutionScheduler) scheduleExecution(ctx context.Context, execCtx *exe return nil } -func (s *ExecutionScheduler) scheduleStep( +func (s *ExecutionScheduler) scheduleJob( ctx context.Context, - execCtx *execution.Context, - step execution.Step, + ex *execution.Execution, + jb jobs.Job, ) error { - if execCtx.IsDone() { + if ex.IsDone() { return nil } - s.log.Info("schedule step", slog.Any("step", step.GetName())) + s.log.Info("schedule job", slog.Any("id", jb.GetID())) + + srcs := make([]sources.Source, 0) + for _, in := range jb.GetInputs() { + if in.Type == input.Artifact { + var jobID job.ID + if err := jobID.FromString(in.SourceID.String()); err != nil { + return fmt.Errorf("failed to convert artifact source name to job id: %w", err) + } + var bucketID bucket.ID + if err := bucketID.FromString(jobID.String()); err != nil { + return fmt.Errorf("failed to convert artifact id to bucket id: %w", err) + } + workerID, err := s.artifactRegistry.GetWorker(jobID) + if err != nil { + return fmt.Errorf("failed to get worker for job %s: %w", jobID.String(), err) + } + out, ok := ex.OutputByJob[jobID] + if !ok { + return fmt.Errorf("failed to find output for job %s", jobID.String()) + } + file := out.File + + src := sources.NewFilestorageBucketFileSource(in.SourceID, bucketID, workerID, file) + srcs = append(srcs, src) + continue + } + + src, ok := ex.SourceByID[in.SourceID] + if !ok { + s.log.Error("failed to find 
source for job", + slog.Any("source", in.SourceID), + slog.Any("job", jb.GetID()), + slog.Any("execution", ex.ID)) + return fmt.Errorf("failed to find source for job") + } - job, err := s.jobFactory.Create(ctx, execCtx, step) - if err != nil { - s.log.Error("failed to create job for step", slog.Any("step_name", step.GetName()), slog.Any("error", err)) - return fmt.Errorf("failed to create job for step %s: %w", step.GetName(), err) + srcs = append(srcs, src) } - s.jobScheduler.Schedule(ctx, job, func(ctx context.Context, result execution.Result) { - if result.GetError() != nil { - s.failStep(ctx, execCtx, step, result) + s.jobScheduler.Schedule(ctx, jb, srcs, func(ctx context.Context, res results.Result) { + if res.GetError() != nil { + s.failJob(ctx, ex, jb, res) } else { - s.doneStep(ctx, execCtx, step, result) + s.doneJob(ctx, ex, jb, res) } }) - execCtx.ScheduledStep(step, job) - return nil } -func (s *ExecutionScheduler) failStep( +func (s *ExecutionScheduler) failJob( ctx context.Context, - execCtx *execution.Context, - step execution.Step, - result execution.Result, + ex *execution.Execution, + jb jobs.Job, + res results.Result, ) { - if execCtx.IsDone() { + if ex.IsDone() { return } - s.log.Info("fail step", - slog.Any("step", step.GetName()), - slog.Any("execution", execCtx.ExecutionID.String()), - slog.Any("error", result.GetError()), + s.log.Info("fail job", + slog.Any("job", jb.GetID()), + slog.Any("execution", ex.ID.String()), + slog.Any("error", res.GetError()), ) - s.finishExecution(ctx, execCtx, result.GetError()) + s.finishExecution(ctx, ex, res.GetError()) } -func (s *ExecutionScheduler) doneStep( +func (s *ExecutionScheduler) doneJob( ctx context.Context, - execCtx *execution.Context, - step execution.Step, - result execution.Result, + ex *execution.Execution, + jb jobs.Job, + res results.Result, ) { - if execCtx.IsDone() { + if ex.IsDone() { return } - s.log.Info("done step", - slog.Any("step", step.GetName()), - slog.Any("execution", 
execCtx.ExecutionID.String()), + s.log.Info("done job", + slog.Any("job", jb.GetID()), + slog.Any("execution", ex.ID.String()), ) if err := s.unitOfWork.Do(ctx, func(ctx context.Context) error { - e, err := s.executionStorage.GetExecutionForUpdate(ctx, execCtx.ExecutionID) + e, err := s.executionStorage.GetExecutionForUpdate(ctx, ex.ID) if err != nil { return fmt.Errorf("failed to get execution for update from storage: %w", err) } @@ -259,16 +298,17 @@ func (s *ExecutionScheduler) doneStep( return fmt.Errorf("failed to get execution for update from storage: not found") } - msg, err := s.messageFactory.CreateForStep(execCtx, step, result) + jobName := ex.JobDefinitionByID[jb.GetID()].GetName() + msg, err := s.messageFactory.CreateForJob(ex.ID, jobName, res) if err != nil { - return fmt.Errorf("failed to create message for step: %w", err) + return fmt.Errorf("failed to create message for job: %w", err) } if err = s.messageSender.Send(ctx, msg); err != nil { return fmt.Errorf("failed to send message for step: %w", err) } - execCtx.DoneStep(step.GetName()) + ex.DoneJob(jb.GetID(), res.GetStatus()) e.SetScheduled(time.Now()) @@ -277,81 +317,63 @@ func (s *ExecutionScheduler) doneStep( } return nil }); err != nil { - s.log.Error("failed to update execution in storage for done step", slog.Any("error", err)) - s.finishExecution( - ctx, - execCtx, - fmt.Errorf("failed to update execution in storage for done step %s: %w", step.GetName(), err)) + s.log.Error("failed to update execution in storage for done job", slog.Any("error", err)) + s.finishExecution(ctx, ex, + fmt.Errorf("failed to update execution in storage for done job %s: %w", jb.GetID(), err)) return } - if execCtx.IsDone() || result.ShouldFinishExecution() { - s.finishExecution(ctx, execCtx, nil) + if ex.IsDone() { + s.finishExecution(ctx, ex, nil) return } - for _, step = range execCtx.PickSteps() { - if err := s.scheduleStep(ctx, execCtx, step); err != nil { - s.log.Error("failed to schedule step", - 
slog.Any("step", step.GetName()), + for _, jb = range ex.PickJobs() { + if err := s.scheduleJob(ctx, ex, jb); err != nil { + s.log.Error("failed to schedule job", + slog.Any("job", jb.GetID()), slog.Any("error", err)) - s.finishExecution(ctx, execCtx, fmt.Errorf("failed to schedule step %s: %w", step.GetName(), err)) + s.finishExecution(ctx, ex, fmt.Errorf("failed to schedule job %s: %w", jb.GetID(), err)) } } } -func (s *ExecutionScheduler) getNowExecutions() int { - return int(s.nowExecutions.Load()) -} - -func (s *ExecutionScheduler) changeNowExecutions(delta int) { - s.nowExecutions.Add(int64(delta)) -} - func (s *ExecutionScheduler) finishExecution( ctx context.Context, - execCtx *execution.Context, - execError error, + ex *execution.Execution, + exError error, ) { - if execCtx.IsForceDone() { + if ex.IsForceFailed() { return } - if execError == nil { - s.log.Info("finish execution", slog.String("execution", execCtx.ExecutionID.String())) + if exError == nil { + s.log.Info("finish execution", slog.String("execution", ex.ID.String())) } else { s.log.Warn("finish execution with error", - slog.String("execution", execCtx.ExecutionID.String()), - slog.Any("error", execError)) + slog.String("execution", ex.ID.String()), + slog.Any("error", exError)) } defer s.changeNowExecutions(-1) - execCtx.ForceDone() + ex.ForceFail() if err := s.unitOfWork.Do(ctx, func(ctx context.Context) error { - e, err := s.executionStorage.GetExecutionForUpdate(ctx, execCtx.ExecutionID) - if err != nil { - return fmt.Errorf("failed to get execution for update from storage: %w", err) - } - if e == nil { - return fmt.Errorf("failed to get execution for update from storage: not found") - } - - var msg execution.Message - if execError == nil { - msg = s.messageFactory.CreateExecutionFinished(execCtx) + var msg messages.Message + if exError == nil { + msg = s.messageFactory.CreateExecutionFinished(ex.ID) } else { - msg = s.messageFactory.CreateExecutionFinishedError(execCtx, execError.Error()) 
+ msg = s.messageFactory.CreateExecutionFinishedError(ex.ID, exError.Error()) } - if err = s.messageSender.Send(ctx, msg); err != nil { + if err := s.messageSender.Send(ctx, msg); err != nil { return fmt.Errorf("failed to send execution finished message: %w", err) } - e.SetFinished(time.Now()) + ex.SetFinished(time.Now()) - if err = s.executionStorage.SaveExecution(ctx, *e); err != nil { + if err := s.executionStorage.SaveExecution(ctx, ex.Definition); err != nil { return err } return nil @@ -360,3 +382,11 @@ func (s *ExecutionScheduler) finishExecution( return } } + +func (s *ExecutionScheduler) getNowExecutions() int { + return int(s.nowExecutions.Load()) +} + +func (s *ExecutionScheduler) changeNowExecutions(delta int) { + s.nowExecutions.Add(int64(delta)) +} diff --git a/Exesh/internal/scheduler/job_scheduler.go b/Exesh/internal/scheduler/job_scheduler.go index e705fb50..4b919347 100644 --- a/Exesh/internal/scheduler/job_scheduler.go +++ b/Exesh/internal/scheduler/job_scheduler.go @@ -2,58 +2,74 @@ package scheduler import ( "context" - "exesh/internal/domain/execution" + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/job/jobs" + "exesh/internal/domain/execution/result/results" + "exesh/internal/domain/execution/source/sources" "exesh/internal/lib/queue" "log/slog" + "sync" ) type ( JobScheduler struct { log *slog.Logger - scheduledJobs queue.Queue[execution.Job] - jobCallbacks map[execution.JobID]*queue.Queue[jobCallback] + mu sync.Mutex + + scheduledJobs queue.Queue[jobs.Job] + jobSources map[job.ID][]sources.Source + jobCallback map[job.ID]jobCallback } - jobCallback func(context.Context, execution.Result) + jobCallback func(context.Context, results.Result) ) func NewJobScheduler(log *slog.Logger) *JobScheduler { return &JobScheduler{ log: log, - scheduledJobs: *queue.NewQueue[execution.Job](), - jobCallbacks: make(map[execution.JobID]*queue.Queue[jobCallback]), + mu: sync.Mutex{}, + + scheduledJobs: *queue.NewQueue[jobs.Job](), + 
jobSources: make(map[job.ID][]sources.Source), + jobCallback: make(map[job.ID]jobCallback), } } -func (s *JobScheduler) Schedule(ctx context.Context, job execution.Job, onJobDone jobCallback) { - s.scheduledJobs.Enqueue(job) +func (s *JobScheduler) Schedule(ctx context.Context, jb jobs.Job, srcs []sources.Source, onJobDone jobCallback) { + s.mu.Lock() + defer s.mu.Unlock() - jobCallbacks := s.jobCallbacks[job.GetID()] - if jobCallbacks == nil { - jobCallbacks = queue.NewQueue[jobCallback]() - } - jobCallbacks.Enqueue(onJobDone) - s.jobCallbacks[job.GetID()] = jobCallbacks + s.scheduledJobs.Enqueue(jb) + s.jobSources[jb.GetID()] = srcs + s.jobCallback[jb.GetID()] = onJobDone } -func (s *JobScheduler) PickJob(ctx context.Context, workerID string) *execution.Job { - return s.scheduledJobs.Dequeue() +func (s *JobScheduler) PickJob(ctx context.Context, workerID string) (*jobs.Job, []sources.Source) { + s.mu.Lock() + defer s.mu.Unlock() + + jb := s.scheduledJobs.Dequeue() + if jb == nil { + return jb, nil + } + + srcs := s.jobSources[jb.GetID()] + return jb, srcs } -func (s *JobScheduler) DoneJob(ctx context.Context, workerID string, result execution.Result) { - jobID := result.GetJobID() +func (s *JobScheduler) DoneJob(ctx context.Context, workerID string, res results.Result) { + s.mu.Lock() + defer s.mu.Unlock() - jobCallbacks := s.jobCallbacks[jobID] - if jobCallbacks == nil { - return - } + jobID := res.GetJobID() - jobCallback := jobCallbacks.Dequeue() - if jobCallback == nil { - return + if _, ok := s.jobSources[jobID]; ok { + delete(s.jobSources, jobID) + } + if callback, ok := s.jobCallback[jobID]; ok { + delete(s.jobCallback, jobID) + callback(ctx, res) } - - (*jobCallback)(ctx, result) } diff --git a/Exesh/internal/sender/message_sender.go b/Exesh/internal/sender/message_sender.go index 5d62043c..629e565c 100644 --- a/Exesh/internal/sender/message_sender.go +++ b/Exesh/internal/sender/message_sender.go @@ -4,7 +4,7 @@ import ( "context" "encoding/json" 
"exesh/internal/config" - "exesh/internal/domain/execution" + "exesh/internal/domain/execution/message/messages" "exesh/internal/domain/outbox" "fmt" "log/slog" @@ -64,7 +64,7 @@ func (s *KafkaSender) Start(ctx context.Context) { go s.run(ctx) } -func (s *KafkaSender) Send(ctx context.Context, msg execution.Message) error { +func (s *KafkaSender) Send(ctx context.Context, msg messages.Message) error { payload, err := json.Marshal(msg) if err != nil { return fmt.Errorf("failed to marshal message: %w", err) diff --git a/Exesh/internal/storage/postgres/execution_storage.go b/Exesh/internal/storage/postgres/execution_storage.go index 8afcdef8..ae9d1456 100644 --- a/Exesh/internal/storage/postgres/execution_storage.go +++ b/Exesh/internal/storage/postgres/execution_storage.go @@ -3,10 +3,8 @@ package postgres import ( "context" "database/sql" - "encoding/json" "errors" "exesh/internal/domain/execution" - "exesh/internal/domain/execution/steps" "fmt" "log/slog" "time" @@ -20,7 +18,8 @@ const ( createExecutionTableQuery = ` CREATE TABLE IF NOT EXISTS Executions( id varchar(36) PRIMARY KEY, - steps jsonb, + stages jsonb, + sources jsonb, status varchar(32), created_at timestamp, scheduled_at timestamp NULL, @@ -29,18 +28,18 @@ const ( ` insertExecutionQuery = ` - INSERT INTO Executions(id, steps, status, created_at, scheduled_at, finished_at) - VALUES ($1, $2, $3, $4, $5, $6); + INSERT INTO Executions(id, stages, sources, status, created_at, scheduled_at, finished_at) + VALUES ($1, $2, $3, $4, $5, $6, $7); ` selectExecutionForUpdateQuery = ` - SELECT id, steps, status, created_at, scheduled_at, finished_at FROM Executions + SELECT id, stages, sources, status, created_at, scheduled_at, finished_at FROM Executions WHERE id = $1 FOR UPDATE ` selectExecutionForScheduleQuery = ` - SELECT id, steps, status, created_at, scheduled_at, finished_at FROM Executions + SELECT id, stages, sources, status, created_at, scheduled_at, finished_at FROM Executions WHERE status = $1 OR (status 
= $2 AND scheduled_at < $3) ORDER BY created_at LIMIT 1 @@ -48,7 +47,7 @@ const ( ` updateExecutionQuery = ` - UPDATE Executions SET steps=$2, status=$3, created_at=$4, scheduled_at=$5, finished_at=$6 + UPDATE Executions SET stages=$2, sources=$3, status=$4, created_at=$5, scheduled_at=$6, finished_at=$7 WHERE id=$1; ` ) @@ -63,83 +62,56 @@ func NewExecutionStorage(ctx context.Context, log *slog.Logger) (*ExecutionStora return &ExecutionStorage{log: log}, nil } -func (s *ExecutionStorage) CreateExecution(ctx context.Context, e execution.Execution) error { +func (s *ExecutionStorage) CreateExecution(ctx context.Context, ex execution.Definition) error { tx := extractTx(ctx) if _, err := tx.ExecContext(ctx, insertExecutionQuery, - e.ID, e.Steps, e.Status, e.CreatedAt, e.ScheduledAt, e.FinishedAt); err != nil { + ex.ID, ex.Stages, ex.Sources, ex.Status, ex.CreatedAt, ex.ScheduledAt, ex.FinishedAt); err != nil { return fmt.Errorf("failed to do insert execution query: %w", err) } return nil } -func (s *ExecutionStorage) GetExecutionForUpdate(ctx context.Context, id execution.ID) (e *execution.Execution, err error) { +func (s *ExecutionStorage) GetExecutionForUpdate(ctx context.Context, id execution.ID) (*execution.Definition, error) { tx := extractTx(ctx) - e = &execution.Execution{} - var eid string - var stepsRaw json.RawMessage - if err = tx.QueryRowContext(ctx, selectExecutionForUpdateQuery, id). - Scan(&eid, &stepsRaw, &e.Status, &e.CreatedAt, &e.ScheduledAt, &e.FinishedAt); err != nil { + ex := execution.Definition{} + if err := tx.QueryRowContext(ctx, selectExecutionForUpdateQuery, id). 
+ Scan(&ex.ID, &ex.Stages, &ex.Sources, &ex.Status, &ex.CreatedAt, &ex.ScheduledAt, &ex.FinishedAt); err != nil { if errors.Is(err, sql.ErrNoRows) { - e = nil - err = nil - return + return nil, nil } - err = fmt.Errorf("failed to do select execution for update query: %w", err) - return + return nil, fmt.Errorf("failed to do select execution for update query: %w", err) } - if err = e.ID.FromString(eid); err != nil { - err = fmt.Errorf("failed to unmarshal id: %w", err) - return - } - - if e.Steps, err = steps.UnmarshalStepsJSON(stepsRaw); err != nil { - err = fmt.Errorf("failed to unmarshal steps json: %w", err) - return - } - - return + return &ex, nil } -func (s *ExecutionStorage) GetExecutionForSchedule(ctx context.Context, retryBefore time.Time) (e *execution.Execution, err error) { +func (s *ExecutionStorage) GetExecutionForSchedule( + ctx context.Context, + retryBefore time.Time, +) (*execution.Definition, error) { tx := extractTx(ctx) - e = &execution.Execution{} - var eid string - var stepsRaw json.RawMessage - if err = tx.QueryRowContext(ctx, selectExecutionForScheduleQuery, - execution.StatusNewExecution, execution.StatusScheduledExecution, retryBefore). - Scan(&eid, &stepsRaw, &e.Status, &e.CreatedAt, &e.ScheduledAt, &e.FinishedAt); err != nil { + ex := execution.Definition{} + if err := tx.QueryRowContext(ctx, selectExecutionForScheduleQuery, + execution.StatusNew, execution.StatusScheduled, retryBefore). 
+ Scan(&ex.ID, &ex.Stages, &ex.Sources, &ex.Status, &ex.CreatedAt, &ex.ScheduledAt, &ex.FinishedAt); err != nil { if errors.Is(err, sql.ErrNoRows) { - e = nil - err = nil - return + return nil, nil } - err = fmt.Errorf("failed to do select execution for schedule query: %w", err) - return - } - - if err = e.ID.FromString(eid); err != nil { - err = fmt.Errorf("failed to unmarshal id: %w", err) - return - } - - if e.Steps, err = steps.UnmarshalStepsJSON(stepsRaw); err != nil { - err = fmt.Errorf("failed to unmarshal steps json: %w", err) - return + return nil, fmt.Errorf("failed to do select execution for schedule query: %w", err) } - return + return &ex, nil } -func (s *ExecutionStorage) SaveExecution(ctx context.Context, e execution.Execution) error { +func (s *ExecutionStorage) SaveExecution(ctx context.Context, ex execution.Definition) error { tx := extractTx(ctx) if _, err := tx.ExecContext(ctx, updateExecutionQuery, - e.ID, e.Steps, e.Status, e.CreatedAt, e.ScheduledAt, e.FinishedAt); err != nil { + ex.ID, ex.Stages, ex.Sources, ex.Status, ex.CreatedAt, ex.ScheduledAt, ex.FinishedAt); err != nil { return fmt.Errorf("failed to do update execution query: %w", err) } diff --git a/Exesh/internal/storage/postgres/unit_of_work.go b/Exesh/internal/storage/postgres/unit_of_work.go index 07ed021c..a85ab924 100644 --- a/Exesh/internal/storage/postgres/unit_of_work.go +++ b/Exesh/internal/storage/postgres/unit_of_work.go @@ -29,7 +29,7 @@ func (u *UnitOfWork) Do(ctx context.Context, fn func(ctx context.Context) error) } if err := fn(withTx(ctx, tx)); err != nil { - tx.Rollback() + _ = tx.Rollback() return err } diff --git a/Exesh/internal/usecase/execute/dto.go b/Exesh/internal/usecase/execute/dto.go index 6ce62bbc..84867b97 100644 --- a/Exesh/internal/usecase/execute/dto.go +++ b/Exesh/internal/usecase/execute/dto.go @@ -1,10 +1,14 @@ package execute -import "exesh/internal/domain/execution" +import ( + "exesh/internal/domain/execution" + 
"exesh/internal/domain/execution/source/sources" +) type ( Command struct { - Steps []execution.Step + Sources []sources.Definition + Stages []execution.StageDefinition } Result struct { diff --git a/Exesh/internal/usecase/execute/usecase.go b/Exesh/internal/usecase/execute/usecase.go index 243c82cc..34f2f078 100644 --- a/Exesh/internal/usecase/execute/usecase.go +++ b/Exesh/internal/usecase/execute/usecase.go @@ -3,6 +3,8 @@ package execute import ( "context" "exesh/internal/domain/execution" + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/source" "fmt" "log/slog" ) @@ -19,7 +21,7 @@ type ( } executionStorage interface { - CreateExecution(context.Context, execution.Execution) error + CreateExecution(context.Context, execution.Definition) error } ) @@ -36,17 +38,36 @@ func NewUseCase( } func (uc *UseCase) Execute(ctx context.Context, command Command) (result Result, err error) { - steps := make(map[execution.StepName]any, len(command.Steps)) - for _, step := range command.Steps { - if _, has := steps[step.GetName()]; has { - err = fmt.Errorf("two or more steps have the same name '%s'", step.GetName()) + srcs := make(map[source.DefinitionName]any, len(command.Sources)) + for _, src := range command.Sources { + if _, exists := srcs[src.GetName()]; exists { + err = fmt.Errorf("two or more sources have the same name `%s`", src.GetName()) return } - steps[step.GetName()] = struct{}{} + srcs[src.GetName()] = src + } + + stages := make(map[execution.StageName]any, len(command.Stages)) + for _, stage := range command.Stages { + if _, exists := stages[stage.Name]; exists { + err = fmt.Errorf("two or more stages have the same name '%s'", stage.Name) + return + } + stages[stage.Name] = struct{}{} + + jbs := make(map[job.DefinitionName]any, len(stage.Jobs)) + for _, jb := range stage.Jobs { + jobName := jb.GetName() + if _, exists := jbs[jobName]; exists { + err = fmt.Errorf("two or more jobs in stage '%s' have the same name '%s'", stage.Name, 
jobName) + return + } + jbs[jobName] = struct{}{} + } } err = uc.unitOfWork.Do(ctx, func(ctx context.Context) error { - e := execution.NewExecution(command.Steps) + e := execution.NewExecutionDefinition(command.Stages, command.Sources) if err = uc.executionStorage.CreateExecution(ctx, e); err != nil { return fmt.Errorf("failed to create execution in storage: %w", err) } diff --git a/Exesh/internal/usecase/heartbeat/usecase.go b/Exesh/internal/usecase/heartbeat/usecase.go index a4d6451d..72c1dfe5 100644 --- a/Exesh/internal/usecase/heartbeat/usecase.go +++ b/Exesh/internal/usecase/heartbeat/usecase.go @@ -2,14 +2,17 @@ package heartbeat import ( "context" - "exesh/internal/domain/execution" + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/job/jobs" + "exesh/internal/domain/execution/result/results" + "exesh/internal/domain/execution/source/sources" "log/slog" ) type ( Command struct { WorkerID string - DoneJobs []execution.Result + DoneJobs []results.Result FreeSlots int } @@ -26,12 +29,12 @@ type ( } jobScheduler interface { - PickJob(context.Context, string) *execution.Job - DoneJob(context.Context, string, execution.Result) + PickJob(context.Context, string) (*jobs.Job, []sources.Source) + DoneJob(context.Context, string, results.Result) } artifactRegistry interface { - PutArtifact(string, execution.JobID) + PutArtifact(string, job.ID) } ) @@ -45,7 +48,7 @@ func NewUseCase(log *slog.Logger, workerPool workerPool, jobScheduler jobSchedul } } -func (uc *UseCase) Heartbeat(ctx context.Context, command Command) ([]execution.Job, error) { +func (uc *UseCase) Heartbeat(ctx context.Context, command Command) ([]jobs.Job, []sources.Source, error) { uc.workerPool.Heartbeat(ctx, command.WorkerID) for _, jobResult := range command.DoneJobs { @@ -53,14 +56,16 @@ func (uc *UseCase) Heartbeat(ctx context.Context, command Command) ([]execution. 
uc.jobScheduler.DoneJob(ctx, command.WorkerID, jobResult) } - jobs := make([]execution.Job, 0) + jbs := make([]jobs.Job, 0) + srcs := make([]sources.Source, 0) for range command.FreeSlots { - jobSpec := uc.jobScheduler.PickJob(ctx, command.WorkerID) - if jobSpec == nil { + jb, src := uc.jobScheduler.PickJob(ctx, command.WorkerID) + if jb == nil { break } - jobs = append(jobs, *jobSpec) + jbs = append(jbs, *jb) + srcs = append(srcs, src...) } - return jobs, nil + return jbs, srcs, nil } diff --git a/Exesh/internal/worker/worker.go b/Exesh/internal/worker/worker.go index 1eebce21..b9aadb61 100644 --- a/Exesh/internal/worker/worker.go +++ b/Exesh/internal/worker/worker.go @@ -5,7 +5,9 @@ import ( "encoding/json" "exesh/internal/api/heartbeat" "exesh/internal/config" - "exesh/internal/domain/execution" + "exesh/internal/domain/execution/job/jobs" + "exesh/internal/domain/execution/result/results" + "exesh/internal/domain/execution/source/sources" "exesh/internal/lib/queue" "log/slog" "sync" @@ -20,23 +22,30 @@ type ( heartbeatClient heartbeatClient jobExecutor jobExecutor - jobs queue.Queue[execution.Job] + jobs queue.Queue[jobs.Job] + + sourceProvider sourceProvider mu sync.Mutex - doneJobs []execution.Result + doneJobs []results.Result freeSlots int } heartbeatClient interface { - Heartbeat(context.Context, string, []execution.Result, int) ([]execution.Job, error) + Heartbeat(context.Context, string, []results.Result, int) ([]jobs.Job, []sources.Source, error) + } + + sourceProvider interface { + SaveSource(ctx context.Context, src sources.Source) error + RemoveSource(ctx context.Context, src sources.Source) } jobExecutor interface { - Execute(context.Context, execution.Job) execution.Result + Execute(context.Context, jobs.Job) results.Result } ) -func NewWorker(log *slog.Logger, cfg config.WorkConfig, jobExecutor jobExecutor) *Worker { +func NewWorker(log *slog.Logger, cfg config.WorkConfig, sourceProvider sourceProvider, jobExecutor jobExecutor) *Worker { return 
&Worker{ log: log, cfg: cfg, @@ -44,10 +53,12 @@ func NewWorker(log *slog.Logger, cfg config.WorkConfig, jobExecutor jobExecutor) heartbeatClient: heartbeat.NewHeartbeatClient(cfg.CoordinatorEndpoint), jobExecutor: jobExecutor, - jobs: *queue.NewQueue[execution.Job](), + jobs: *queue.NewQueue[jobs.Job](), + + sourceProvider: sourceProvider, mu: sync.Mutex{}, - doneJobs: make([]execution.Result, 0), + doneJobs: make([]results.Result, 0), freeSlots: cfg.FreeSlots, } } @@ -80,15 +91,15 @@ func (w *Worker) runHeartbeat(ctx context.Context) { w.mu.Lock() - doneJobs := make([]execution.Result, len(w.doneJobs)) + doneJobs := make([]results.Result, len(w.doneJobs)) copy(doneJobs, w.doneJobs) - w.doneJobs = make([]execution.Result, 0) + w.doneJobs = make([]results.Result, 0) freeSlots := w.freeSlots - w.jobs.Size() w.mu.Unlock() - jobs, err := w.heartbeatClient.Heartbeat(ctx, w.cfg.WorkerID, doneJobs, freeSlots) + jbs, srcs, err := w.heartbeatClient.Heartbeat(ctx, w.cfg.WorkerID, doneJobs, freeSlots) if err != nil { w.log.Error("failed to do heartbeat request", slog.Any("err", err)) @@ -99,8 +110,14 @@ func (w *Worker) runHeartbeat(ctx context.Context) { continue } - for _, job := range jobs { - w.jobs.Enqueue(job) + for _, src := range srcs { + if err := w.sourceProvider.SaveSource(ctx, src); err != nil { + w.log.Error("failed to create source", slog.Any("err", err)) + } + } + + for _, jb := range jbs { + w.jobs.Enqueue(jb) } } } From 101deb5199317a3350a6539d5f7b6d351ddcf9c2 Mon Sep 17 00:00:00 2001 From: divancode Date: Mon, 2 Feb 2026 03:38:56 +0300 Subject: [PATCH 2/4] fix --- Exesh/config/coordinator/dev.yml | 4 +- Exesh/config/coordinator/docker.yml | 5 +- Exesh/internal/api/execute/api.go | 4 +- Exesh/internal/config/coordinator_config.go | 3 +- Exesh/internal/domain/execution/execution.go | 6 +- .../domain/execution/execution_definition.go | 8 +- Exesh/internal/domain/execution/graph.go | 156 ++++++++++++++++++ Exesh/internal/domain/execution/id.go | 19 +++ 
.../input/inputs/input_definition.go | 8 + .../internal/domain/execution/job/jobs/job.go | 8 + .../execution/job/jobs/job_definition.go | 8 + Exesh/internal/domain/execution/jobs_graph.go | 82 --------- .../execution/message/messages/message.go | 8 + .../domain/execution/result/result.go | 4 +- .../execution/result/results/check_result.go | 6 +- .../result/results/compile_result.go | 6 +- .../domain/execution/result/results/result.go | 8 + .../execution/result/results/run_result.go | 12 +- .../domain/execution/source/sources/source.go | 8 + .../source/sources/source_definition.go | 55 +++++- Exesh/internal/domain/execution/stage.go | 3 - .../domain/execution/stage_definition.go | 58 ++++++- .../internal/domain/execution/stages_graph.go | 115 ------------- Exesh/internal/factory/execution_factory.go | 12 +- .../provider/adapter/filestorage_adapter.go | 2 +- Exesh/internal/provider/source_provider.go | 55 +++--- .../internal/scheduler/execution_scheduler.go | 47 +++--- Exesh/internal/scheduler/job_scheduler.go | 23 ++- .../storage/postgres/execution_storage.go | 2 +- Exesh/internal/usecase/execute/dto.go | 4 +- Exesh/internal/usecase/execute/usecase.go | 2 +- Exesh/internal/worker/worker.go | 9 +- 32 files changed, 440 insertions(+), 310 deletions(-) create mode 100644 Exesh/internal/domain/execution/graph.go delete mode 100644 Exesh/internal/domain/execution/jobs_graph.go delete mode 100644 Exesh/internal/domain/execution/stages_graph.go diff --git a/Exesh/config/coordinator/dev.yml b/Exesh/config/coordinator/dev.yml index 77e08e88..6babaf1b 100644 --- a/Exesh/config/coordinator/dev.yml +++ b/Exesh/config/coordinator/dev.yml @@ -16,11 +16,11 @@ job_factory: run_output: output source_ttl: filestorage_bucket: 30m - inline: 30m + filestorage_endpoint: http://coordinator:5253 execution_scheduler: executions_interval: 5s max_concurrency: 10 - execution_retry_after: 300s + execution_retry_after: 30s worker_pool: worker_die_after: 10s artifact_registry: diff --git 
a/Exesh/config/coordinator/docker.yml b/Exesh/config/coordinator/docker.yml index 571056f2..1a5e6976 100644 --- a/Exesh/config/coordinator/docker.yml +++ b/Exesh/config/coordinator/docker.yml @@ -15,10 +15,13 @@ job_factory: output: compiled_binary: bin run_output: output + source_ttl: + filestorage_bucket: 30m + filestorage_endpoint: http://coordinator:5253 execution_scheduler: executions_interval: 3s max_concurrency: 10 - execution_retry_after: 300s + execution_retry_after: 30s worker_pool: worker_die_after: 10s artifact_registry: diff --git a/Exesh/internal/api/execute/api.go b/Exesh/internal/api/execute/api.go index 85ee6d67..ac602a15 100644 --- a/Exesh/internal/api/execute/api.go +++ b/Exesh/internal/api/execute/api.go @@ -8,8 +8,8 @@ import ( type ( Request struct { - Sources []sources.Definition `json:"sources"` - Stages []execution.StageDefinition `json:"stages"` + Sources sources.Definitions `json:"sources"` + Stages execution.StageDefinitions `json:"stages"` } Response struct { diff --git a/Exesh/internal/config/coordinator_config.go b/Exesh/internal/config/coordinator_config.go index b55cc51f..1806f3b3 100644 --- a/Exesh/internal/config/coordinator_config.go +++ b/Exesh/internal/config/coordinator_config.go @@ -40,8 +40,7 @@ type ( } `yaml:"output"` SourceTTL struct { FilestorageBucket time.Duration `yaml:"filestorage_bucket"` - Inline time.Duration `yaml:"inline"` - } + } `yaml:"source_ttl"` FilestorageEndpoint string `yaml:"filestorage_endpoint"` } diff --git a/Exesh/internal/domain/execution/execution.go b/Exesh/internal/domain/execution/execution.go index 04f7ff1e..6a9a1a8d 100644 --- a/Exesh/internal/domain/execution/execution.go +++ b/Exesh/internal/domain/execution/execution.go @@ -23,7 +23,7 @@ type ( OutputByJob map[job.ID]output.Output - graph *stagesGraph + graph *graph mu sync.Mutex forceFailed bool @@ -33,7 +33,7 @@ type ( func NewExecution(def Definition) *Execution { ex := Execution{ Definition: def, - Stages: make([]*Stage, 
len(def.Stages)), + Stages: make([]*Stage, 0, len(def.Stages)), JobByName: make(map[job.DefinitionName]jobs.Job), JobDefinitionByID: make(map[job.ID]jobs.Definition), @@ -48,7 +48,7 @@ func NewExecution(def Definition) *Execution { } func (ex *Execution) BuildGraph() { - ex.graph = newStagesGraph(ex.Stages) + ex.graph = newGraph(ex.Stages) } func (ex *Execution) PickJobs() []jobs.Job { diff --git a/Exesh/internal/domain/execution/execution_definition.go b/Exesh/internal/domain/execution/execution_definition.go index 756e76bd..36614907 100644 --- a/Exesh/internal/domain/execution/execution_definition.go +++ b/Exesh/internal/domain/execution/execution_definition.go @@ -8,8 +8,8 @@ import ( type ( Definition struct { ID ID - Stages []StageDefinition - Sources []sources.Definition + Stages StageDefinitions + Sources sources.Definitions Status Status CreatedAt time.Time ScheduledAt *time.Time @@ -25,7 +25,7 @@ const ( StatusFinished Status = "finished" ) -func NewExecutionDefinition(stages []StageDefinition, sources []sources.Definition) Definition { +func NewExecutionDefinition(stages StageDefinitions, sources sources.Definitions) Definition { return Definition{ ID: newID(), Stages: stages, @@ -52,5 +52,5 @@ func (def *Definition) SetFinished(finishedAt time.Time) { } def.Status = StatusFinished - def.ScheduledAt = &finishedAt + def.FinishedAt = &finishedAt } diff --git a/Exesh/internal/domain/execution/graph.go b/Exesh/internal/domain/execution/graph.go new file mode 100644 index 00000000..b2ba381d --- /dev/null +++ b/Exesh/internal/domain/execution/graph.go @@ -0,0 +1,156 @@ +package execution + +import ( + "exesh/internal/domain/execution/job" + "exesh/internal/domain/execution/job/jobs" + "sync" +) + +type graph struct { + mu sync.Mutex + + succStages map[StageName][]*Stage + doneStageDeps map[StageName]int + + stageByJobID map[job.ID]*Stage + + succJobs map[job.ID][]jobs.Job + doneJobDeps map[job.ID]int + + activeStages []*Stage + toPick map[StageName][]jobs.Job + 
+ totalJobs map[StageName]int + doneJobs map[StageName]int +} + +func newGraph(stages []*Stage) *graph { + g := graph{ + mu: sync.Mutex{}, + + succStages: make(map[StageName][]*Stage), + doneStageDeps: make(map[StageName]int), + + stageByJobID: make(map[job.ID]*Stage), + + succJobs: make(map[job.ID][]jobs.Job), + doneJobDeps: make(map[job.ID]int), + + activeStages: make([]*Stage, 0), + toPick: make(map[StageName][]jobs.Job), + + totalJobs: make(map[StageName]int), + doneJobs: make(map[StageName]int), + } + + for _, stage := range stages { + for _, dep := range stage.Deps { + if _, ok := g.succStages[dep]; !ok { + g.succStages[dep] = make([]*Stage, 0) + } + g.succStages[dep] = append(g.succStages[dep], stage) + } + + g.doneStageDeps[stage.Name] = 0 + + g.toPick[stage.Name] = make([]jobs.Job, 0) + + for _, jb := range stage.Jobs { + g.stageByJobID[jb.GetID()] = stage + + deps := jb.GetDependencies() + + for _, dep := range deps { + if _, ok := g.succJobs[dep]; !ok { + g.succJobs[dep] = make([]jobs.Job, 0) + } + g.succJobs[dep] = append(g.succJobs[dep], jb) + } + + g.doneJobDeps[jb.GetID()] = 0 + if len(deps) == 0 { + g.toPick[stage.Name] = append(g.toPick[stage.Name], jb) + } + } + + g.totalJobs[stage.Name] = len(stage.Jobs) + g.doneJobs[stage.Name] = 0 + + if len(stage.Deps) == 0 { + g.activeStages = append(g.activeStages, stage) + } + } + + return &g +} + +func (g *graph) pickJobs() []jobs.Job { + g.mu.Lock() + defer g.mu.Unlock() + + pickedJobs := make([]jobs.Job, 0) + for _, stage := range g.activeStages { + for _, jb := range g.toPick[stage.Name] { + pickedJobs = append(pickedJobs, jb) + } + g.toPick[stage.Name] = make([]jobs.Job, 0) + } + + return pickedJobs +} + +func (g *graph) doneJob(jobID job.ID, jobStatus job.Status) { + g.mu.Lock() + defer g.mu.Unlock() + + stage := g.stageByJobID[jobID] + + var jb *jobs.Job + for i := range stage.Jobs { + if stage.Jobs[i].GetID() == jobID { + jb = &stage.Jobs[i] + break + } + } + if jb == nil { + return + } + + if 
jobStatus != jb.GetSuccessStatus() { + g.activeStages = make([]*Stage, 0) + return + } + + g.doneJobs[stage.Name]++ + for _, succJob := range g.succJobs[jobID] { + g.doneJobDeps[succJob.GetID()]++ + if g.doneJobDeps[succJob.GetID()] == len(succJob.GetDependencies()) { + g.toPick[stage.Name] = append(g.toPick[stage.Name], succJob) + } + } + + if g.doneJobs[stage.Name] == g.totalJobs[stage.Name] { + activeStages := make([]*Stage, 0) + for i := range g.activeStages { + if g.activeStages[i].Name != stage.Name { + activeStages = append(activeStages, g.activeStages[i]) + } + } + + for _, succStage := range g.succStages[stage.Name] { + g.doneStageDeps[succStage.Name]++ + if g.doneStageDeps[succStage.Name] == len(succStage.Deps) { + activeStages = append(activeStages, succStage) + } + } + + g.activeStages = activeStages + } +} + +func (g *graph) isDone() bool { + g.mu.Lock() + defer g.mu.Unlock() + + return len(g.activeStages) == 0 +} diff --git a/Exesh/internal/domain/execution/id.go b/Exesh/internal/domain/execution/id.go index 9b5b66a4..f0b94fd2 100644 --- a/Exesh/internal/domain/execution/id.go +++ b/Exesh/internal/domain/execution/id.go @@ -1,7 +1,9 @@ package execution import ( + "database/sql/driver" "encoding/json" + "fmt" "github.com/google/uuid" ) @@ -39,3 +41,20 @@ func (id *ID) UnmarshalJSON(data []byte) error { *id = ID(uid) return nil } + +func (id ID) Value() (driver.Value, error) { + return id.String(), nil +} + +func (id *ID) Scan(src any) error { + switch v := src.(type) { + case string: + return id.FromString(v) + case []byte: + return id.FromString(string(v)) + case nil: + return nil + default: + return fmt.Errorf("failed to scan execution id from type %T", src) + } +} diff --git a/Exesh/internal/domain/execution/input/inputs/input_definition.go b/Exesh/internal/domain/execution/input/inputs/input_definition.go index 5c8f8694..d1d5a734 100644 --- a/Exesh/internal/domain/execution/input/inputs/input_definition.go +++ 
b/Exesh/internal/domain/execution/input/inputs/input_definition.go @@ -10,6 +10,14 @@ type Definition struct { input.IDefinition } +func (def Definition) MarshalJSON() ([]byte, error) { + if def.IDefinition == nil { + return []byte("null"), nil + } + + return json.Marshal(def.IDefinition) +} + func (def *Definition) UnmarshalJSON(data []byte) error { var details input.DefinitionDetails if err := json.Unmarshal(data, &details); err != nil { diff --git a/Exesh/internal/domain/execution/job/jobs/job.go b/Exesh/internal/domain/execution/job/jobs/job.go index 6c1b06c7..1c0a8e74 100644 --- a/Exesh/internal/domain/execution/job/jobs/job.go +++ b/Exesh/internal/domain/execution/job/jobs/job.go @@ -11,6 +11,14 @@ type Job struct { job.IJob } +func (jb Job) MarshalJSON() ([]byte, error) { + if jb.IJob == nil { + return []byte("null"), nil + } + + return json.Marshal(jb.IJob) +} + func (jb *Job) UnmarshalJSON(data []byte) error { var details job.Details if err := json.Unmarshal(data, &details); err != nil { diff --git a/Exesh/internal/domain/execution/job/jobs/job_definition.go b/Exesh/internal/domain/execution/job/jobs/job_definition.go index b2004ab3..5154d677 100644 --- a/Exesh/internal/domain/execution/job/jobs/job_definition.go +++ b/Exesh/internal/domain/execution/job/jobs/job_definition.go @@ -10,6 +10,14 @@ type Definition struct { job.IDefinition } +func (def Definition) MarshalJSON() ([]byte, error) { + if def.IDefinition == nil { + return []byte("null"), nil + } + + return json.Marshal(def.IDefinition) +} + func (def *Definition) UnmarshalJSON(data []byte) error { var details job.DefinitionDetails if err := json.Unmarshal(data, &details); err != nil { diff --git a/Exesh/internal/domain/execution/jobs_graph.go b/Exesh/internal/domain/execution/jobs_graph.go deleted file mode 100644 index f8337129..00000000 --- a/Exesh/internal/domain/execution/jobs_graph.go +++ /dev/null @@ -1,82 +0,0 @@ -package execution - -import ( - "exesh/internal/domain/execution/job" - 
"exesh/internal/domain/execution/job/jobs" - "sync" -) - -type jobsGraph struct { - mu sync.Mutex - - succJobs map[job.ID][]jobs.Job - doneDeps map[job.ID]int - - toPick []jobs.Job - - totalJobs int - doneJobs int -} - -func newJobsGraph(jbs []jobs.Job) *jobsGraph { - g := jobsGraph{ - mu: sync.Mutex{}, - - succJobs: make(map[job.ID][]jobs.Job), - doneDeps: make(map[job.ID]int), - - toPick: make([]jobs.Job, 0), - - totalJobs: len(jbs), - doneJobs: 0, - } - - for _, jb := range jbs { - deps := jb.GetDependencies() - - for _, dep := range deps { - if _, ok := g.succJobs[dep]; !ok { - g.succJobs[dep] = make([]jobs.Job, 0) - } - g.succJobs[dep] = append(g.succJobs[dep], jb) - } - - g.doneDeps[jb.GetID()] = 0 - if len(deps) == 0 { - g.toPick = append(g.toPick, jb) - } - } - - return &g -} - -func (g *jobsGraph) pickJobs() []jobs.Job { - g.mu.Lock() - defer g.mu.Unlock() - - pickedJobs := make([]jobs.Job, 0, len(g.toPick)) - copy(pickedJobs, g.toPick) - g.toPick = make([]jobs.Job, 0) - - return pickedJobs -} - -func (g *jobsGraph) doneJob(jobID job.ID) { - g.mu.Lock() - defer g.mu.Unlock() - - g.doneJobs++ - for _, succJob := range g.succJobs[jobID] { - g.doneDeps[succJob.GetID()]++ - if g.doneDeps[succJob.GetID()] == len(succJob.GetDependencies()) { - g.toPick = append(g.toPick, succJob) - } - } -} - -func (g *jobsGraph) isDone() bool { - g.mu.Lock() - defer g.mu.Unlock() - - return g.doneJobs == g.totalJobs -} diff --git a/Exesh/internal/domain/execution/message/messages/message.go b/Exesh/internal/domain/execution/message/messages/message.go index 3c4f37c4..dedf345c 100644 --- a/Exesh/internal/domain/execution/message/messages/message.go +++ b/Exesh/internal/domain/execution/message/messages/message.go @@ -10,6 +10,14 @@ type Message struct { message.IMessage } +func (msg Message) MarshalJSON() ([]byte, error) { + if msg.IMessage == nil { + return []byte("null"), nil + } + + return json.Marshal(msg.IMessage) +} + func (msg *Message) UnmarshalJSON(data []byte) error { 
var details message.Details if err := json.Unmarshal(data, &details); err != nil { diff --git a/Exesh/internal/domain/execution/result/result.go b/Exesh/internal/domain/execution/result/result.go index ac3897d5..21e55886 100644 --- a/Exesh/internal/domain/execution/result/result.go +++ b/Exesh/internal/domain/execution/result/result.go @@ -17,7 +17,7 @@ type ( Details struct { Type Type `json:"type"` - ID job.ID `json:"id"` + JobID job.ID `json:"job_id"` Status job.Status `json:"status"` DoneAt time.Time `json:"done_at"` Error string `json:"error,omitempty"` @@ -37,7 +37,7 @@ func (res *Details) GetType() Type { } func (res *Details) GetJobID() job.ID { - return res.ID + return res.JobID } func (res *Details) GetStatus() job.Status { diff --git a/Exesh/internal/domain/execution/result/results/check_result.go b/Exesh/internal/domain/execution/result/results/check_result.go index 89df6442..503e0476 100644 --- a/Exesh/internal/domain/execution/result/results/check_result.go +++ b/Exesh/internal/domain/execution/result/results/check_result.go @@ -15,7 +15,7 @@ func NewCheckResultOK(jobID job.ID) Result { &CompileResult{ Details: result.Details{ Type: result.Check, - ID: jobID, + JobID: jobID, Status: job.StatusOK, DoneAt: time.Now(), }, @@ -28,7 +28,7 @@ func NewCheckResultWA(jobID job.ID) Result { &CompileResult{ Details: result.Details{ Type: result.Check, - ID: jobID, + JobID: jobID, Status: job.StatusWA, DoneAt: time.Now(), }, @@ -41,7 +41,7 @@ func NewCheckResultErr(jobID job.ID, err string) Result { &CompileResult{ Details: result.Details{ Type: result.Check, - ID: jobID, + JobID: jobID, DoneAt: time.Now(), Error: err, }, diff --git a/Exesh/internal/domain/execution/result/results/compile_result.go b/Exesh/internal/domain/execution/result/results/compile_result.go index cea43d43..cd253221 100644 --- a/Exesh/internal/domain/execution/result/results/compile_result.go +++ b/Exesh/internal/domain/execution/result/results/compile_result.go @@ -16,7 +16,7 @@ func 
NewCompileResultOK(jobID job.ID) Result { &CompileResult{ Details: result.Details{ Type: result.Compile, - ID: jobID, + JobID: jobID, Status: job.StatusOK, DoneAt: time.Now(), }, @@ -29,7 +29,7 @@ func NewCompileResultCE(jobID job.ID, compilationError string) Result { &CompileResult{ Details: result.Details{ Type: result.Compile, - ID: jobID, + JobID: jobID, Status: job.StatusCE, DoneAt: time.Now(), }, @@ -43,7 +43,7 @@ func NewCompileResultErr(jobID job.ID, err string) Result { &CompileResult{ Details: result.Details{ Type: result.Compile, - ID: jobID, + JobID: jobID, DoneAt: time.Now(), Error: err, }, diff --git a/Exesh/internal/domain/execution/result/results/result.go b/Exesh/internal/domain/execution/result/results/result.go index 3a54601a..68f91377 100644 --- a/Exesh/internal/domain/execution/result/results/result.go +++ b/Exesh/internal/domain/execution/result/results/result.go @@ -10,6 +10,14 @@ type Result struct { result.IResult } +func (res Result) MarshalJSON() ([]byte, error) { + if res.IResult == nil { + return []byte("null"), nil + } + + return json.Marshal(res.IResult) +} + func (res *Result) UnmarshalJSON(data []byte) error { var details result.Details if err := json.Unmarshal(data, &details); err != nil { diff --git a/Exesh/internal/domain/execution/result/results/run_result.go b/Exesh/internal/domain/execution/result/results/run_result.go index cb786779..4c7fd3ef 100644 --- a/Exesh/internal/domain/execution/result/results/run_result.go +++ b/Exesh/internal/domain/execution/result/results/run_result.go @@ -17,7 +17,7 @@ func NewRunResultOK(jobID job.ID) Result { &RunResult{ Details: result.Details{ Type: result.Run, - ID: jobID, + JobID: jobID, Status: job.StatusOK, DoneAt: time.Now(), }, @@ -31,7 +31,7 @@ func NewRunResultWithOutput(jobID job.ID, out string) Result { &RunResult{ Details: result.Details{ Type: result.Run, - ID: jobID, + JobID: jobID, Status: job.StatusOK, DoneAt: time.Now(), }, @@ -46,7 +46,7 @@ func NewRunResultTL(jobID job.ID) 
Result { &RunResult{ Details: result.Details{ Type: result.Run, - ID: jobID, + JobID: jobID, Status: job.StatusTL, DoneAt: time.Now(), }, @@ -60,7 +60,7 @@ func NewRunResultML(jobID job.ID) Result { &RunResult{ Details: result.Details{ Type: result.Run, - ID: jobID, + JobID: jobID, Status: job.StatusML, DoneAt: time.Now(), }, @@ -74,7 +74,7 @@ func NewRunResultRE(jobID job.ID) Result { &RunResult{ Details: result.Details{ Type: result.Run, - ID: jobID, + JobID: jobID, Status: job.StatusRE, DoneAt: time.Now(), }, @@ -88,7 +88,7 @@ func NewRunResultErr(jobID job.ID, err string) Result { &RunResult{ Details: result.Details{ Type: result.Run, - ID: jobID, + JobID: jobID, DoneAt: time.Now(), Error: err, }, diff --git a/Exesh/internal/domain/execution/source/sources/source.go b/Exesh/internal/domain/execution/source/sources/source.go index 33a2f8b9..0ff96cd9 100644 --- a/Exesh/internal/domain/execution/source/sources/source.go +++ b/Exesh/internal/domain/execution/source/sources/source.go @@ -10,6 +10,14 @@ type Source struct { source.ISource } +func (src Source) MarshalJSON() ([]byte, error) { + if src.ISource == nil { + return []byte("null"), nil + } + + return json.Marshal(src.ISource) +} + func (src *Source) UnmarshalJSON(data []byte) error { var details source.Details if err := json.Unmarshal(data, &details); err != nil { diff --git a/Exesh/internal/domain/execution/source/sources/source_definition.go b/Exesh/internal/domain/execution/source/sources/source_definition.go index 54b43ec9..4f8cde49 100644 --- a/Exesh/internal/domain/execution/source/sources/source_definition.go +++ b/Exesh/internal/domain/execution/source/sources/source_definition.go @@ -1,13 +1,26 @@ package sources import ( + "database/sql/driver" "encoding/json" "exesh/internal/domain/execution/source" "fmt" ) -type Definition struct { - source.IDefinition +type ( + Definition struct { + source.IDefinition + } + + Definitions []Definition +) + +func (def Definition) MarshalJSON() ([]byte, error) { + 
if def.IDefinition == nil { + return []byte("null"), nil + } + + return json.Marshal(def.IDefinition) } func (def *Definition) UnmarshalJSON(data []byte) error { @@ -34,6 +47,44 @@ func (def *Definition) UnmarshalJSON(data []byte) error { return nil } +func (defs Definitions) Value() (driver.Value, error) { + b, err := json.Marshal(defs) + if err != nil { + return nil, fmt.Errorf("failed to marshal source definitions: %w", err) + } + return b, nil +} + +func (defs *Definitions) Scan(src any) error { + if src == nil { + *defs = nil + return nil + } + + var data []byte + switch v := src.(type) { + case []byte: + data = v + case string: + data = []byte(v) + default: + return fmt.Errorf("failed to scan source definitions from type %T", src) + } + + if len(data) == 0 { + *defs = nil + return nil + } + + var out Definitions + if err := json.Unmarshal(data, &out); err != nil { + return fmt.Errorf("failed to unmarshal source definitions: %w", err) + } + + *defs = out + return nil +} + func (def *Definition) AsInlineDefinition() *InlineSourceDefinition { return def.IDefinition.(*InlineSourceDefinition) } diff --git a/Exesh/internal/domain/execution/stage.go b/Exesh/internal/domain/execution/stage.go index 44e22223..91139a3f 100644 --- a/Exesh/internal/domain/execution/stage.go +++ b/Exesh/internal/domain/execution/stage.go @@ -13,6 +13,3 @@ type ( StageName string ) - -func (stage *Stage) BuildGraph() { -} diff --git a/Exesh/internal/domain/execution/stage_definition.go b/Exesh/internal/domain/execution/stage_definition.go index 263e987d..86ccadbe 100644 --- a/Exesh/internal/domain/execution/stage_definition.go +++ b/Exesh/internal/domain/execution/stage_definition.go @@ -1,9 +1,57 @@ package execution -import "exesh/internal/domain/execution/job/jobs" +import ( + "database/sql/driver" + "encoding/json" + "exesh/internal/domain/execution/job/jobs" + "fmt" +) -type StageDefinition struct { - Name StageName `json:"name"` - Deps []StageName `json:"deps"` - Jobs 
[]jobs.Definition `json:"jobs"` +type ( + StageDefinition struct { + Name StageName `json:"name"` + Deps []StageName `json:"deps"` + Jobs []jobs.Definition `json:"jobs"` + } + + StageDefinitions []StageDefinition +) + +func (s StageDefinitions) Value() (driver.Value, error) { + b, err := json.Marshal(s) + if err != nil { + return nil, fmt.Errorf("failed to marshal stage definitions: %w", err) + } + return b, nil +} + +func (s *StageDefinitions) Scan(src any) error { + if src == nil { + *s = nil + return nil + } + + var data []byte + + switch v := src.(type) { + case []byte: + data = v + case string: + data = []byte(v) + default: + return fmt.Errorf("failed to scan stage definitions from type %T", src) + } + + if len(data) == 0 { + *s = nil + return nil + } + + var out StageDefinitions + if err := json.Unmarshal(data, &out); err != nil { + return fmt.Errorf("failed to unmarshal stage definitions: %w", err) + } + + *s = out + return nil } diff --git a/Exesh/internal/domain/execution/stages_graph.go b/Exesh/internal/domain/execution/stages_graph.go deleted file mode 100644 index ac9c250d..00000000 --- a/Exesh/internal/domain/execution/stages_graph.go +++ /dev/null @@ -1,115 +0,0 @@ -package execution - -import ( - "exesh/internal/domain/execution/job" - "exesh/internal/domain/execution/job/jobs" - "sync" -) - -type stagesGraph struct { - mu sync.Mutex - - succStages map[StageName][]*Stage - doneDeps map[StageName]int - - toPick []*Stage - - jobGraphs map[StageName]*jobsGraph - stageByJobID map[job.ID]*Stage -} - -func newStagesGraph(stages []*Stage) *stagesGraph { - g := stagesGraph{ - mu: sync.Mutex{}, - - succStages: make(map[StageName][]*Stage), - doneDeps: make(map[StageName]int), - - toPick: make([]*Stage, 0), - - jobGraphs: make(map[StageName]*jobsGraph), - stageByJobID: make(map[job.ID]*Stage), - } - - for _, stage := range stages { - for _, dep := range stage.Deps { - if _, ok := g.succStages[dep]; !ok { - g.succStages[dep] = make([]*Stage, 0) - } - 
g.succStages[dep] = append(g.succStages[dep], stage) - } - - g.doneDeps[stage.Name] = 0 - if len(stage.Deps) == 0 { - g.toPick = append(g.toPick, stage) - } - - g.jobGraphs[stage.Name] = newJobsGraph(stage.Jobs) - for _, jb := range stage.Jobs { - g.stageByJobID[jb.GetID()] = stage - } - } - - return &g -} - -func (g *stagesGraph) pickJobs() []jobs.Job { - g.mu.Lock() - defer g.mu.Unlock() - - pickedJobs := make([]jobs.Job, 0) - for _, stage := range g.toPick { - pickedJobs = append(pickedJobs, g.jobGraphs[stage.Name].pickJobs()...) - } - - return pickedJobs -} - -func (g *stagesGraph) doneJob(jobID job.ID, jobStatus job.Status) { - g.mu.Lock() - defer g.mu.Unlock() - - stage := g.stageByJobID[jobID] - - var jb *jobs.Job - for i := range stage.Jobs { - if stage.Jobs[i].GetID() == jobID { - jb = &stage.Jobs[i] - break - } - } - if jb == nil { - return - } - - if jobStatus != jb.GetSuccessStatus() { - g.toPick = make([]*Stage, 0) - return - } - - g.jobGraphs[stage.Name].doneJob(jobID) - if g.jobGraphs[stage.Name].isDone() { - toPick := make([]*Stage, 0) - for i := range g.toPick { - if g.toPick[i].Name != stage.Name { - toPick = append(toPick, g.toPick[i]) - } - } - - for _, succStage := range g.succStages[stage.Name] { - g.doneDeps[succStage.Name]++ - if g.doneDeps[succStage.Name] == len(succStage.Deps) { - toPick = append(toPick, succStage) - } - } - - g.toPick = toPick - } -} - -func (g *stagesGraph) isDone() bool { - g.mu.Lock() - defer g.mu.Unlock() - - return len(g.toPick) == 0 -} diff --git a/Exesh/internal/factory/execution_factory.go b/Exesh/internal/factory/execution_factory.go index 03cc6daf..813b8097 100644 --- a/Exesh/internal/factory/execution_factory.go +++ b/Exesh/internal/factory/execution_factory.go @@ -100,7 +100,7 @@ func (f *ExecutionFactory) createStage(ex *execution.Execution, def execution.St stage := execution.Stage{ Name: def.Name, Deps: def.Deps, - Jobs: make([]jobs.Job, len(def.Jobs)), + Jobs: make([]jobs.Job, 0, len(def.Jobs)), } for _, 
jobDef := range def.Jobs { @@ -113,8 +113,6 @@ func (f *ExecutionFactory) createStage(ex *execution.Execution, def execution.St ex.JobByName[jobDef.GetName()] = jb } - stage.BuildGraph() - return &stage, nil } @@ -221,6 +219,8 @@ func (f *ExecutionFactory) createJob(ex *execution.Execution, def jobs.Definitio return jb, fmt.Errorf("unknown job type %s", def.GetType()) } + ex.JobDefinitionByID[jb.GetID()] = def + out := jb.GetOutput() if out != nil { ex.OutputByJob[jb.GetID()] = *out @@ -260,7 +260,7 @@ func (f *ExecutionFactory) createInput(ex *execution.Execution, def inputs.Defin } typedSrcDef := srcDef.AsFilestorageBucketDefinition() - sourceID, err := f.calculateSourceID(ex.ID.String(), string(srcDef.GetName())) + sourceID, err := f.calculateSourceID(ex.ID.String(), string(srcDef.GetName()), typedDef.File) if err != nil { return in, fmt.Errorf("failed to calculate source id: %w", err) } @@ -304,8 +304,8 @@ func (f *ExecutionFactory) createInput(ex *execution.Execution, def inputs.Defin } jobID := jb.GetID() - sourceID, err := f.calculateSourceID(ex.ID.String(), jobID.String()) - if err != nil { + var sourceID source.ID + if err := sourceID.FromString(jobID.String()); err != nil { return in, fmt.Errorf("failed to calculate source id: %w", err) } diff --git a/Exesh/internal/provider/adapter/filestorage_adapter.go b/Exesh/internal/provider/adapter/filestorage_adapter.go index 9203b8f6..357d549b 100644 --- a/Exesh/internal/provider/adapter/filestorage_adapter.go +++ b/Exesh/internal/provider/adapter/filestorage_adapter.go @@ -177,7 +177,7 @@ func (a *FilestorageAdapter) LocateFile( file string, ) (path string, unlock func(), err error) { path, unlock, err = a.filestorage.GetFile(ctx, bucketID, file) - return filepath.Join(path, file), unlock, nil + return filepath.Join(path, file), unlock, err } // ReadFile diff --git a/Exesh/internal/provider/source_provider.go b/Exesh/internal/provider/source_provider.go index 0ca9b541..34b9ec2d 100644 --- 
a/Exesh/internal/provider/source_provider.go +++ b/Exesh/internal/provider/source_provider.go @@ -11,13 +11,20 @@ import ( "sync" ) -type SourceProvider struct { - cfg config.SourceProviderConfig - filestorage filestorage +type ( + SourceProvider struct { + cfg config.SourceProviderConfig + filestorage filestorage - mu sync.Mutex - srcs map[source.ID]string -} + mu sync.Mutex + srcs map[source.ID]savedSource + } + + savedSource struct { + BucketID bucket.ID + File string + } +) func NewSourceProvider(cfg config.SourceProviderConfig, filestorage filestorage) *SourceProvider { return &SourceProvider{ @@ -25,7 +32,7 @@ func NewSourceProvider(cfg config.SourceProviderConfig, filestorage filestorage) filestorage: filestorage, mu: sync.Mutex{}, - srcs: make(map[source.ID]string), + srcs: make(map[source.ID]savedSource), } } @@ -57,7 +64,7 @@ func (p *SourceProvider) SaveSource(ctx context.Context, src sources.Source) err return fmt.Errorf("failed to commit file creation: %w", err) } - p.saveSource(src.GetID(), file) + p.saveSource(src.GetID(), bucketID, file) case source.FilestorageBucketFile: typedSrc := src.AsFilestorageBucketFile() @@ -70,7 +77,7 @@ func (p *SourceProvider) SaveSource(ctx context.Context, src sources.Source) err return fmt.Errorf("failed to download file %s: %w", bucketID, err) } - p.saveSource(src.GetID(), file) + p.saveSource(src.GetID(), bucketID, file) default: return fmt.Errorf("unknown source type '%s'", src.GetType()) } @@ -78,11 +85,11 @@ func (p *SourceProvider) SaveSource(ctx context.Context, src sources.Source) err return nil } -func (p *SourceProvider) saveSource(sourceID source.ID, file string) { +func (p *SourceProvider) saveSource(sourceID source.ID, bucketID bucket.ID, file string) { p.mu.Lock() defer p.mu.Unlock() - p.srcs[sourceID] = file + p.srcs[sourceID] = savedSource{BucketID: bucketID, File: file} } func (p *SourceProvider) RemoveSource(ctx context.Context, src sources.Source) { @@ -92,42 +99,30 @@ func (p *SourceProvider) 
RemoveSource(ctx context.Context, src sources.Source) { delete(p.srcs, src.GetID()) } -func (p *SourceProvider) getSourceFile(ctx context.Context, sourceID source.ID) (string, bool) { +func (p *SourceProvider) getSavedSource(ctx context.Context, sourceID source.ID) (savedSource, bool) { p.mu.Lock() defer p.mu.Unlock() - file, ok := p.srcs[sourceID] - return file, ok + src, ok := p.srcs[sourceID] + return src, ok } func (p *SourceProvider) Locate(ctx context.Context, sourceID source.ID) (path string, unlock func(), err error) { - file, ok := p.getSourceFile(ctx, sourceID) + src, ok := p.getSavedSource(ctx, sourceID) if !ok { err = fmt.Errorf("source %s not found", sourceID.String()) return } - var bucketID bucket.ID - if err = bucketID.FromString(sourceID.String()); err != nil { - err = fmt.Errorf("failed to calculate bucket id: %w", err) - return - } - - return p.filestorage.LocateFile(ctx, bucketID, file) + return p.filestorage.LocateFile(ctx, src.BucketID, src.File) } func (p *SourceProvider) Read(ctx context.Context, sourceID source.ID) (r io.Reader, unlock func(), err error) { - file, ok := p.getSourceFile(ctx, sourceID) + src, ok := p.getSavedSource(ctx, sourceID) if !ok { err = fmt.Errorf("source %s not found", sourceID.String()) return } - var bucketID bucket.ID - if err = bucketID.FromString(sourceID.String()); err != nil { - err = fmt.Errorf("failed to calculate bucket id: %w", err) - return - } - - return p.filestorage.ReadFile(ctx, bucketID, file) + return p.filestorage.ReadFile(ctx, src.BucketID, src.File) } diff --git a/Exesh/internal/scheduler/execution_scheduler.go b/Exesh/internal/scheduler/execution_scheduler.go index 5aa0dad1..762eae63 100644 --- a/Exesh/internal/scheduler/execution_scheduler.go +++ b/Exesh/internal/scheduler/execution_scheduler.go @@ -204,26 +204,27 @@ func (s *ExecutionScheduler) scheduleJob( return nil } - s.log.Info("schedule job", slog.Any("id", jb.GetID())) + jobID := jb.GetID() + s.log.Info("schedule job", 
slog.String("job", jobID.String())) srcs := make([]sources.Source, 0) for _, in := range jb.GetInputs() { if in.Type == input.Artifact { - var jobID job.ID - if err := jobID.FromString(in.SourceID.String()); err != nil { + var inputJobID job.ID + if err := inputJobID.FromString(in.SourceID.String()); err != nil { return fmt.Errorf("failed to convert artifact source name to job id: %w", err) } var bucketID bucket.ID - if err := bucketID.FromString(jobID.String()); err != nil { + if err := bucketID.FromString(inputJobID.String()); err != nil { return fmt.Errorf("failed to convert artifact id to bucket id: %w", err) } - workerID, err := s.artifactRegistry.GetWorker(jobID) + workerID, err := s.artifactRegistry.GetWorker(inputJobID) if err != nil { - return fmt.Errorf("failed to get worker for job %s: %w", jobID.String(), err) + return fmt.Errorf("failed to get worker for job %s: %w", inputJobID.String(), err) } - out, ok := ex.OutputByJob[jobID] + out, ok := ex.OutputByJob[inputJobID] if !ok { - return fmt.Errorf("failed to find output for job %s", jobID.String()) + return fmt.Errorf("failed to find output for job %s", inputJobID.String()) } file := out.File @@ -234,10 +235,11 @@ func (s *ExecutionScheduler) scheduleJob( src, ok := ex.SourceByID[in.SourceID] if !ok { + inputJobID := jb.GetID() s.log.Error("failed to find source for job", - slog.Any("source", in.SourceID), - slog.Any("job", jb.GetID()), - slog.Any("execution", ex.ID)) + slog.String("source", in.SourceID.String()), + slog.String("job", inputJobID.String()), + slog.String("execution", ex.ID.String())) return fmt.Errorf("failed to find source for job") } @@ -265,9 +267,10 @@ func (s *ExecutionScheduler) failJob( return } + jobID := jb.GetID() s.log.Info("fail job", - slog.Any("job", jb.GetID()), - slog.Any("execution", ex.ID.String()), + slog.String("job", jobID.String()), + slog.String("execution", ex.ID.String()), slog.Any("error", res.GetError()), ) @@ -284,9 +287,10 @@ func (s *ExecutionScheduler) 
doneJob( return } + jobID := jb.GetID() s.log.Info("done job", - slog.Any("job", jb.GetID()), - slog.Any("execution", ex.ID.String()), + slog.String("job", jobID.String()), + slog.String("execution", ex.ID.String()), ) if err := s.unitOfWork.Do(ctx, func(ctx context.Context) error { @@ -298,7 +302,7 @@ func (s *ExecutionScheduler) doneJob( return fmt.Errorf("failed to get execution for update from storage: not found") } - jobName := ex.JobDefinitionByID[jb.GetID()].GetName() + jobName := ex.JobDefinitionByID[jobID].GetName() msg, err := s.messageFactory.CreateForJob(ex.ID, jobName, res) if err != nil { return fmt.Errorf("failed to create message for job: %w", err) @@ -308,7 +312,7 @@ func (s *ExecutionScheduler) doneJob( return fmt.Errorf("failed to send message for step: %w", err) } - ex.DoneJob(jb.GetID(), res.GetStatus()) + ex.DoneJob(jobID, res.GetStatus()) e.SetScheduled(time.Now()) @@ -328,12 +332,13 @@ func (s *ExecutionScheduler) doneJob( return } - for _, jb = range ex.PickJobs() { - if err := s.scheduleJob(ctx, ex, jb); err != nil { + for _, pickedJob := range ex.PickJobs() { + if err := s.scheduleJob(ctx, ex, pickedJob); err != nil { + pickedJobID := pickedJob.GetID() s.log.Error("failed to schedule job", - slog.Any("job", jb.GetID()), + slog.String("job", pickedJobID.String()), slog.Any("error", err)) - s.finishExecution(ctx, ex, fmt.Errorf("failed to schedule job %s: %w", jb.GetID(), err)) + s.finishExecution(ctx, ex, fmt.Errorf("failed to schedule job %s: %w", pickedJobID, err)) } } } diff --git a/Exesh/internal/scheduler/job_scheduler.go b/Exesh/internal/scheduler/job_scheduler.go index 4b919347..4e7d3ff1 100644 --- a/Exesh/internal/scheduler/job_scheduler.go +++ b/Exesh/internal/scheduler/job_scheduler.go @@ -60,16 +60,25 @@ func (s *JobScheduler) PickJob(ctx context.Context, workerID string) (*jobs.Job, } func (s *JobScheduler) DoneJob(ctx context.Context, workerID string, res results.Result) { - s.mu.Lock() - defer s.mu.Unlock() + prepareCallback 
:= func() jobCallback { + s.mu.Lock() + defer s.mu.Unlock() - jobID := res.GetJobID() + jobID := res.GetJobID() - if _, ok := s.jobSources[jobID]; ok { - delete(s.jobSources, jobID) + if _, ok := s.jobSources[jobID]; ok { + delete(s.jobSources, jobID) + } + if callback, ok := s.jobCallback[jobID]; ok { + delete(s.jobCallback, jobID) + return callback + } + + return nil } - if callback, ok := s.jobCallback[jobID]; ok { - delete(s.jobCallback, jobID) + + callback := prepareCallback() + if callback != nil { callback(ctx, res) } } diff --git a/Exesh/internal/storage/postgres/execution_storage.go b/Exesh/internal/storage/postgres/execution_storage.go index ae9d1456..d92c5784 100644 --- a/Exesh/internal/storage/postgres/execution_storage.go +++ b/Exesh/internal/storage/postgres/execution_storage.go @@ -43,7 +43,7 @@ const ( WHERE status = $1 OR (status = $2 AND scheduled_at < $3) ORDER BY created_at LIMIT 1 - FOR UPDATE; + FOR UPDATE SKIP LOCKED; ` updateExecutionQuery = ` diff --git a/Exesh/internal/usecase/execute/dto.go b/Exesh/internal/usecase/execute/dto.go index 84867b97..a9389e6c 100644 --- a/Exesh/internal/usecase/execute/dto.go +++ b/Exesh/internal/usecase/execute/dto.go @@ -7,8 +7,8 @@ import ( type ( Command struct { - Sources []sources.Definition - Stages []execution.StageDefinition + Sources sources.Definitions + Stages execution.StageDefinitions } Result struct { diff --git a/Exesh/internal/usecase/execute/usecase.go b/Exesh/internal/usecase/execute/usecase.go index 34f2f078..04df0480 100644 --- a/Exesh/internal/usecase/execute/usecase.go +++ b/Exesh/internal/usecase/execute/usecase.go @@ -76,7 +76,7 @@ func (uc *UseCase) Execute(ctx context.Context, command Command) (result Result, return nil }) - uc.log.Info("created execution", slog.Any("execution_id", result.ExecutionID)) + uc.log.Info("created execution", slog.String("execution", result.ExecutionID.String())) return } diff --git a/Exesh/internal/worker/worker.go b/Exesh/internal/worker/worker.go index 
b9aadb61..c65bb01f 100644 --- a/Exesh/internal/worker/worker.go +++ b/Exesh/internal/worker/worker.go @@ -2,7 +2,6 @@ package worker import ( "context" - "encoding/json" "exesh/internal/api/heartbeat" "exesh/internal/config" "exesh/internal/domain/execution/job/jobs" @@ -143,8 +142,8 @@ func (w *Worker) runWorker(ctx context.Context) { continue } - js, _ := json.Marshal(job) - w.log.Debug("picked job", slog.Any("job_id", (*job).GetID()), slog.String("job", string(js))) + jobID := job.GetID() + w.log.Debug("picked job", slog.String("job", jobID.String())) result := w.jobExecutor.Execute(ctx, *job) @@ -152,9 +151,7 @@ func (w *Worker) runWorker(ctx context.Context) { w.doneJobs = append(w.doneJobs, result) w.mu.Unlock() - js, _ = json.Marshal(result) - w.log.Info("done job", slog.Any("job_id", (*job).GetID()), slog.Any("error", result.GetError()), - slog.Any("result", js)) + w.log.Debug("done job", slog.String("job", jobID.String())) w.changeFreeSlots(+1) } } From fce63844f325673a789aa2274a1e21d6400d269e Mon Sep 17 00:00:00 2001 From: divancode Date: Mon, 2 Feb 2026 03:48:21 +0300 Subject: [PATCH 3/4] fix --- .../executors/run_cpp_job_executor.go | 83 ++++++++++--------- .../executor/executors/run_go_job_executor.go | 83 ++++++++++--------- .../executor/executors/run_py_job_executor.go | 83 ++++++++++--------- .../provider/adapter/filestorage_adapter.go | 2 +- Exesh/internal/provider/source_provider.go | 5 ++ 5 files changed, 135 insertions(+), 121 deletions(-) diff --git a/Exesh/internal/executor/executors/run_cpp_job_executor.go b/Exesh/internal/executor/executors/run_cpp_job_executor.go index b263a32a..d343dbd9 100644 --- a/Exesh/internal/executor/executors/run_cpp_job_executor.go +++ b/Exesh/internal/executor/executors/run_cpp_job_executor.go @@ -9,6 +9,7 @@ import ( "exesh/internal/domain/execution/result/results" "exesh/internal/runtime" "fmt" + errs "github.com/DIvanCode/filestorage/pkg/errors" "io" "log/slog" "time" @@ -57,52 +58,54 @@ func (e 
*RunCppJobExecutor) Execute(ctx context.Context, jb jobs.Job) results.Re defer unlock() runOutput, commitOutput, abortOutput, err := e.outputProvider.Create(ctx, jb.GetID(), runCppJob.RunOutput.File) - if err != nil { + if err != nil && !errors.Is(err, errs.ErrFileAlreadyExists) { return errorResult(fmt.Errorf("failed to create run_output output: %w", err)) } - commit := func() error { - if err = commitOutput(); err != nil { - _ = abortOutput() - return fmt.Errorf("failed to commit run_output output: %w", err) - } - abortOutput = func() error { return nil } - return nil - } - defer func() { - _ = abortOutput() - }() - - const compiledCodeMountPath = "/a.out" - - stderr := bytes.NewBuffer(nil) - err = e.runtime.Execute(ctx, - []string{compiledCodeMountPath}, - runtime.ExecuteParams{ - // TODO: Limits - Limits: runtime.Limits{ - Memory: runtime.MemoryLimit(int64(runCppJob.MemoryLimit) * int64(runtime.Megabyte)), - Time: runtime.TimeLimit(int64(runCppJob.TimeLimit) * int64(time.Millisecond)), - }, - InFiles: []runtime.File{{OutsideLocation: compiledCode, InsideLocation: compiledCodeMountPath}}, - Stderr: stderr, - Stdin: runInput, - Stdout: runOutput, - }) - if err != nil { - e.log.Error("execute binary in runtime error", slog.Any("err", err)) - if errors.Is(err, runtime.ErrTimeout) { - return results.NewRunResultTL(jb.GetID()) + if err == nil { // if file already exists, do not run command + commit := func() error { + if err = commitOutput(); err != nil { + _ = abortOutput() + return fmt.Errorf("failed to commit run_output output: %w", err) + } + abortOutput = func() error { return nil } + return nil } - if errors.Is(err, runtime.ErrOutOfMemory) { - return results.NewRunResultML(jb.GetID()) + defer func() { + _ = abortOutput() + }() + + const compiledCodeMountPath = "/a.out" + + stderr := bytes.NewBuffer(nil) + err = e.runtime.Execute(ctx, + []string{compiledCodeMountPath}, + runtime.ExecuteParams{ + // TODO: Limits + Limits: runtime.Limits{ + Memory: 
runtime.MemoryLimit(int64(runCppJob.MemoryLimit) * int64(runtime.Megabyte)), + Time: runtime.TimeLimit(int64(runCppJob.TimeLimit) * int64(time.Millisecond)), + }, + InFiles: []runtime.File{{OutsideLocation: compiledCode, InsideLocation: compiledCodeMountPath}}, + Stderr: stderr, + Stdin: runInput, + Stdout: runOutput, + }) + if err != nil { + e.log.Error("execute binary in runtime error", slog.Any("err", err)) + if errors.Is(err, runtime.ErrTimeout) { + return results.NewRunResultTL(jb.GetID()) + } + if errors.Is(err, runtime.ErrOutOfMemory) { + return results.NewRunResultML(jb.GetID()) + } + return results.NewRunResultRE(jb.GetID()) } - return results.NewRunResultRE(jb.GetID()) - } - e.log.Info("command ok") + e.log.Info("command ok") - if err = commit(); err != nil { - return errorResult(fmt.Errorf("failed to commit output creation: %w", err)) + if err = commit(); err != nil { + return errorResult(fmt.Errorf("failed to commit output creation: %w", err)) + } } if !runCppJob.ShowOutput { diff --git a/Exesh/internal/executor/executors/run_go_job_executor.go b/Exesh/internal/executor/executors/run_go_job_executor.go index 62f2d2d5..2cd10d87 100644 --- a/Exesh/internal/executor/executors/run_go_job_executor.go +++ b/Exesh/internal/executor/executors/run_go_job_executor.go @@ -7,6 +7,7 @@ import ( "exesh/internal/domain/execution/job" "exesh/internal/domain/execution/job/jobs" "fmt" + errs "github.com/DIvanCode/filestorage/pkg/errors" "io" "log/slog" "time" @@ -58,52 +59,54 @@ func (e *RunGoJobExecutor) Execute(ctx context.Context, jb jobs.Job) results.Res defer unlock() runOutput, commitOutput, abortOutput, err := e.outputProvider.Create(ctx, jb.GetID(), runGoJob.RunOutput.File) - if err != nil { + if err != nil && !errors.Is(err, errs.ErrFileAlreadyExists) { return errorResult(fmt.Errorf("failed to create run_output output: %w", err)) } - commit := func() error { - if err = commitOutput(); err != nil { - _ = abortOutput() - return fmt.Errorf("failed to commit 
run_output output: %w", err) - } - abortOutput = func() error { return nil } - return nil - } - defer func() { - _ = abortOutput() - }() - - const compiledCodeMountPath = "/a.out" - - stderr := bytes.NewBuffer(nil) - err = e.runtime.Execute(ctx, - []string{compiledCodeMountPath}, - runtime.ExecuteParams{ - // TODO: Limits - Limits: runtime.Limits{ - Memory: runtime.MemoryLimit(int64(runGoJob.MemoryLimit) * int64(runtime.Megabyte)), - Time: runtime.TimeLimit(int64(runGoJob.TimeLimit) * int64(time.Millisecond)), - }, - InFiles: []runtime.File{{OutsideLocation: compiledCode, InsideLocation: compiledCodeMountPath}}, - Stderr: stderr, - Stdin: runInput, - Stdout: runOutput, - }) - if err != nil { - e.log.Error("execute binary in runtime error", slog.Any("err", err)) - if errors.Is(err, runtime.ErrTimeout) { - return results.NewRunResultTL(jb.GetID()) + if err == nil { // if file already exists, do not run command + commit := func() error { + if err = commitOutput(); err != nil { + _ = abortOutput() + return fmt.Errorf("failed to commit run_output output: %w", err) + } + abortOutput = func() error { return nil } + return nil } - if errors.Is(err, runtime.ErrOutOfMemory) { - return results.NewRunResultML(jb.GetID()) + defer func() { + _ = abortOutput() + }() + + const compiledCodeMountPath = "/a.out" + + stderr := bytes.NewBuffer(nil) + err = e.runtime.Execute(ctx, + []string{compiledCodeMountPath}, + runtime.ExecuteParams{ + // TODO: Limits + Limits: runtime.Limits{ + Memory: runtime.MemoryLimit(int64(runGoJob.MemoryLimit) * int64(runtime.Megabyte)), + Time: runtime.TimeLimit(int64(runGoJob.TimeLimit) * int64(time.Millisecond)), + }, + InFiles: []runtime.File{{OutsideLocation: compiledCode, InsideLocation: compiledCodeMountPath}}, + Stderr: stderr, + Stdin: runInput, + Stdout: runOutput, + }) + if err != nil { + e.log.Error("execute binary in runtime error", slog.Any("err", err)) + if errors.Is(err, runtime.ErrTimeout) { + return results.NewRunResultTL(jb.GetID()) + } + 
if errors.Is(err, runtime.ErrOutOfMemory) { + return results.NewRunResultML(jb.GetID()) + } + return results.NewRunResultRE(jb.GetID()) } - return results.NewRunResultRE(jb.GetID()) - } - e.log.Info("command ok") + e.log.Info("command ok") - if err = commit(); err != nil { - return errorResult(fmt.Errorf("failed to commit output creation: %w", err)) + if err = commit(); err != nil { + return errorResult(fmt.Errorf("failed to commit output creation: %w", err)) + } } if !runGoJob.ShowOutput { diff --git a/Exesh/internal/executor/executors/run_py_job_executor.go b/Exesh/internal/executor/executors/run_py_job_executor.go index f05ee62a..1e0aee08 100644 --- a/Exesh/internal/executor/executors/run_py_job_executor.go +++ b/Exesh/internal/executor/executors/run_py_job_executor.go @@ -9,6 +9,7 @@ import ( "exesh/internal/domain/execution/result/results" "exesh/internal/runtime" "fmt" + errs "github.com/DIvanCode/filestorage/pkg/errors" "io" "log/slog" "time" @@ -57,52 +58,54 @@ func (e *RunPyJobExecutor) Execute(ctx context.Context, jb jobs.Job) results.Res defer unlock() runOutput, commitOutput, abortOutput, err := e.outputProvider.Create(ctx, jb.GetID(), runPyJob.RunOutput.File) - if err != nil { + if err != nil && !errors.Is(err, errs.ErrFileAlreadyExists) { return errorResult(fmt.Errorf("failed to create run_output output: %w", err)) } - commit := func() error { - if err = commitOutput(); err != nil { - _ = abortOutput() - return fmt.Errorf("failed to commit run_output output: %w", err) - } - abortOutput = func() error { return nil } - return nil - } - defer func() { - _ = abortOutput() - }() - - const codeLocation = "/main.py" - - stderr := bytes.NewBuffer(nil) - err = e.runtime.Execute(ctx, - []string{"python3", codeLocation}, - runtime.ExecuteParams{ - // TODO: Limits - Limits: runtime.Limits{ - Memory: runtime.MemoryLimit(int64(runPyJob.MemoryLimit) * int64(runtime.Megabyte)), - Time: runtime.TimeLimit(int64(runPyJob.TimeLimit) * int64(time.Millisecond)), - }, - 
InFiles: []runtime.File{{OutsideLocation: code, InsideLocation: codeLocation}}, - Stderr: stderr, - Stdin: runInput, - Stdout: runOutput, - }) - if err != nil { - e.log.Error("execute binary in runtime error", slog.Any("err", err)) - if errors.Is(err, runtime.ErrTimeout) { - return results.NewRunResultTL(jb.GetID()) + if err == nil { // if file already exists, do not run command + commit := func() error { + if err = commitOutput(); err != nil { + _ = abortOutput() + return fmt.Errorf("failed to commit run_output output: %w", err) + } + abortOutput = func() error { return nil } + return nil } - if errors.Is(err, runtime.ErrOutOfMemory) { - return results.NewRunResultML(jb.GetID()) + defer func() { + _ = abortOutput() + }() + + const codeLocation = "/main.py" + + stderr := bytes.NewBuffer(nil) + err = e.runtime.Execute(ctx, + []string{"python3", codeLocation}, + runtime.ExecuteParams{ + // TODO: Limits + Limits: runtime.Limits{ + Memory: runtime.MemoryLimit(int64(runPyJob.MemoryLimit) * int64(runtime.Megabyte)), + Time: runtime.TimeLimit(int64(runPyJob.TimeLimit) * int64(time.Millisecond)), + }, + InFiles: []runtime.File{{OutsideLocation: code, InsideLocation: codeLocation}}, + Stderr: stderr, + Stdin: runInput, + Stdout: runOutput, + }) + if err != nil { + e.log.Error("execute binary in runtime error", slog.Any("err", err)) + if errors.Is(err, runtime.ErrTimeout) { + return results.NewRunResultTL(jb.GetID()) + } + if errors.Is(err, runtime.ErrOutOfMemory) { + return results.NewRunResultML(jb.GetID()) + } + return results.NewRunResultRE(jb.GetID()) } - return results.NewRunResultRE(jb.GetID()) - } - e.log.Info("command ok") + e.log.Info("command ok") - if commitErr := commit(); commitErr != nil { - return errorResult(commitErr) + if err = commit(); err != nil { + return errorResult(fmt.Errorf("failed to commit output creation: %w", err)) + } } if !runPyJob.ShowOutput { diff --git a/Exesh/internal/provider/adapter/filestorage_adapter.go 
b/Exesh/internal/provider/adapter/filestorage_adapter.go index 357d549b..d6f46f59 100644 --- a/Exesh/internal/provider/adapter/filestorage_adapter.go +++ b/Exesh/internal/provider/adapter/filestorage_adapter.go @@ -119,7 +119,7 @@ func (a *FilestorageAdapter) ReserveFile( // the file will be reserved in bucket // it needs for producing files using writer // if the bucket does not exist, it will be created -// if the file already exists, then the ErrSourceAlreadyExists will be returned +// if the file already exists, then the error from filestorage will be returned func (a *FilestorageAdapter) CreateFile( ctx context.Context, bucketID bucket.ID, diff --git a/Exesh/internal/provider/source_provider.go b/Exesh/internal/provider/source_provider.go index 34b9ec2d..5a3eae22 100644 --- a/Exesh/internal/provider/source_provider.go +++ b/Exesh/internal/provider/source_provider.go @@ -2,11 +2,13 @@ package provider import ( "context" + "errors" "exesh/internal/config" "exesh/internal/domain/execution/source" "exesh/internal/domain/execution/source/sources" "fmt" "github.com/DIvanCode/filestorage/pkg/bucket" + errs "github.com/DIvanCode/filestorage/pkg/errors" "io" "sync" ) @@ -50,6 +52,9 @@ func (p *SourceProvider) SaveSource(ctx context.Context, src sources.Source) err bucketTTL := p.cfg.FilestorageBucketTTL w, commit, abort, err := p.filestorage.CreateFile(ctx, bucketID, file, bucketTTL) + if err != nil && errors.Is(err, errs.ErrFileAlreadyExists) { + return nil + } if err != nil { return fmt.Errorf("failed to create file: %w", err) } From 8a19f5c9ed97fda6e76397b78aa410d4cb239dab Mon Sep 17 00:00:00 2001 From: divancode Date: Mon, 2 Feb 2026 03:50:44 +0300 Subject: [PATCH 4/4] fix --- Exesh/internal/executor/executors/run_cpp_job_executor.go | 1 - Exesh/internal/executor/executors/run_go_job_executor.go | 1 - Exesh/internal/executor/executors/run_py_job_executor.go | 1 - 3 files changed, 3 deletions(-) diff --git a/Exesh/internal/executor/executors/run_cpp_job_executor.go 
b/Exesh/internal/executor/executors/run_cpp_job_executor.go index d343dbd9..474adaa7 100644 --- a/Exesh/internal/executor/executors/run_cpp_job_executor.go +++ b/Exesh/internal/executor/executors/run_cpp_job_executor.go @@ -80,7 +80,6 @@ func (e *RunCppJobExecutor) Execute(ctx context.Context, jb jobs.Job) results.Re err = e.runtime.Execute(ctx, []string{compiledCodeMountPath}, runtime.ExecuteParams{ - // TODO: Limits Limits: runtime.Limits{ Memory: runtime.MemoryLimit(int64(runCppJob.MemoryLimit) * int64(runtime.Megabyte)), Time: runtime.TimeLimit(int64(runCppJob.TimeLimit) * int64(time.Millisecond)), diff --git a/Exesh/internal/executor/executors/run_go_job_executor.go b/Exesh/internal/executor/executors/run_go_job_executor.go index 2cd10d87..53c5aa8d 100644 --- a/Exesh/internal/executor/executors/run_go_job_executor.go +++ b/Exesh/internal/executor/executors/run_go_job_executor.go @@ -81,7 +81,6 @@ func (e *RunGoJobExecutor) Execute(ctx context.Context, jb jobs.Job) results.Res err = e.runtime.Execute(ctx, []string{compiledCodeMountPath}, runtime.ExecuteParams{ - // TODO: Limits Limits: runtime.Limits{ Memory: runtime.MemoryLimit(int64(runGoJob.MemoryLimit) * int64(runtime.Megabyte)), Time: runtime.TimeLimit(int64(runGoJob.TimeLimit) * int64(time.Millisecond)), diff --git a/Exesh/internal/executor/executors/run_py_job_executor.go b/Exesh/internal/executor/executors/run_py_job_executor.go index 1e0aee08..7c6f55e1 100644 --- a/Exesh/internal/executor/executors/run_py_job_executor.go +++ b/Exesh/internal/executor/executors/run_py_job_executor.go @@ -80,7 +80,6 @@ func (e *RunPyJobExecutor) Execute(ctx context.Context, jb jobs.Job) results.Res err = e.runtime.Execute(ctx, []string{"python3", codeLocation}, runtime.ExecuteParams{ - // TODO: Limits Limits: runtime.Limits{ Memory: runtime.MemoryLimit(int64(runPyJob.MemoryLimit) * int64(runtime.Megabyte)), Time: runtime.TimeLimit(int64(runPyJob.TimeLimit) * int64(time.Millisecond)),