From 88b5529cc41c91f819ca8feee52b9f13b9e411d7 Mon Sep 17 00:00:00 2001
From: Gianluca Zuccarelli
Date: Mon, 10 Jan 2022 13:11:54 +0000
Subject: [PATCH] osbuild-worker: test error backwards compatibility

Since the workers will use structured error messages going forward, it
is necessary to maintain backwards compatibility for these errors in
composer. Tests have been added to the various APIs to ensure that each
API checks for both kinds of errors, old and new.
---
 internal/cloudapi/v2/v2_test.go | 110 +++++++++++++
 internal/kojiapi/server_test.go |  99 ++++++++++++
 internal/weldr/compose_test.go  | 109 +++++++++++++
 internal/worker/server_test.go  | 266 ++++++++++++++++++++++++++++++++
 4 files changed, 584 insertions(+)
 create mode 100644 internal/weldr/compose_test.go

diff --git a/internal/cloudapi/v2/v2_test.go b/internal/cloudapi/v2/v2_test.go
index 48e824ef1..eb2f731c5 100644
--- a/internal/cloudapi/v2/v2_test.go
+++ b/internal/cloudapi/v2/v2_test.go
@@ -18,6 +18,7 @@ import (
 	"github.com/osbuild/osbuild-composer/internal/rpmmd"
 	"github.com/osbuild/osbuild-composer/internal/test"
 	"github.com/osbuild/osbuild-composer/internal/worker"
+	"github.com/osbuild/osbuild-composer/internal/worker/clienterrors"
 )
 
 func newV2Server(t *testing.T, dir string) (*v2.Server, *worker.Server, context.CancelFunc) {
@@ -369,6 +370,115 @@ func TestComposeStatusFailure(t *testing.T) {
 	}`, jobId, jobId))
 }
 
+func TestComposeLegacyError(t *testing.T) {
+	dir, err := ioutil.TempDir("", "osbuild-composer-test-api-v2-")
+	require.NoError(t, err)
+	defer os.RemoveAll(dir)
+	srv, wrksrv, cancel := newV2Server(t, dir)
+	defer cancel()
+
+	test.TestRoute(t, srv.Handler("/api/image-builder-composer/v2"), false, "POST", "/api/image-builder-composer/v2/compose", fmt.Sprintf(`
+	{
+		"distribution": "%s",
+		"image_request":{
+			"architecture": "%s",
+			"image_type": "aws",
+			"repositories": [{
+				"baseurl": "somerepo.org",
+				"rhsm": false
+			}],
+			"upload_options": {
+				"region": "eu-central-1"
+			}
+		}
+	}`, test_distro.TestDistroName, test_distro.TestArch3Name), http.StatusCreated, `
+	{
+		"href": "/api/image-builder-composer/v2/compose",
+		"kind": "ComposeId"
+	}`, "id")
+
+	jobId, token, jobType, _, _, err := wrksrv.RequestJob(context.Background(), test_distro.TestArch3Name, []string{"osbuild"})
+	require.NoError(t, err)
+	require.Equal(t, "osbuild", jobType)
+
+	test.TestRoute(t, srv.Handler("/api/image-builder-composer/v2"), false, "GET", fmt.Sprintf("/api/image-builder-composer/v2/composes/%v", jobId), ``, http.StatusOK, fmt.Sprintf(`
+	{
+		"href": "/api/image-builder-composer/v2/composes/%v",
+		"kind": "ComposeStatus",
+		"id": "%v",
+		"image_status": {"status": "building"}
+	}`, jobId, jobId))
+
+	jobResult, err := json.Marshal(worker.OSBuildJobResult{TargetErrors: []string{"Osbuild failed"}})
+	require.NoError(t, err)
+
+	err = wrksrv.FinishJob(token, jobResult)
+	require.NoError(t, err)
+	test.TestRoute(t, srv.Handler("/api/image-builder-composer/v2"), false, "GET", fmt.Sprintf("/api/image-builder-composer/v2/composes/%v", jobId), ``, http.StatusOK, fmt.Sprintf(`
+	{
+		"href": "/api/image-builder-composer/v2/composes/%v",
+		"kind": "ComposeStatus",
+		"id": "%v",
+		"image_status": {"status": "failure"}
+	}`, jobId, jobId))
+}
+
+func TestComposeJobError(t *testing.T) {
+	dir, err := ioutil.TempDir("", "osbuild-composer-test-api-v2-")
+	require.NoError(t, err)
+	defer os.RemoveAll(dir)
+	srv, wrksrv, cancel := newV2Server(t, dir)
+	defer cancel()
+
+	test.TestRoute(t, srv.Handler("/api/image-builder-composer/v2"), false, "POST", "/api/image-builder-composer/v2/compose", fmt.Sprintf(`
+	{
+		"distribution": "%s",
+		"image_request":{
+			"architecture": "%s",
+			"image_type": "aws",
+			"repositories": [{
+				"baseurl": "somerepo.org",
+				"rhsm": false
+			}],
+			"upload_options": {
+				"region": "eu-central-1"
+			}
+		}
+	}`, test_distro.TestDistroName, test_distro.TestArch3Name), http.StatusCreated, `
+	{
+		"href": "/api/image-builder-composer/v2/compose",
+		"kind": "ComposeId"
+	}`, "id")
+
+	jobId, token, jobType, _, _, err := wrksrv.RequestJob(context.Background(), test_distro.TestArch3Name, []string{"osbuild"})
+	require.NoError(t, err)
+	require.Equal(t, "osbuild", jobType)
+
+	test.TestRoute(t, srv.Handler("/api/image-builder-composer/v2"), false, "GET", fmt.Sprintf("/api/image-builder-composer/v2/composes/%v", jobId), ``, http.StatusOK, fmt.Sprintf(`
+	{
+		"href": "/api/image-builder-composer/v2/composes/%v",
+		"kind": "ComposeStatus",
+		"id": "%v",
+		"image_status": {"status": "building"}
+	}`, jobId, jobId))
+
+	jobErr := worker.JobResult{
+		JobError: clienterrors.WorkerClientError(clienterrors.ErrorBuildJob, "Error building image"),
+	}
+	jobResult, err := json.Marshal(worker.OSBuildJobResult{JobResult: jobErr})
+	require.NoError(t, err)
+
+	err = wrksrv.FinishJob(token, jobResult)
+	require.NoError(t, err)
+	test.TestRoute(t, srv.Handler("/api/image-builder-composer/v2"), false, "GET", fmt.Sprintf("/api/image-builder-composer/v2/composes/%v", jobId), ``, http.StatusOK, fmt.Sprintf(`
+	{
+		"href": "/api/image-builder-composer/v2/composes/%v",
+		"kind": "ComposeStatus",
+		"id": "%v",
+		"image_status": {"status": "failure"}
+	}`, jobId, jobId))
+}
+
 func TestComposeCustomizations(t *testing.T) {
 	dir, err := ioutil.TempDir("", "osbuild-composer-test-api-v2-")
 	require.NoError(t, err)
diff --git a/internal/kojiapi/server_test.go b/internal/kojiapi/server_test.go
index 3c7b69776..e3cd1a1dc 100644
--- a/internal/kojiapi/server_test.go
+++ b/internal/kojiapi/server_test.go
@@ -22,6 +22,7 @@ import (
 	osbuild "github.com/osbuild/osbuild-composer/internal/osbuild2"
 	"github.com/osbuild/osbuild-composer/internal/test"
 	"github.com/osbuild/osbuild-composer/internal/worker"
+	"github.com/osbuild/osbuild-composer/internal/worker/clienterrors"
 	"github.com/stretchr/testify/require"
 )
 
@@ -135,6 +136,36 @@ func TestCompose(t *testing.T) {
 			"status": "failure"
 		}`,
 		},
+		{
+			initResult: worker.KojiInitJobResult{
+				JobResult: worker.JobResult{
+					JobError: clienterrors.WorkerClientError(clienterrors.ErrorKojiInit, "Koji init error"),
+				},
+			},
+			buildResult: worker.OSBuildKojiJobResult{
+				Arch: test_distro.TestArchName,
+				HostOS: test_distro.TestDistroName,
+				ImageHash: "browns",
+				ImageSize: 42,
+				OSBuildOutput: &osbuild.Result{
+					Success: true,
+				},
+			},
+			composeReplyCode: http.StatusBadRequest,
+			composeReply: `{"message":"Could not initialize build with koji: Koji init error"}`,
+			composeStatus: `{
+			"image_statuses": [
+				{
+					"status": "failure"
+				},
+				{
+					"status": "failure"
+				}
+			],
+			"koji_task_id": 0,
+			"status": "failure"
+		}`,
+		},
 		{
 			initResult: worker.KojiInitJobResult{
 				BuildID: 42,
@@ -196,6 +227,39 @@ func TestCompose(t *testing.T) {
 			"status": "failure"
 		}`,
 		},
+		{
+			initResult: worker.KojiInitJobResult{
+				BuildID: 42,
+				Token: `"foobar"`,
+			},
+			buildResult: worker.OSBuildKojiJobResult{
+				Arch: test_distro.TestArchName,
+				HostOS: test_distro.TestDistroName,
+				ImageHash: "browns",
+				ImageSize: 42,
+				OSBuildOutput: &osbuild.Result{
+					Success: true,
+				},
+				JobResult: worker.JobResult{
+					JobError: clienterrors.WorkerClientError(clienterrors.ErrorBuildJob, "Koji build error"),
+				},
+			},
+			composeReplyCode: http.StatusCreated,
+			composeReply: `{"koji_build_id":42}`,
+			composeStatus: `{
+			"image_statuses": [
+				{
+					"status": "failure"
+				},
+				{
+					"status": "success"
+				}
+			],
+			"koji_build_id": 42,
+			"koji_task_id": 0,
+			"status": "failure"
+		}`,
+		},
 		{
 			initResult: worker.KojiInitJobResult{
 				BuildID: 42,
@@ -229,6 +293,41 @@ func TestCompose(t *testing.T) {
 			"status": "failure"
 		}`,
 		},
+		{
+			initResult: worker.KojiInitJobResult{
+				BuildID: 42,
+				Token: `"foobar"`,
+			},
+			buildResult: worker.OSBuildKojiJobResult{
+				Arch: test_distro.TestArchName,
+				HostOS: test_distro.TestDistroName,
+				ImageHash: "browns",
+				ImageSize: 42,
+				OSBuildOutput: &osbuild.Result{
+					Success: true,
+				},
+			},
+			finalizeResult: worker.KojiFinalizeJobResult{
+				JobResult: worker.JobResult{
+					JobError: clienterrors.WorkerClientError(clienterrors.ErrorKojiFinalize, "Koji finalize error"),
+				},
+			},
+			composeReplyCode: http.StatusCreated,
+			composeReply: `{"koji_build_id":42}`,
+			composeStatus: `{
+			"image_statuses": [
+				{
+					"status": "success"
+				},
+				{
+					"status": "success"
+				}
+			],
+			"koji_build_id": 42,
+			"koji_task_id": 0,
+			"status": "failure"
+		}`,
+		},
 	}
 	for _, c := range cases {
 		var wg sync.WaitGroup
diff --git a/internal/weldr/compose_test.go b/internal/weldr/compose_test.go
new file mode 100644
index 000000000..5f1d8d1d2
--- /dev/null
+++ b/internal/weldr/compose_test.go
@@ -0,0 +1,109 @@
+package weldr
+
+import (
+	"context"
+	"encoding/json"
+	"io/ioutil"
+	"os"
+	"testing"
+
+	"github.com/osbuild/osbuild-composer/internal/distro"
+	"github.com/osbuild/osbuild-composer/internal/distro/test_distro"
+	rpmmd_mock "github.com/osbuild/osbuild-composer/internal/mocks/rpmmd"
+	"github.com/osbuild/osbuild-composer/internal/worker"
+	"github.com/osbuild/osbuild-composer/internal/worker/clienterrors"
+	"github.com/stretchr/testify/require"
+)
+
+func TestComposeStatusFromLegacyError(t *testing.T) {
+
+	if len(os.Getenv("OSBUILD_COMPOSER_TEST_EXTERNAL")) > 0 {
+		t.Skip("This test is for internal testing only")
+	}
+
+	tempdir, err := ioutil.TempDir("", "weldr-tests-")
+	require.NoError(t, err)
+	defer os.RemoveAll(tempdir)
+
+	api, _ := createWeldrAPI(tempdir, rpmmd_mock.BaseFixture)
+
+	distroStruct := test_distro.New()
+	arch, err := distroStruct.GetArch(test_distro.TestArchName)
+	if err != nil {
+		t.Fatalf("error getting arch from distro: %v", err)
+	}
+	imageType, err := arch.GetImageType(test_distro.TestImageTypeName)
+	if err != nil {
+		t.Fatalf("error getting image type from arch: %v", err)
+	}
+	manifest, err := imageType.Manifest(nil, distro.ImageOptions{Size: imageType.Size(0)}, nil, nil, 0)
+	if err != nil {
+		t.Fatalf("error creating osbuild manifest: %v", err)
+	}
+
+	jobId, err := api.workers.EnqueueOSBuild(arch.Name(), &worker.OSBuildJob{Manifest: manifest})
+	require.NoError(t, err)
+
+	j, token, _, _, _, err := api.workers.RequestJob(context.Background(), arch.Name(), []string{"osbuild"})
+	require.NoError(t, err)
+	require.Equal(t, jobId, j)
+
+	jobResult := worker.OSBuildJobResult{TargetErrors: []string{"Upload error"}}
+	rawResult, err := json.Marshal(jobResult)
+	require.NoError(t, err)
+	err = api.workers.FinishJob(token, rawResult)
+	require.NoError(t, err)
+
+	jobStatus, _, err := api.workers.JobStatus(jobId, &jobResult)
+	require.NoError(t, err)
+
+	state := composeStateFromJobStatus(jobStatus, &jobResult)
+	require.Equal(t, "FAILED", state.ToString())
+}
+
+func TestComposeStatusFromJobError(t *testing.T) {
+
+	if len(os.Getenv("OSBUILD_COMPOSER_TEST_EXTERNAL")) > 0 {
+		t.Skip("This test is for internal testing only")
+	}
+
+	tempdir, err := ioutil.TempDir("", "weldr-tests-")
+	require.NoError(t, err)
+	defer os.RemoveAll(tempdir)
+
+	api, _ := createWeldrAPI(tempdir, rpmmd_mock.BaseFixture)
+
+	distroStruct := test_distro.New()
+	arch, err := distroStruct.GetArch(test_distro.TestArchName)
+	if err != nil {
+		t.Fatalf("error getting arch from distro: %v", err)
+	}
+	imageType, err := arch.GetImageType(test_distro.TestImageTypeName)
+	if err != nil {
+		t.Fatalf("error getting image type from arch: %v", err)
+	}
+	manifest, err := imageType.Manifest(nil, distro.ImageOptions{Size: imageType.Size(0)}, nil, nil, 0)
+	if err != nil {
+		t.Fatalf("error creating osbuild manifest: %v", err)
+	}
+
+	jobId, err := api.workers.EnqueueOSBuild(arch.Name(), &worker.OSBuildJob{Manifest: manifest})
+	require.NoError(t, err)
+
+	j, token, _, _, _, err := api.workers.RequestJob(context.Background(), arch.Name(), []string{"osbuild"})
+	require.NoError(t, err)
+	require.Equal(t, jobId, j)
+
+	jobResult := worker.OSBuildJobResult{}
+	jobResult.JobError = clienterrors.WorkerClientError(clienterrors.ErrorUploadingImage, "Upload error")
+	rawResult, err := json.Marshal(jobResult)
+	require.NoError(t, err)
+	err = api.workers.FinishJob(token, rawResult)
+	require.NoError(t, err)
+
+	jobStatus, _, err := api.workers.JobStatus(jobId, &jobResult)
+	require.NoError(t, err)
+
+	state := composeStateFromJobStatus(jobStatus, &jobResult)
+	require.Equal(t, "FAILED", state.ToString())
+}
diff --git a/internal/worker/server_test.go b/internal/worker/server_test.go
index 44cd52393..502c759c2 100644
--- a/internal/worker/server_test.go
+++ b/internal/worker/server_test.go
@@ -22,6 +22,7 @@ import (
 	"github.com/osbuild/osbuild-composer/internal/osbuild2"
 	"github.com/osbuild/osbuild-composer/internal/test"
 	"github.com/osbuild/osbuild-composer/internal/worker"
+	"github.com/osbuild/osbuild-composer/internal/worker/clienterrors"
 )
 
 func newTestServer(t *testing.T, tempdir string, jobRequestTimeout time.Duration, basePath string) *worker.Server {
@@ -758,3 +759,268 @@ func TestMixedOSBuildKojiJob(t *testing.T) {
 	require.NoError(err)
 	require.Equal(newJobResult, newJobResultRead)
 }
+
+func TestDepsolveLegacyErrorConversion(t *testing.T) {
+	tempdir, err := ioutil.TempDir("", "worker-tests-")
+	require.NoError(t, err)
+	defer os.RemoveAll(tempdir)
+
+	distroStruct := test_distro.New()
+	arch, err := distroStruct.GetArch(test_distro.TestArchName)
+	if err != nil {
+		t.Fatalf("error getting arch from distro: %v", err)
+	}
+	server := newTestServer(t, tempdir, time.Duration(0), "/api/worker/v1")
+
+	depsolveJobId, err := server.EnqueueDepsolve(&worker.DepsolveJob{})
+	require.NoError(t, err)
+
+	_, _, _, _, _, err = server.RequestJob(context.Background(), arch.Name(), []string{"depsolve"})
+	require.NoError(t, err)
+
+	reason := "Depsolve failed"
+	errType := worker.DepsolveErrorType
+
+	expectedResult := worker.DepsolveJobResult{
+		Error: reason,
+		ErrorType: errType,
+		JobResult: worker.JobResult{
+			JobError: clienterrors.WorkerClientError(clienterrors.ErrorDNFDepsolveError, reason),
+		},
+	}
+
+	depsolveJobResult := worker.DepsolveJobResult{
+		Error: reason,
+		ErrorType: errType,
+	}
+
+	_, _, err = server.JobStatus(depsolveJobId, &depsolveJobResult)
+	require.NoError(t, err)
+	require.Equal(t, expectedResult, depsolveJobResult)
+}
+
+// Enqueue OSBuild jobs and save both kinds of
+// error types (old & new) to the queue and ensure
+// that both kinds of errors can be read back
+func TestMixedOSBuildJobErrors(t *testing.T) {
+	require := require.New(t)
+	tempdir, err := ioutil.TempDir("", "worker-tests-")
+	require.NoError(err)
+	defer os.RemoveAll(tempdir)
+
+	emptyManifestV2 := distro.Manifest(`{"version":"2","pipelines":{}}`)
+	server := newTestServer(t, tempdir, time.Millisecond*10, "/")
+
+	oldJob := worker.OSBuildJob{
+		Manifest: emptyManifestV2,
+		ImageName: "no-pipeline-names",
+	}
+	oldJobID, err := server.EnqueueOSBuild("x", &oldJob)
+	require.NoError(err)
+
+	newJob := worker.OSBuildJob{
+		Manifest: emptyManifestV2,
+		ImageName: "with-pipeline-names",
+		PipelineNames: &worker.PipelineNames{
+			Build: []string{"build"},
+			Payload: []string{"other", "pipelines"},
+		},
+	}
+	newJobID, err := server.EnqueueOSBuild("x", &newJob)
+	require.NoError(err)
+
+	oldJobRead := new(worker.OSBuildJob)
+	_, _, _, err = server.Job(oldJobID, oldJobRead)
+	require.NoError(err)
+	// Not entirely equal
+	require.NotEqual(oldJob, oldJobRead)
+
+	// NewJob the same when read back
+	newJobRead := new(worker.OSBuildJob)
+	_, _, _, err = server.Job(newJobID, newJobRead)
+	require.NoError(err)
+
+	// Dequeue the jobs (via RequestJob) to get their tokens and update them to
+	// test the result retrieval
+
+	getJob := func() (uuid.UUID, uuid.UUID) {
+		// don't block forever if the jobs weren't added or can't be retrieved
+		ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
+		defer cancel()
+		id, token, _, _, _, err := server.RequestJob(ctx, "x", []string{"osbuild"})
+		require.NoError(err)
+		return id, token
+	}
+
+	getJobTokens := func(n uint) map[uuid.UUID]uuid.UUID {
+		tokens := make(map[uuid.UUID]uuid.UUID, n)
+		for idx := uint(0); idx < n; idx++ {
+			id, token := getJob()
+			tokens[id] = token
+		}
+		return tokens
+	}
+
+	jobTokens := getJobTokens(2)
+	// make sure we got them both as expected
+	require.Contains(jobTokens, oldJobID)
+	require.Contains(jobTokens, newJobID)
+
+	oldJobResult := &worker.OSBuildJobResult{
+		TargetErrors: []string{"Upload error"},
+	}
+	oldJobResultRaw, err := json.Marshal(oldJobResult)
+	require.NoError(err)
+	oldJobToken := jobTokens[oldJobID]
+	err = server.FinishJob(oldJobToken, oldJobResultRaw)
+	require.NoError(err)
+
+	oldJobResultRead := new(worker.OSBuildJobResult)
+	_, _, err = server.JobStatus(oldJobID, oldJobResultRead)
+	require.NoError(err)
+
+	require.NotEqual(oldJobResult, oldJobResultRead)
+	require.Equal(oldJobResult.Success, false)
+	require.Equal(oldJobResultRead.Success, false)
+
+	newJobResult := &worker.OSBuildJobResult{
+		PipelineNames: &worker.PipelineNames{
+			Build: []string{"build-result"},
+			Payload: []string{"result-test-payload", "result-test-assembler"},
+		},
+		JobResult: worker.JobResult{
+			JobError: clienterrors.WorkerClientError(clienterrors.ErrorUploadingImage, "Error uploading image", nil),
+		},
+	}
+	newJobResultRaw, err := json.Marshal(newJobResult)
+	require.NoError(err)
+	newJobToken := jobTokens[newJobID]
+	err = server.FinishJob(newJobToken, newJobResultRaw)
+	require.NoError(err)
+
+	newJobResultRead := new(worker.OSBuildJobResult)
+	_, _, err = server.JobStatus(newJobID, newJobResultRead)
+	require.NoError(err)
+	require.Equal(newJobResult, newJobResultRead)
+	require.Equal(newJobResult.Success, false)
+	require.Equal(newJobResultRead.Success, false)
+}
+
+// Enqueue Koji jobs and save both kinds of
+// error types (old & new) to the queue and ensure
+// that both kinds of errors can be read back
+func TestMixedOSBuildKojiJobErrors(t *testing.T) {
+	require := require.New(t)
+	tempdir, err := ioutil.TempDir("", "worker-tests-")
+	require.NoError(err)
+	defer os.RemoveAll(tempdir)
+
+	emptyManifestV2 := distro.Manifest(`{"version":"2","pipelines":{}}`)
+	server := newTestServer(t, tempdir, time.Duration(0), "/api/worker/v1")
+
+	enqueueKojiJob := func(job *worker.OSBuildKojiJob) uuid.UUID {
+		initJob := new(worker.KojiInitJob)
+		initJobID, err := server.EnqueueKojiInit(initJob)
+		require.NoError(err)
+		jobID, err := server.EnqueueOSBuildKoji("k", job, initJobID)
+		require.NoError(err)
+		return jobID
+	}
+	oldJob := worker.OSBuildKojiJob{
+		Manifest: emptyManifestV2,
+		ImageName: "no-pipeline-names",
+	}
+	oldJobID := enqueueKojiJob(&oldJob)
+
+	newJob := worker.OSBuildKojiJob{
+		Manifest: emptyManifestV2,
+		ImageName: "with-pipeline-names",
+		PipelineNames: &worker.PipelineNames{
+			Build: []string{"build"},
+			Payload: []string{"other", "pipelines"},
+		},
+	}
+	newJobID := enqueueKojiJob(&newJob)
+
+	oldJobRead := new(worker.OSBuildKojiJob)
+	_, _, _, err = server.Job(oldJobID, oldJobRead)
+	require.NoError(err)
+	// Not entirely equal
+	require.NotEqual(oldJob, oldJobRead)
+
+	// NewJob the same when read back
+	newJobRead := new(worker.OSBuildKojiJob)
+	_, _, _, err = server.Job(newJobID, newJobRead)
+	require.NoError(err)
+
+	// Dequeue the jobs (via RequestJob) to get their tokens and update them to
+	// test the result retrieval
+
+	// Finish init jobs
+	for idx := uint(0); idx < 2; idx++ {
+		ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
+		defer cancel()
+		_, token, _, _, _, err := server.RequestJob(ctx, "k", []string{"koji-init"})
+		require.NoError(err)
+		require.NoError(server.FinishJob(token, nil))
+	}
+
+	getJob := func() (uuid.UUID, uuid.UUID) {
+		// don't block forever if the jobs weren't added or can't be retrieved
+		ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
+		defer cancel()
+		id, token, _, _, _, err := server.RequestJob(ctx, "k", []string{"osbuild-koji"})
+		require.NoError(err)
+		return id, token
+	}
+
+	getJobTokens := func(n uint) map[uuid.UUID]uuid.UUID {
+		tokens := make(map[uuid.UUID]uuid.UUID, n)
+		for idx := uint(0); idx < n; idx++ {
+			id, token := getJob()
+			tokens[id] = token
+		}
+		return tokens
+	}
+
+	jobTokens := getJobTokens(2)
+	// make sure we got them both as expected
+	require.Contains(jobTokens, oldJobID)
+	require.Contains(jobTokens, newJobID)
+
+	oldJobResult := &worker.OSBuildKojiJobResult{
+		KojiError: "koji build error",
+	}
+	oldJobResultRaw, err := json.Marshal(oldJobResult)
+	require.NoError(err)
+	oldJobToken := jobTokens[oldJobID]
+	err = server.FinishJob(oldJobToken, oldJobResultRaw)
+	require.NoError(err)
+
+	oldJobResultRead := new(worker.OSBuildKojiJobResult)
+	_, _, err = server.JobStatus(oldJobID, oldJobResultRead)
+	require.NoError(err)
+
+	// oldJobResultRead should have PipelineNames now
+	require.NotEqual(oldJobResult, oldJobResultRead)
+
+	newJobResult := &worker.OSBuildKojiJobResult{
+		PipelineNames: &worker.PipelineNames{
+			Build: []string{"build-result"},
+			Payload: []string{"result-test-payload", "result-test-assembler"},
+		},
+		JobResult: worker.JobResult{
+			JobError: clienterrors.WorkerClientError(clienterrors.ErrorKojiBuild, "Koji build error", nil),
+		},
+	}
+	newJobResultRaw, err := json.Marshal(newJobResult)
+	require.NoError(err)
+	newJobToken := jobTokens[newJobID]
+	err = server.FinishJob(newJobToken, newJobResultRaw)
+	require.NoError(err)
+
+	newJobResultRead := new(worker.OSBuildKojiJobResult)
+	_, _, err = server.JobStatus(newJobID, newJobResultRead)
+	require.NoError(err)
+	require.Equal(newJobResult, newJobResultRead)
+}
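
Reviewer note: a minimal sketch (not part of this patch) of the fallback behaviour these tests exercise. It assumes JobError is a nil-able pointer, as the direct assignments in the tests suggest; the helper osbuildJobFailed and the main wrapper are illustrative names only.

package main

import (
	"fmt"

	"github.com/osbuild/osbuild-composer/internal/worker"
	"github.com/osbuild/osbuild-composer/internal/worker/clienterrors"
)

// osbuildJobFailed is a hypothetical helper: it prefers the structured
// JobError used by new workers and falls back to the legacy free-form
// TargetErrors field written by older workers.
func osbuildJobFailed(res *worker.OSBuildJobResult) bool {
	if res.JobError != nil { // new structured error (clienterrors.WorkerClientError)
		return true
	}
	return len(res.TargetErrors) > 0 // legacy error reporting
}

func main() {
	legacy := &worker.OSBuildJobResult{TargetErrors: []string{"Upload error"}}

	structured := &worker.OSBuildJobResult{}
	structured.JobError = clienterrors.WorkerClientError(clienterrors.ErrorUploadingImage, "Upload error")

	// Both styles of result are reported as failures.
	fmt.Println(osbuildJobFailed(legacy), osbuildJobFailed(structured)) // true true
}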