worker: delete TargetErrors from OSBuildJobResult
The `TargetErrors` field has been unused since PR#2192 [1], and there is no need to keep backward compatibility any more, because all composer / worker instances in production are already running the modified code. In addition, delete the unit tests covering this legacy error handling. [1] https://github.com/osbuild/osbuild-composer/pull/2192
This commit is contained in:
parent
c63bfe6d83
commit
59ded68457
5 changed files with 8 additions and 167 deletions
|
|
@ -717,65 +717,6 @@ func TestComposeStatusFailure(t *testing.T) {
|
|||
}`, jobId, jobId))
|
||||
}
|
||||
|
||||
func TestComposeLegacyError(t *testing.T) {
|
||||
srv, wrksrv, _, cancel := newV2Server(t, t.TempDir(), []string{""}, false)
|
||||
defer cancel()
|
||||
|
||||
test.TestRoute(t, srv.Handler("/api/image-builder-composer/v2"), false, "POST", "/api/image-builder-composer/v2/compose", fmt.Sprintf(`
|
||||
{
|
||||
"distribution": "%s",
|
||||
"image_request":{
|
||||
"architecture": "%s",
|
||||
"image_type": "aws",
|
||||
"repositories": [{
|
||||
"baseurl": "somerepo.org",
|
||||
"rhsm": false
|
||||
}],
|
||||
"upload_options": {
|
||||
"region": "eu-central-1"
|
||||
}
|
||||
}
|
||||
}`, test_distro.TestDistroName, test_distro.TestArch3Name), http.StatusCreated, `
|
||||
{
|
||||
"href": "/api/image-builder-composer/v2/compose",
|
||||
"kind": "ComposeId"
|
||||
}`, "id")
|
||||
|
||||
jobId, token, jobType, _, _, err := wrksrv.RequestJob(context.Background(), test_distro.TestArch3Name, []string{worker.JobTypeOSBuild}, []string{""})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, worker.JobTypeOSBuild, jobType)
|
||||
|
||||
test.TestRoute(t, srv.Handler("/api/image-builder-composer/v2"), false, "GET", fmt.Sprintf("/api/image-builder-composer/v2/composes/%v", jobId), ``, http.StatusOK, fmt.Sprintf(`
|
||||
{
|
||||
"href": "/api/image-builder-composer/v2/composes/%v",
|
||||
"kind": "ComposeStatus",
|
||||
"id": "%v",
|
||||
"image_status": {"status": "building"},
|
||||
"status": "pending"
|
||||
}`, jobId, jobId))
|
||||
|
||||
jobResult, err := json.Marshal(worker.OSBuildJobResult{TargetErrors: []string{"Osbuild failed"}})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = wrksrv.FinishJob(token, jobResult)
|
||||
require.NoError(t, err)
|
||||
test.TestRoute(t, srv.Handler("/api/image-builder-composer/v2"), false, "GET", fmt.Sprintf("/api/image-builder-composer/v2/composes/%v", jobId), ``, http.StatusOK, fmt.Sprintf(`
|
||||
{
|
||||
"href": "/api/image-builder-composer/v2/composes/%v",
|
||||
"kind": "ComposeStatus",
|
||||
"id": "%v",
|
||||
"image_status": {
|
||||
"error": {
|
||||
"id": 10,
|
||||
"details": null,
|
||||
"reason": "osbuild build failed"
|
||||
},
|
||||
"status": "failure"
|
||||
},
|
||||
"status": "failure"
|
||||
}`, jobId, jobId))
|
||||
}
|
||||
|
||||
func TestComposeJobError(t *testing.T) {
|
||||
srv, wrksrv, _, cancel := newV2Server(t, t.TempDir(), []string{""}, false)
|
||||
defer cancel()
|
||||
|
|
|
|||
|
|
@ -44,7 +44,14 @@ func TestComposeStatusFromLegacyError(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
require.Equal(t, jobId, j)
|
||||
|
||||
jobResult := worker.OSBuildJobResult{TargetErrors: []string{"Upload error"}}
|
||||
jobResult := worker.OSBuildJobResult{
|
||||
JobResult: worker.JobResult{
|
||||
JobError: &clienterrors.Error{
|
||||
ID: clienterrors.ErrorUploadingImage,
|
||||
Reason: "Upload error",
|
||||
},
|
||||
},
|
||||
}
|
||||
rawResult, err := json.Marshal(jobResult)
|
||||
require.NoError(t, err)
|
||||
err = api.workers.FinishJob(token, rawResult)
|
||||
|
|
|
|||
|
|
@ -37,7 +37,6 @@ type OSBuildJobResult struct {
|
|||
Success bool `json:"success"`
|
||||
OSBuildOutput *osbuild.Result `json:"osbuild_output,omitempty"`
|
||||
TargetResults []*target.TargetResult `json:"target_results,omitempty"`
|
||||
TargetErrors []string `json:"target_errors,omitempty"`
|
||||
UploadStatus string `json:"upload_status"`
|
||||
PipelineNames *PipelineNames `json:"pipeline_names,omitempty"`
|
||||
// Host OS of the worker which handled the job
|
||||
|
|
|
|||
|
|
@ -258,8 +258,6 @@ func (s *Server) OSBuildJobStatus(id uuid.UUID, result *OSBuildJobResult) (*JobS
|
|||
result.JobError = clienterrors.WorkerClientError(clienterrors.ErrorBuildJob, "osbuild build failed")
|
||||
} else if len(result.OSBuildOutput.Error) > 0 {
|
||||
result.JobError = clienterrors.WorkerClientError(clienterrors.ErrorOldResultCompatible, string(result.OSBuildOutput.Error))
|
||||
} else if len(result.TargetErrors) > 0 {
|
||||
result.JobError = clienterrors.WorkerClientError(clienterrors.ErrorOldResultCompatible, result.TargetErrors[0])
|
||||
}
|
||||
}
|
||||
// For backwards compatibility: OSBuildJobResult didn't use to have a
|
||||
|
|
|
|||
|
|
@ -719,110 +719,6 @@ func TestDepsolveLegacyErrorConversion(t *testing.T) {
|
|||
require.Equal(t, expectedResult, depsolveJobResult)
|
||||
}
|
||||
|
||||
// Enqueue OSBuild jobs and save both kinds of
|
||||
// error types (old & new) to the queue and ensure
|
||||
// that both kinds of errors can be read back
|
||||
func TestMixedOSBuildJobErrors(t *testing.T) {
|
||||
require := require.New(t)
|
||||
|
||||
emptyManifestV2 := distro.Manifest(`{"version":"2","pipelines":{}}`)
|
||||
server := newTestServer(t, t.TempDir(), time.Millisecond*10, "/", false)
|
||||
|
||||
oldJob := worker.OSBuildJob{
|
||||
Manifest: emptyManifestV2,
|
||||
ImageName: "no-pipeline-names",
|
||||
}
|
||||
oldJobID, err := server.EnqueueOSBuild("x", &oldJob, "")
|
||||
require.NoError(err)
|
||||
|
||||
newJob := worker.OSBuildJob{
|
||||
Manifest: emptyManifestV2,
|
||||
ImageName: "with-pipeline-names",
|
||||
PipelineNames: &worker.PipelineNames{
|
||||
Build: []string{"build"},
|
||||
Payload: []string{"other", "pipelines"},
|
||||
},
|
||||
}
|
||||
newJobID, err := server.EnqueueOSBuild("x", &newJob, "")
|
||||
require.NoError(err)
|
||||
|
||||
oldJobRead := new(worker.OSBuildJob)
|
||||
err = server.OSBuildJob(oldJobID, oldJobRead)
|
||||
require.NoError(err)
|
||||
// Not entirely equal
|
||||
require.NotEqual(oldJob, oldJobRead)
|
||||
|
||||
// NewJob the same when read back
|
||||
newJobRead := new(worker.OSBuildJob)
|
||||
err = server.OSBuildJob(newJobID, newJobRead)
|
||||
require.NoError(err)
|
||||
|
||||
// Dequeue the jobs (via RequestJob) to get their tokens and update them to
|
||||
// test the result retrieval
|
||||
|
||||
getJob := func() (uuid.UUID, uuid.UUID) {
|
||||
// don't block forever if the jobs weren't added or can't be retrieved
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
|
||||
defer cancel()
|
||||
id, token, _, _, _, err := server.RequestJob(ctx, "x", []string{worker.JobTypeOSBuild}, []string{""})
|
||||
require.NoError(err)
|
||||
return id, token
|
||||
}
|
||||
|
||||
getJobTokens := func(n uint) map[uuid.UUID]uuid.UUID {
|
||||
tokens := make(map[uuid.UUID]uuid.UUID, n)
|
||||
for idx := uint(0); idx < n; idx++ {
|
||||
id, token := getJob()
|
||||
tokens[id] = token
|
||||
}
|
||||
return tokens
|
||||
}
|
||||
|
||||
jobTokens := getJobTokens(2)
|
||||
// make sure we got them both as expected
|
||||
require.Contains(jobTokens, oldJobID)
|
||||
require.Contains(jobTokens, newJobID)
|
||||
|
||||
oldJobResult := &worker.OSBuildJobResult{
|
||||
TargetErrors: []string{"Upload error"},
|
||||
}
|
||||
oldJobResultRaw, err := json.Marshal(oldJobResult)
|
||||
require.NoError(err)
|
||||
oldJobToken := jobTokens[oldJobID]
|
||||
err = server.FinishJob(oldJobToken, oldJobResultRaw)
|
||||
require.NoError(err)
|
||||
|
||||
oldJobResultRead := new(worker.OSBuildJobResult)
|
||||
_, _, err = server.OSBuildJobStatus(oldJobID, oldJobResultRead)
|
||||
require.NoError(err)
|
||||
|
||||
require.NotEqual(oldJobResult, oldJobResultRead)
|
||||
require.Equal(oldJobResult.Success, false)
|
||||
require.Equal(oldJobResultRead.Success, false)
|
||||
|
||||
newJobResult := &worker.OSBuildJobResult{
|
||||
PipelineNames: &worker.PipelineNames{
|
||||
Build: []string{"build-result"},
|
||||
Payload: []string{"result-test-payload", "result-test-assembler"},
|
||||
},
|
||||
JobResult: worker.JobResult{
|
||||
JobError: clienterrors.WorkerClientError(clienterrors.ErrorUploadingImage, "Error uploading image", nil),
|
||||
},
|
||||
}
|
||||
newJobResultRaw, err := json.Marshal(newJobResult)
|
||||
require.NoError(err)
|
||||
newJobToken := jobTokens[newJobID]
|
||||
err = server.FinishJob(newJobToken, newJobResultRaw)
|
||||
require.NoError(err)
|
||||
|
||||
newJobResultRead := new(worker.OSBuildJobResult)
|
||||
_, _, err = server.OSBuildJobStatus(newJobID, newJobResultRead)
|
||||
require.NoError(err)
|
||||
require.Equal(newJobResult, newJobResultRead)
|
||||
require.Equal(newJobResult.Success, false)
|
||||
require.Equal(newJobResultRead.Success, false)
|
||||
}
|
||||
|
||||
// Enqueue Koji jobs and save both kinds of
|
||||
// error types (old & new) to the queue and ensure
|
||||
// that both kinds of errors can be read back
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue