// Package v2 provides primitives to interact with the openapi HTTP API. // // Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.4.1 DO NOT EDIT. package v2 import ( "bytes" "compress/gzip" "encoding/base64" "encoding/json" "fmt" "net/http" "net/url" "path" "strings" "github.com/getkin/kin-openapi/openapi3" "github.com/labstack/echo/v4" "github.com/oapi-codegen/runtime" openapi_types "github.com/oapi-codegen/runtime/types" ) const ( BearerScopes = "Bearer.Scopes" ) // Defines values for AzureUploadOptionsHyperVGeneration. const ( V1 AzureUploadOptionsHyperVGeneration = "V1" V2 AzureUploadOptionsHyperVGeneration = "V2" ) // Defines values for BlueprintCustomizationsPartitioningMode. const ( BlueprintCustomizationsPartitioningModeAutoLvm BlueprintCustomizationsPartitioningMode = "auto-lvm" BlueprintCustomizationsPartitioningModeLvm BlueprintCustomizationsPartitioningMode = "lvm" BlueprintCustomizationsPartitioningModeRaw BlueprintCustomizationsPartitioningMode = "raw" ) // Defines values for ComposeStatusValue. const ( ComposeStatusValueFailure ComposeStatusValue = "failure" ComposeStatusValuePending ComposeStatusValue = "pending" ComposeStatusValueSuccess ComposeStatusValue = "success" ) // Defines values for CustomizationsPartitioningMode. const ( CustomizationsPartitioningModeAutoLvm CustomizationsPartitioningMode = "auto-lvm" CustomizationsPartitioningModeLvm CustomizationsPartitioningMode = "lvm" CustomizationsPartitioningModeRaw CustomizationsPartitioningMode = "raw" ) // Defines values for ImageSBOMPipelinePurpose. const ( Buildroot ImageSBOMPipelinePurpose = "buildroot" Image ImageSBOMPipelinePurpose = "image" ) // Defines values for ImageSBOMSbomType. const ( Spdx ImageSBOMSbomType = "spdx" ) // Defines values for ImageStatusValue. const ( ImageStatusValueBuilding ImageStatusValue = "building" ImageStatusValueFailure ImageStatusValue = "failure" ImageStatusValuePending ImageStatusValue = "pending" ImageStatusValueRegistering ImageStatusValue = "registering" ImageStatusValueSuccess ImageStatusValue = "success" ImageStatusValueUploading ImageStatusValue = "uploading" ) // Defines values for ImageTypes. const ( ImageTypesAws ImageTypes = "aws" ImageTypesAwsHaRhui ImageTypes = "aws-ha-rhui" ImageTypesAwsRhui ImageTypes = "aws-rhui" ImageTypesAwsSapRhui ImageTypes = "aws-sap-rhui" ImageTypesAzure ImageTypes = "azure" ImageTypesAzureEap7Rhui ImageTypes = "azure-eap7-rhui" ImageTypesAzureRhui ImageTypes = "azure-rhui" ImageTypesAzureSapRhui ImageTypes = "azure-sap-rhui" ImageTypesEdgeCommit ImageTypes = "edge-commit" ImageTypesEdgeContainer ImageTypes = "edge-container" ImageTypesEdgeInstaller ImageTypes = "edge-installer" ImageTypesGcp ImageTypes = "gcp" ImageTypesGcpRhui ImageTypes = "gcp-rhui" ImageTypesGuestImage ImageTypes = "guest-image" ImageTypesImageInstaller ImageTypes = "image-installer" ImageTypesIotBootableContainer ImageTypes = "iot-bootable-container" ImageTypesIotCommit ImageTypes = "iot-commit" ImageTypesIotContainer ImageTypes = "iot-container" ImageTypesIotInstaller ImageTypes = "iot-installer" ImageTypesIotRawImage ImageTypes = "iot-raw-image" ImageTypesIotSimplifiedInstaller ImageTypes = "iot-simplified-installer" ImageTypesLiveInstaller ImageTypes = "live-installer" ImageTypesMinimalRaw ImageTypes = "minimal-raw" ImageTypesOci ImageTypes = "oci" ImageTypesVsphere ImageTypes = "vsphere" ImageTypesVsphereOva ImageTypes = "vsphere-ova" ImageTypesWsl ImageTypes = "wsl" ) // Defines values for UploadStatusValue. 
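// Illustrative usage sketch (not generated code): the enumerated values above
// are typed string constants, so callers can compare and switch on them
// directly. The helper below is hypothetical and only shows the pattern.
func exampleStatusAndImageTypeSketch(status ComposeStatusValue, imageType ImageTypes) {
	// A compose is finished once it reports success or failure.
	terminal := status == ComposeStatusValueSuccess || status == ComposeStatusValueFailure

	switch imageType {
	case ImageTypesGuestImage, ImageTypesAws, ImageTypesAzure, ImageTypesGcp:
		fmt.Printf("cloud or guest image %q, terminal=%v\n", imageType, terminal)
	default:
		fmt.Printf("image type %q, terminal=%v\n", imageType, terminal)
	}
}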
const ( Failure UploadStatusValue = "failure" Pending UploadStatusValue = "pending" Running UploadStatusValue = "running" Success UploadStatusValue = "success" ) // Defines values for UploadTypes. const ( UploadTypesAws UploadTypes = "aws" UploadTypesAwsS3 UploadTypes = "aws.s3" UploadTypesAzure UploadTypes = "azure" UploadTypesContainer UploadTypes = "container" UploadTypesGcp UploadTypes = "gcp" UploadTypesLocal UploadTypes = "local" UploadTypesOciObjectstorage UploadTypes = "oci.objectstorage" UploadTypesPulpOstree UploadTypes = "pulp.ostree" ) // AWSEC2CloneCompose defines model for AWSEC2CloneCompose. type AWSEC2CloneCompose struct { Region string `json:"region"` ShareWithAccounts *[]string `json:"share_with_accounts,omitempty"` } // AWSEC2UploadOptions defines model for AWSEC2UploadOptions. type AWSEC2UploadOptions struct { Region string `json:"region"` ShareWithAccounts []string `json:"share_with_accounts"` SnapshotName *string `json:"snapshot_name,omitempty"` } // AWSEC2UploadStatus defines model for AWSEC2UploadStatus. type AWSEC2UploadStatus struct { Ami string `json:"ami"` Region string `json:"region"` } // AWSS3UploadOptions defines model for AWSS3UploadOptions. type AWSS3UploadOptions struct { // Public If set to false (the default value), a long, obfuscated URL // is returned. Its expiration might be sooner than for other upload // targets. // // If set to true, a shorter URL is returned and // its expiration is the same as for the other upload targets. Public *bool `json:"public,omitempty"` Region string `json:"region"` } // AWSS3UploadStatus defines model for AWSS3UploadStatus. type AWSS3UploadStatus struct { Url string `json:"url"` } // AzureUploadOptions defines model for AzureUploadOptions. type AzureUploadOptions struct { // HyperVGeneration Choose the VM Image HyperV generation, different features on Azure are available // depending on the HyperV generation. HyperVGeneration *AzureUploadOptionsHyperVGeneration `json:"hyper_v_generation,omitempty"` // ImageName Name of the uploaded image. It must be unique in the given resource group. // If name is omitted from the request, a random one based on a UUID is // generated. ImageName *string `json:"image_name,omitempty"` // Location Location of the provided resource_group, where the image should be uploaded and registered. // How to list all locations: // https://docs.microsoft.com/en-us/cli/azure/account?view=azure-cli-latest#az_account_list_locations' // If the location is not specified, it is deducted from the provided resource_group. Location *string `json:"location,omitempty"` // ResourceGroup Name of the resource group where the image should be uploaded. ResourceGroup string `json:"resource_group"` // SubscriptionId ID of subscription where the image should be uploaded. SubscriptionId string `json:"subscription_id"` // TenantId ID of the tenant where the image should be uploaded. // How to find it in the Azure Portal: // https://docs.microsoft.com/en-us/azure/active-directory/fundamentals/active-directory-how-to-find-tenant TenantId string `json:"tenant_id"` } // AzureUploadOptionsHyperVGeneration Choose the VM Image HyperV generation, different features on Azure are available // depending on the HyperV generation. type AzureUploadOptionsHyperVGeneration string // AzureUploadStatus defines model for AzureUploadStatus. type AzureUploadStatus struct { ImageName string `json:"image_name"` } // Blueprint defines model for Blueprint. 
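// Illustrative usage sketch (not generated code): optional fields on the
// upload option structs above are pointers, so literal values need an
// addressable variable. All concrete values here (region, resource group,
// IDs) are made-up placeholders.
func exampleUploadOptionsSketch() {
	public := true
	s3 := AWSS3UploadOptions{
		Region: "us-east-1",
		Public: &public, // request the shorter URL form described above
	}

	imageName := "example-image"
	hyperV := V2
	azure := AzureUploadOptions{
		SubscriptionId:   "00000000-0000-0000-0000-000000000000",
		TenantId:         "00000000-0000-0000-0000-000000000000",
		ResourceGroup:    "example-resource-group",
		ImageName:        &imageName,
		HyperVGeneration: &hyperV,
	}

	s3JSON, _ := json.Marshal(s3)
	azureJSON, _ := json.Marshal(azure)
	fmt.Printf("%s\n%s\n", s3JSON, azureJSON)
}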
type Blueprint struct { // Containers Container images to embed into the final artifact Containers *[]Container `json:"containers,omitempty"` Customizations *BlueprintCustomizations `json:"customizations,omitempty"` Description *string `json:"description,omitempty"` // Distro The distribution to use for the compose. If left empty the host // distro will be used. Distro *string `json:"distro,omitempty"` EnabledModules *[]Module `json:"enabled_modules,omitempty"` // Groups Package groups to be installed Groups *[]PackageGroup `json:"groups,omitempty"` // Modules An alias for packages, retained for backwards compatibility Modules *[]Package `json:"modules,omitempty"` Name string `json:"name"` // Packages Packages to be installed Packages *[]Package `json:"packages,omitempty"` // Version A semver version number Version *string `json:"version,omitempty"` } // BlueprintCustomizations defines model for BlueprintCustomizations. type BlueprintCustomizations struct { Cacerts *CACertsCustomization `json:"cacerts,omitempty"` // Directories Directories to create in the final artifact Directories *[]Directory `json:"directories,omitempty"` // Fdo FIDO device onboard configuration Fdo *FDO `json:"fdo,omitempty"` // Files Files to create in the final artifact Files *[]BlueprintFile `json:"files,omitempty"` // Filesystem List of filesystem mountpoints to create Filesystem *[]BlueprintFilesystem `json:"filesystem,omitempty"` // Fips Enable FIPS mode Fips *bool `json:"fips,omitempty"` // Firewall Firewalld configuration Firewall *BlueprintFirewall `json:"firewall,omitempty"` // Group List of groups to create Group *[]Group `json:"group,omitempty"` // Hostname Configures the hostname Hostname *string `json:"hostname,omitempty"` // Ignition Ignition configuration Ignition *Ignition `json:"ignition,omitempty"` // InstallationDevice Name of the installation device, currently only useful for the edge-simplified-installer type InstallationDevice *string `json:"installation_device,omitempty"` Installer *Installer `json:"installer,omitempty"` Kernel *Kernel `json:"kernel,omitempty"` // Locale Locale configuration Locale *Locale `json:"locale,omitempty"` Openscap *BlueprintOpenSCAP `json:"openscap,omitempty"` // PartitioningMode Select how the disk image will be partitioned. 'auto-lvm' will use raw unless // there are one or more mountpoints in which case it will use LVM. 'lvm' always // uses LVM, even when there are no extra mountpoints. 'raw' uses raw partitions // even when there are one or more mountpoints. PartitioningMode *BlueprintCustomizationsPartitioningMode `json:"partitioning_mode,omitempty"` // Repositories Repositories to write to /etc/yum.repos.d/ in the final image. Note // that these are not used at build time. Repositories *[]BlueprintRepository `json:"repositories,omitempty"` Rhsm *RHSMCustomization `json:"rhsm,omitempty"` Rpm *RPMCustomization `json:"rpm,omitempty"` Services *Services `json:"services,omitempty"` // Sshkey List of ssh keys Sshkey *[]SSHKey `json:"sshkey,omitempty"` // Timezone Timezone configuration Timezone *Timezone `json:"timezone,omitempty"` // User List of users to create User *[]BlueprintUser `json:"user,omitempty"` } // BlueprintCustomizationsPartitioningMode Select how the disk image will be partitioned. 'auto-lvm' will use raw unless // there are one or more mountpoints in which case it will use LVM. 'lvm' always // uses LVM, even when there are no extra mountpoints. 'raw' uses raw partitions // even when there are one or more mountpoints.
type BlueprintCustomizationsPartitioningMode string // BlueprintFile A custom file to create in the final artifact. type BlueprintFile struct { // Data Contents of the file as plain text Data *string `json:"data,omitempty"` // Group Group of the file as a gid or a group name Group *BlueprintFile_Group `json:"group,omitempty"` // Mode Permissions string for the file in octal format Mode *string `json:"mode,omitempty"` // Path Path to the file Path string `json:"path"` // User Owner of the file as a uid or a user name User *BlueprintFile_User `json:"user,omitempty"` } // BlueprintFileGroup0 defines model for . type BlueprintFileGroup0 = string // BlueprintFileGroup1 defines model for . type BlueprintFileGroup1 = int // BlueprintFile_Group Group of the file as a gid or a group name type BlueprintFile_Group struct { union json.RawMessage } // BlueprintFileUser0 defines model for . type BlueprintFileUser0 = string // BlueprintFileUser1 defines model for . type BlueprintFileUser1 = int // BlueprintFile_User Owner of the file as a uid or a user name type BlueprintFile_User struct { union json.RawMessage } // BlueprintFilesystem defines model for BlueprintFilesystem. type BlueprintFilesystem struct { // Minsize size of the filesystem in bytes Minsize uint64 `json:"minsize"` Mountpoint string `json:"mountpoint"` } // BlueprintFirewall Firewalld configuration type BlueprintFirewall struct { // Ports List of ports (or port ranges) and protocols to open Ports *[]string `json:"ports,omitempty"` // Services Firewalld services to enable or disable Services *FirewallServices `json:"services,omitempty"` Zones *[]FirewallZones `json:"zones,omitempty"` } // BlueprintOpenSCAP defines model for BlueprintOpenSCAP. type BlueprintOpenSCAP struct { Datastream *string `json:"datastream,omitempty"` JsonTailoring *OpenSCAPJSONTailoring `json:"json_tailoring,omitempty"` // PolicyId Puts a specified policy ID in the RHSM facts, so that any instances registered to // insights will be automatically connected to the compliance policy in the console. PolicyId *openapi_types.UUID `json:"policy_id,omitempty"` ProfileId string `json:"profile_id"` Tailoring *OpenSCAPTailoring `json:"tailoring,omitempty"` } // BlueprintRepository defines model for BlueprintRepository. type BlueprintRepository struct { Baseurls *[]string `json:"baseurls,omitempty"` Enabled *bool `json:"enabled,omitempty"` Filename *string `json:"filename,omitempty"` Gpgcheck *bool `json:"gpgcheck,omitempty"` Gpgkeys *[]string `json:"gpgkeys,omitempty"` Id string `json:"id"` Metalink *string `json:"metalink,omitempty"` Mirrorlist *string `json:"mirrorlist,omitempty"` // ModuleHotfixes Disables modularity filtering for this repository. ModuleHotfixes *bool `json:"module_hotfixes,omitempty"` Name *string `json:"name,omitempty"` Priority *int `json:"priority,omitempty"` RepoGpgcheck *bool `json:"repo_gpgcheck,omitempty"` Sslverify *bool `json:"sslverify,omitempty"` } // BlueprintUser defines model for BlueprintUser. type BlueprintUser struct { Description *string `json:"description,omitempty"` // Gid Group id to use instead of the default Gid *int `json:"gid,omitempty"` // Groups A list of additional groups to add the user to Groups *[]string `json:"groups,omitempty"` // Home The user's home directory Home *string `json:"home,omitempty"` // Key ssh public key Key *string `json:"key,omitempty"` Name string `json:"name"` // Password If the password starts with $6$, $5$, or $2b$ it will be stored as // an encrypted password. 
Otherwise it will be treated as a plain text // password. Password *string `json:"password,omitempty"` // Shell Login shell to use Shell *string `json:"shell,omitempty"` // Uid User id to use instead of the default Uid *int `json:"uid,omitempty"` } // CACertsCustomization defines model for CACertsCustomization. type CACertsCustomization struct { PemCerts []string `json:"pem_certs"` } // CloneComposeBody defines model for CloneComposeBody. type CloneComposeBody struct { union json.RawMessage } // CloneComposeResponse defines model for CloneComposeResponse. type CloneComposeResponse struct { Href string `json:"href"` Id openapi_types.UUID `json:"id"` Kind string `json:"kind"` } // CloneStatus defines model for CloneStatus. type CloneStatus struct { Href string `json:"href"` Id string `json:"id"` Kind string `json:"kind"` Options CloneStatus_Options `json:"options"` Status UploadStatusValue `json:"status"` Type UploadTypes `json:"type"` } // CloneStatus_Options defines model for CloneStatus.Options. type CloneStatus_Options struct { union json.RawMessage } // ComposeId defines model for ComposeId. type ComposeId struct { Href string `json:"href"` Id openapi_types.UUID `json:"id"` Kind string `json:"kind"` } // ComposeList defines model for ComposeList. type ComposeList struct { Items []ComposeStatus `json:"items"` Kind string `json:"kind"` Page int `json:"page"` Size int `json:"size"` Total int `json:"total"` } // ComposeLogs defines model for ComposeLogs. type ComposeLogs struct { Href string `json:"href"` Id string `json:"id"` ImageBuilds []interface{} `json:"image_builds"` Kind string `json:"kind"` Koji *KojiLogs `json:"koji,omitempty"` } // ComposeManifests defines model for ComposeManifests. type ComposeManifests struct { Href string `json:"href"` Id string `json:"id"` Kind string `json:"kind"` Manifests []interface{} `json:"manifests"` } // ComposeMetadata defines model for ComposeMetadata. type ComposeMetadata struct { Href string `json:"href"` Id string `json:"id"` Kind string `json:"kind"` // OstreeCommit ID (hash) of the built commit OstreeCommit *string `json:"ostree_commit,omitempty"` // Packages Package list including NEVRA Packages *[]PackageMetadata `json:"packages,omitempty"` Request *ComposeRequest `json:"request,omitempty"` } // ComposeRequest defines model for ComposeRequest. type ComposeRequest struct { Blueprint *Blueprint `json:"blueprint,omitempty"` Customizations *Customizations `json:"customizations,omitempty"` Distribution string `json:"distribution"` ImageRequest *ImageRequest `json:"image_request,omitempty"` ImageRequests *[]ImageRequest `json:"image_requests,omitempty"` Koji *Koji `json:"koji,omitempty"` } // ComposeSBOMs defines model for ComposeSBOMs. type ComposeSBOMs struct { Href string `json:"href"` Id string `json:"id"` // Items The SBOM documents for each image built in the compose. Items [][]ImageSBOM `json:"items"` Kind string `json:"kind"` } // ComposeStatus defines model for ComposeStatus. type ComposeStatus struct { Href string `json:"href"` Id string `json:"id"` ImageStatus ImageStatus `json:"image_status"` ImageStatuses *[]ImageStatus `json:"image_statuses,omitempty"` Kind string `json:"kind"` KojiStatus *KojiStatus `json:"koji_status,omitempty"` Status ComposeStatusValue `json:"status"` } // ComposeStatusError defines model for ComposeStatusError. type ComposeStatusError struct { Details *interface{} `json:"details,omitempty"` Id int `json:"id"` Reason string `json:"reason"` } // ComposeStatusValue defines model for ComposeStatusValue. 
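// Illustrative usage sketch (not generated code): a minimal ComposeRequest
// built from the Blueprint type above. The distribution and package names
// are made-up placeholders; field semantics follow the struct doc comments.
func exampleComposeRequestSketch() ([]byte, error) {
	hostname := "demo-host"
	bp := Blueprint{
		Name: "example-blueprint",
		Packages: &[]Package{
			{Name: "openssh-server"},
		},
		Customizations: &BlueprintCustomizations{
			Hostname: &hostname,
		},
	}

	req := ComposeRequest{
		Distribution: "fedora-40", // placeholder distribution name
		Blueprint:    &bp,
	}

	// PostComposeJSONRequestBody (defined further down) is an alias of
	// ComposeRequest, so the request body can be marshalled directly.
	return json.Marshal(req)
}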
type ComposeStatusValue string // Container defines model for Container. type Container struct { // Name Name to use for the container from the image Name *string `json:"name,omitempty"` // Source Reference to the container to embed Source string `json:"source"` // TlsVerify Control TLS verification TlsVerify *bool `json:"tls_verify,omitempty"` } // ContainerUploadOptions defines model for ContainerUploadOptions. type ContainerUploadOptions struct { // Name Name for the created container image Name *string `json:"name,omitempty"` // Tag Tag for the created container image Tag *string `json:"tag,omitempty"` } // ContainerUploadStatus defines model for ContainerUploadStatus. type ContainerUploadStatus struct { // Digest Digest of the manifest of the uploaded container on the registry Digest string `json:"digest"` // Url FQDN of the uploaded image Url string `json:"url"` } // CustomRepository defines model for CustomRepository. type CustomRepository struct { Baseurl *[]string `json:"baseurl,omitempty"` CheckGpg *bool `json:"check_gpg,omitempty"` CheckRepoGpg *bool `json:"check_repo_gpg,omitempty"` Enabled *bool `json:"enabled,omitempty"` Filename *string `json:"filename,omitempty"` Gpgkey *[]string `json:"gpgkey,omitempty"` Id string `json:"id"` Metalink *string `json:"metalink,omitempty"` Mirrorlist *string `json:"mirrorlist,omitempty"` ModuleHotfixes *bool `json:"module_hotfixes,omitempty"` Name *string `json:"name,omitempty"` Priority *int `json:"priority,omitempty"` SslVerify *bool `json:"ssl_verify,omitempty"` } // Customizations defines model for Customizations. type Customizations struct { Cacerts *CACertsCustomization `json:"cacerts,omitempty"` Containers *[]Container `json:"containers,omitempty"` // CustomRepositories Extra repositories for packages specified in customizations. These // repositories will be used to depsolve and retrieve packages. Additionally, // these repositories will be saved and imported to the `/etc/yum.repos.d/` directory // on the image CustomRepositories *[]CustomRepository `json:"custom_repositories,omitempty"` Directories *[]Directory `json:"directories,omitempty"` EnabledModules *[]Module `json:"enabled_modules,omitempty"` // Fdo FIDO device onboard configuration Fdo *FDO `json:"fdo,omitempty"` Files *[]File `json:"files,omitempty"` Filesystem *[]Filesystem `json:"filesystem,omitempty"` // Fips System FIPS mode setup Fips *FIPS `json:"fips,omitempty"` // Firewall Firewalld configuration Firewall *FirewallCustomization `json:"firewall,omitempty"` // Groups List of groups to create Groups *[]Group `json:"groups,omitempty"` // Hostname Configures the hostname Hostname *string `json:"hostname,omitempty"` // Ignition Ignition configuration Ignition *Ignition `json:"ignition,omitempty"` // InstallationDevice Name of the installation device, currently only useful for the edge-simplified-installer type InstallationDevice *string `json:"installation_device,omitempty"` Installer *Installer `json:"installer,omitempty"` Kernel *Kernel `json:"kernel,omitempty"` // Locale Locale configuration Locale *Locale `json:"locale,omitempty"` Openscap *OpenSCAP `json:"openscap,omitempty"` Packages *[]string `json:"packages,omitempty"` // PartitioningMode Select how the disk image will be partitioned. 'auto-lvm' will use raw unless // there are one or more mountpoints in which case it will use LVM. 'lvm' always // uses LVM, even when there are no extra mountpoints. 'raw' uses raw partitions // even when there are one or more mountpoints.
PartitioningMode *CustomizationsPartitioningMode `json:"partitioning_mode,omitempty"` // PayloadRepositories Extra repositories for packages specified in customizations. These // repositories will only be used to depsolve and retrieve packages // for the OS itself (they will not be available for the build root or // any other part of the build process). The package_sets field for these // repositories is ignored. PayloadRepositories *[]Repository `json:"payload_repositories,omitempty"` Rhsm *RHSMCustomization `json:"rhsm,omitempty"` Rpm *RPMCustomization `json:"rpm,omitempty"` Services *Services `json:"services,omitempty"` Subscription *Subscription `json:"subscription,omitempty"` // Timezone Timezone configuration Timezone *Timezone `json:"timezone,omitempty"` Users *[]User `json:"users,omitempty"` } // CustomizationsPartitioningMode Select how the disk image will be partitioned. 'auto-lvm' will use raw unless // there are one or more mountpoints in which case it will use LVM. 'lvm' always // uses LVM, even when there are no extra mountpoints. 'raw' uses raw partitions // even when there are one or more mountpoints. type CustomizationsPartitioningMode string // DNFPluginConfig defines model for DNFPluginConfig. type DNFPluginConfig struct { Enabled *bool `json:"enabled,omitempty"` } // DepsolveRequest defines model for DepsolveRequest. type DepsolveRequest struct { Architecture string `json:"architecture"` Blueprint Blueprint `json:"blueprint"` Distribution string `json:"distribution"` Repositories *[]Repository `json:"repositories,omitempty"` } // DepsolveResponse defines model for DepsolveResponse. type DepsolveResponse struct { // Packages Package list including NEVRA Packages []PackageMetadataCommon `json:"packages"` } // Directory A custom directory to create in the final artifact. type Directory struct { // EnsureParents Ensure that the parent directories exist EnsureParents *bool `json:"ensure_parents,omitempty"` // Group Group of the directory as a group name or a gid Group *Directory_Group `json:"group,omitempty"` // Mode Permissions string for the directory in octal format Mode *string `json:"mode,omitempty"` // Path Path to the directory Path string `json:"path"` // User Owner of the directory as a user name or a uid User *Directory_User `json:"user,omitempty"` } // DirectoryGroup0 defines model for . type DirectoryGroup0 = string // DirectoryGroup1 defines model for . type DirectoryGroup1 = int // Directory_Group Group of the directory as a group name or a gid type Directory_Group struct { union json.RawMessage } // DirectoryUser0 defines model for . type DirectoryUser0 = string // DirectoryUser1 defines model for . type DirectoryUser1 = int // Directory_User Owner of the directory as a user name or a uid type Directory_User struct { union json.RawMessage } // DistributionList defines model for DistributionList. type DistributionList struct { // Map Distribution name Map *map[string]interface{} `json:"map,omitempty"` } // Error defines model for Error. type Error struct { Code string `json:"code"` Details *interface{} `json:"details,omitempty"` Href string `json:"href"` Id string `json:"id"` Kind string `json:"kind"` OperationId string `json:"operation_id"` Reason string `json:"reason"` } // ErrorList defines model for ErrorList. 
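// Illustrative usage sketch (not generated code): Directory_User and
// Directory_Group are string-or-integer unions over raw JSON. The generated
// From*/As* helpers further down select a variant; this sketch sets the
// owner by name and the group by gid.
func exampleDirectoryOwnershipSketch() (Directory, error) {
	mode := "0755"
	ensure := true
	dir := Directory{
		Path:          "/etc/example.d", // made-up path
		Mode:          &mode,
		EnsureParents: &ensure,
		User:          &Directory_User{},
		Group:         &Directory_Group{},
	}

	// Variant 0 is a string (user or group name) ...
	if err := dir.User.FromDirectoryUser0("root"); err != nil {
		return dir, err
	}
	// ... and variant 1 is an integer id.
	if err := dir.Group.FromDirectoryGroup1(0); err != nil {
		return dir, err
	}
	return dir, nil
}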
type ErrorList struct { Items []Error `json:"items"` Kind string `json:"kind"` Page int `json:"page"` Size int `json:"size"` Total int `json:"total"` } // FDO FIDO device onboard configuration type FDO struct { DiMfgStringTypeMacIface *string `json:"di_mfg_string_type_mac_iface,omitempty"` DiunPubKeyHash *string `json:"diun_pub_key_hash,omitempty"` DiunPubKeyInsecure *string `json:"diun_pub_key_insecure,omitempty"` DiunPubKeyRootCerts *string `json:"diun_pub_key_root_certs,omitempty"` ManufacturingServerUrl *string `json:"manufacturing_server_url,omitempty"` } // FIPS System FIPS mode setup type FIPS struct { // Enabled Enables the system FIPS mode Enabled *bool `json:"enabled,omitempty"` } // File A custom file to create in the final artifact. type File struct { // Data Contents of the file as plain text Data *string `json:"data,omitempty"` // EnsureParents Ensure that the parent directories exist EnsureParents *bool `json:"ensure_parents,omitempty"` // Group Group of the file as a gid or a group name Group *File_Group `json:"group,omitempty"` // Mode Permissions string for the file in octal format Mode *string `json:"mode,omitempty"` // Path Path to the file Path string `json:"path"` // User Owner of the file as a uid or a user name User *File_User `json:"user,omitempty"` } // FileGroup0 defines model for . type FileGroup0 = string // FileGroup1 defines model for . type FileGroup1 = int // File_Group Group of the file as a gid or a group name type File_Group struct { union json.RawMessage } // FileUser0 defines model for . type FileUser0 = string // FileUser1 defines model for . type FileUser1 = int // File_User Owner of the file as a uid or a user name type File_User struct { union json.RawMessage } // Filesystem defines model for Filesystem. type Filesystem struct { // MinSize size of the filesystem in bytes MinSize uint64 `json:"min_size"` Mountpoint string `json:"mountpoint"` } // FirewallCustomization Firewalld configuration type FirewallCustomization struct { // Ports List of ports (or port ranges) and protocols to open Ports *[]string `json:"ports,omitempty"` // Services Firewalld services to enable or disable Services *FirewallServices `json:"services,omitempty"` } // FirewallServices Firewalld services to enable or disable type FirewallServices struct { // Disabled List of services to disable Disabled *[]string `json:"disabled,omitempty"` // Enabled List of services to enable Enabled *[]string `json:"enabled,omitempty"` } // FirewallZones Bind a list of network sources to a zone to restrict traffic from // those sources based on the settings of the zone. type FirewallZones struct { // Name name of the zone, if left empty the sources will apply to // the default zone. Name *string `json:"name,omitempty"` // Sources List of sources for the zone Sources *[]string `json:"sources,omitempty"` } // GCPUploadOptions defines model for GCPUploadOptions. type GCPUploadOptions struct { // Bucket Name of an existing STANDARD Storage class Bucket. Bucket *string `json:"bucket,omitempty"` // ImageName The name to use for the imported and shared Compute Engine image. // The image name must be unique within the GCP project, which is used // for the OS image upload and import. If not specified a random // 'composer-api-' string is used as the image name. ImageName *string `json:"image_name,omitempty"` // Region The GCP region where the OS image will be imported to and shared from. // The value must be a valid GCP location. See https://cloud.google.com/storage/docs/locations. 
// If not specified, the multi-region location closest to the source // (source Storage Bucket location) is chosen automatically. Region string `json:"region"` // ShareWithAccounts List of valid Google accounts to share the imported Compute Engine image with. // Each string must contain a specifier of the account type. Valid formats are: // - 'user:{emailid}': An email address that represents a specific // Google account. For example, 'alice@example.com'. // - 'serviceAccount:{emailid}': An email address that represents a // service account. For example, 'my-other-app@appspot.gserviceaccount.com'. // - 'group:{emailid}': An email address that represents a Google group. // For example, 'admins@example.com'. // - 'domain:{domain}': The G Suite domain (primary) that represents all // the users of that domain. For example, 'google.com' or 'example.com'. // If not specified, the imported Compute Engine image is not shared with any // account. ShareWithAccounts *[]string `json:"share_with_accounts,omitempty"` } // GCPUploadStatus defines model for GCPUploadStatus. type GCPUploadStatus struct { ImageName string `json:"image_name"` ProjectId string `json:"project_id"` } // Group defines model for Group. type Group struct { // Gid Group id of the group to create (optional) Gid *int `json:"gid,omitempty"` // Name Name of the group to create Name string `json:"name"` } // Ignition Ignition configuration type Ignition struct { Embedded *IgnitionEmbedded `json:"embedded,omitempty"` Firstboot *IgnitionFirstboot `json:"firstboot,omitempty"` } // IgnitionEmbedded defines model for IgnitionEmbedded. type IgnitionEmbedded struct { Config string `json:"config"` } // IgnitionFirstboot defines model for IgnitionFirstboot. type IgnitionFirstboot struct { // Url Provisioning URL Url string `json:"url"` } // ImageRequest defines model for ImageRequest. type ImageRequest struct { Architecture string `json:"architecture"` ImageType ImageTypes `json:"image_type"` Ostree *OSTree `json:"ostree,omitempty"` Repositories []Repository `json:"repositories"` // Size Size of image, in bytes. When set to 0 the image size is a minimum // defined by the image type. Size *uint64 `json:"size,omitempty"` // UploadOptions Options for a given upload destination. // This should really be oneOf but AWSS3UploadOptions is a subset of // AWSEC2UploadOptions. This means that all AWSEC2UploadOptions objects // are also valid AWSS3UploadOptions objects which violates the oneOf // rules. Therefore, we have to use anyOf here but be aware that it isn't // possible to mix and match more schemas together. UploadOptions *UploadOptions `json:"upload_options,omitempty"` // UploadTargets The type and options for multiple upload targets. Each item defines // a separate upload destination with its own options. Multiple // different targets as well as multiple targets of the same kind are // supported. UploadTargets *[]UploadTarget `json:"upload_targets,omitempty"` } // ImageSBOM defines model for ImageSBOM. type ImageSBOM struct { // PipelineName The name of the osbuild pipeline which has the packages described // in the SBOM installed. PipelineName string `json:"pipeline_name"` // PipelinePurpose The purpose of the pipeline. The `buildroot` pipeline was used for // the build environment during the image build. The `image` pipeline // represents the actual content of the image. Due to the nature of // some image types, there may be multiple pipelines of the same // purpose.
PipelinePurpose ImageSBOMPipelinePurpose `json:"pipeline_purpose"` // Sbom The SBOM document in the 'sbom_type' format. Sbom interface{} `json:"sbom"` // SbomType The type of the SBOM document. Currently only SPDX is supported. SbomType ImageSBOMSbomType `json:"sbom_type"` } // ImageSBOMPipelinePurpose The purpose of the pipeline. The `buildroot` pipeline was used for // the build environment during the image build. The `image` pipeline // represents the actual content of the image. Due to the nature of // some image types, there may be multiple pipelines of the same // purpose. type ImageSBOMPipelinePurpose string // ImageSBOMSbomType The type of the SBOM document. Currently only SPDX is supported. type ImageSBOMSbomType string // ImageStatus defines model for ImageStatus. type ImageStatus struct { Error *ComposeStatusError `json:"error,omitempty"` Status ImageStatusValue `json:"status"` UploadStatus *UploadStatus `json:"upload_status,omitempty"` UploadStatuses *[]UploadStatus `json:"upload_statuses,omitempty"` } // ImageStatusValue defines model for ImageStatusValue. type ImageStatusValue string // ImageTypes defines model for ImageTypes. type ImageTypes string // ImportKeys defines model for ImportKeys. type ImportKeys struct { Files *[]string `json:"files,omitempty"` } // Installer defines model for Installer. type Installer struct { SudoNopasswd *[]string `json:"sudo-nopasswd,omitempty"` Unattended *bool `json:"unattended,omitempty"` } // Kernel defines model for Kernel. type Kernel struct { // Append Appends arguments to the bootloader kernel command line Append *string `json:"append,omitempty"` // Name Name of the kernel to use Name *string `json:"name,omitempty"` } // Koji defines model for Koji. type Koji struct { Name string `json:"name"` Release string `json:"release"` Server string `json:"server"` TaskId int `json:"task_id"` Version string `json:"version"` } // KojiLogs defines model for KojiLogs. type KojiLogs struct { Import interface{} `json:"import"` Init interface{} `json:"init"` } // KojiStatus defines model for KojiStatus. type KojiStatus struct { BuildId *int `json:"build_id,omitempty"` } // List defines model for List. type List struct { Kind string `json:"kind"` Page int `json:"page"` Size int `json:"size"` Total int `json:"total"` } // LocalUploadOptions defines model for LocalUploadOptions. type LocalUploadOptions = map[string]interface{} // LocalUploadStatus defines model for LocalUploadStatus. type LocalUploadStatus struct { ArtifactPath string `json:"artifact_path"` } // Locale Locale configuration type Locale struct { // Keyboard Sets the keyboard layout Keyboard *string `json:"keyboard,omitempty"` // Languages List of locales to be installed, the first one becomes primary, subsequent ones are secondary Languages *[]string `json:"languages,omitempty"` } // Module defines model for Module. type Module struct { // Name Name of the module to enable. Name string `json:"name"` // Stream Stream to enable. Stream string `json:"stream"` } // OCIUploadOptions defines model for OCIUploadOptions. type OCIUploadOptions = map[string]interface{} // OCIUploadStatus defines model for OCIUploadStatus. type OCIUploadStatus struct { Url string `json:"url"` } // OSTree defines model for OSTree. type OSTree struct { // Contenturl A URL which, if set, is used for fetching content. Implies that `url` is set as well, // which will be used for metadata only.
Contenturl *string `json:"contenturl,omitempty"` // Parent Can be either a commit (example: 02604b2da6e954bd34b8b82a835e5a77d2b60ffa), or a branch-like reference (example: rhel/8/x86_64/edge) Parent *string `json:"parent,omitempty"` Ref *string `json:"ref,omitempty"` // Rhsm Determines whether a valid subscription manager (candlepin) identity is required to // access this repository. Consumer certificates will be used as client certificates when // fetching metadata and content. Rhsm *bool `json:"rhsm,omitempty"` Url *string `json:"url,omitempty"` } // ObjectReference defines model for ObjectReference. type ObjectReference struct { Href string `json:"href"` Id string `json:"id"` Kind string `json:"kind"` } // OpenSCAP defines model for OpenSCAP. type OpenSCAP struct { JsonTailoring *OpenSCAPJSONTailoring `json:"json_tailoring,omitempty"` // PolicyId Puts a specified policy ID in the RHSM facts, so that any instances registered to // insights will be automatically connected to the compliance policy in the console. PolicyId *openapi_types.UUID `json:"policy_id,omitempty"` ProfileId string `json:"profile_id"` Tailoring *OpenSCAPTailoring `json:"tailoring,omitempty"` } // OpenSCAPJSONTailoring defines model for OpenSCAPJSONTailoring. type OpenSCAPJSONTailoring struct { Filepath string `json:"filepath"` ProfileId string `json:"profile_id"` } // OpenSCAPTailoring defines model for OpenSCAPTailoring. type OpenSCAPTailoring struct { Selected *[]string `json:"selected,omitempty"` Unselected *[]string `json:"unselected,omitempty"` } // Package defines model for Package. type Package struct { // Name Name of the package to install. File globbing is supported, // eg. 'openssh-*' Name string `json:"name"` // Version Optional version of the package to install. If left blank the // latest available version will be used. Wildcards are supported // eg. '4.11.*' Version *string `json:"version,omitempty"` } // PackageDetails defines model for PackageDetails. type PackageDetails struct { Arch string `json:"arch"` Buildtime *string `json:"buildtime,omitempty"` Description *string `json:"description,omitempty"` Epoch *string `json:"epoch,omitempty"` License *string `json:"license,omitempty"` Name string `json:"name"` Release string `json:"release"` Summary *string `json:"summary,omitempty"` Url *string `json:"url,omitempty"` Version string `json:"version"` } // PackageGroup defines model for PackageGroup. type PackageGroup struct { // Name Package group name Name string `json:"name"` } // PackageMetadata defines model for PackageMetadata. type PackageMetadata struct { Arch string `json:"arch"` // Checksum Optional package checksum using ALGO:HASH form Checksum *string `json:"checksum,omitempty"` Epoch *string `json:"epoch,omitempty"` Name string `json:"name"` Release string `json:"release"` Sigmd5 string `json:"sigmd5"` Signature *string `json:"signature,omitempty"` Type string `json:"type"` Version string `json:"version"` } // PackageMetadataCommon defines model for PackageMetadataCommon. type PackageMetadataCommon struct { Arch string `json:"arch"` // Checksum Optional package checksum using ALGO:HASH form Checksum *string `json:"checksum,omitempty"` Epoch *string `json:"epoch,omitempty"` Name string `json:"name"` Release string `json:"release"` Signature *string `json:"signature,omitempty"` Type string `json:"type"` Version string `json:"version"` } // PulpOSTreeUploadOptions defines model for PulpOSTreeUploadOptions. 
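// Illustrative usage sketch (not generated code): an ImageRequest that
// uploads the result to AWS S3. UploadOptions is the anyOf union described
// on ImageRequest above, so the concrete options are attached through the
// generated From* helper. Architecture, image type and region are made-up
// placeholders, and Repositories would normally carry real distribution
// repositories.
func exampleImageRequestSketch() (ImageRequest, error) {
	var opts UploadOptions
	if err := opts.FromAWSS3UploadOptions(AWSS3UploadOptions{Region: "us-east-1"}); err != nil {
		return ImageRequest{}, err
	}

	return ImageRequest{
		Architecture: "x86_64",
		ImageType:    ImageTypesGuestImage,
		Repositories: []Repository{},
		UploadTargets: &[]UploadTarget{
			{Type: UploadTypesAwsS3, UploadOptions: opts},
		},
	}, nil
}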
type PulpOSTreeUploadOptions struct { // Basepath Basepath for distributing the repository Basepath string `json:"basepath"` // Repository Repository to import the ostree commit to Repository *string `json:"repository,omitempty"` ServerAddress *string `json:"server_address,omitempty"` } // PulpOSTreeUploadStatus defines model for PulpOSTreeUploadStatus. type PulpOSTreeUploadStatus struct { RepoUrl string `json:"repo_url"` } // RHSMConfig defines model for RHSMConfig. type RHSMConfig struct { DnfPlugins *SubManDNFPluginsConfig `json:"dnf_plugins,omitempty"` SubscriptionManager *SubManConfig `json:"subscription_manager,omitempty"` } // RHSMCustomization defines model for RHSMCustomization. type RHSMCustomization struct { Config *RHSMConfig `json:"config,omitempty"` } // RPMCustomization defines model for RPMCustomization. type RPMCustomization struct { ImportKeys *ImportKeys `json:"import_keys,omitempty"` } // Repository Repository configuration. // At least one of the 'baseurl', 'mirrorlist', 'metalink' properties must // be specified. If more of them are specified, the order of precedence is // the same as listed above. type Repository struct { Baseurl *string `json:"baseurl,omitempty"` CheckGpg *bool `json:"check_gpg,omitempty"` // CheckRepoGpg Enables gpg verification of the repository metadata CheckRepoGpg *bool `json:"check_repo_gpg,omitempty"` // Gpgkey GPG key used to sign packages in this repository. Gpgkey *string `json:"gpgkey,omitempty"` IgnoreSsl *bool `json:"ignore_ssl,omitempty"` Metalink *string `json:"metalink,omitempty"` Mirrorlist *string `json:"mirrorlist,omitempty"` // ModuleHotfixes Disables modularity filtering for this repository. ModuleHotfixes *bool `json:"module_hotfixes,omitempty"` // PackageSets Naming package sets for a repository assigns it to a specific part // (pipeline) of the build process. PackageSets *[]string `json:"package_sets,omitempty"` // Rhsm Determines whether a valid subscription is required to access this repository. Rhsm *bool `json:"rhsm,omitempty"` } // SSHKey defines model for SSHKey. type SSHKey struct { // Key Adds the key to the user's authorized_keys file Key string `json:"key"` // User User to configure the ssh key for User string `json:"user"` } // SearchPackagesRequest defines model for SearchPackagesRequest. type SearchPackagesRequest struct { Architecture string `json:"architecture"` Distribution string `json:"distribution"` // Packages Array of package names to search for. Supports * wildcards for // names, but not for versions. Packages []string `json:"packages"` Repositories *[]Repository `json:"repositories,omitempty"` } // SearchPackagesResponse defines model for SearchPackagesResponse. type SearchPackagesResponse struct { // Packages Detailed package information from DNF Packages []PackageDetails `json:"packages"` } // Services defines model for Services. type Services struct { // Disabled List of services to disable by default Disabled *[]string `json:"disabled,omitempty"` // Enabled List of services to enable by default Enabled *[]string `json:"enabled,omitempty"` // Masked List of services to mask by default Masked *[]string `json:"masked,omitempty"` } // SubManConfig defines model for SubManConfig. type SubManConfig struct { Rhsm *SubManRHSMConfig `json:"rhsm,omitempty"` Rhsmcertd *SubManRHSMCertdConfig `json:"rhsmcertd,omitempty"` } // SubManDNFPluginsConfig defines model for SubManDNFPluginsConfig. 
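// Illustrative usage sketch (not generated code): a package search request
// using the wildcard support described on SearchPackagesRequest above. The
// distribution, architecture and repository URL are made-up placeholders.
func exampleSearchPackagesSketch() ([]byte, error) {
	baseurl := "https://example.com/repo/"
	req := SearchPackagesRequest{
		Architecture: "x86_64",
		Distribution: "fedora-40",
		// Wildcards are supported for names, but not for versions.
		Packages: []string{"openssh-*", "vim-enhanced"},
		Repositories: &[]Repository{
			{Baseurl: &baseurl},
		},
	}
	return json.Marshal(req)
}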
type SubManDNFPluginsConfig struct { ProductId *DNFPluginConfig `json:"product_id,omitempty"` SubscriptionManager *DNFPluginConfig `json:"subscription_manager,omitempty"` } // SubManRHSMCertdConfig defines model for SubManRHSMCertdConfig. type SubManRHSMCertdConfig struct { AutoRegistration *bool `json:"auto_registration,omitempty"` } // SubManRHSMConfig defines model for SubManRHSMConfig. type SubManRHSMConfig struct { ManageRepos *bool `json:"manage_repos,omitempty"` } // Subscription defines model for Subscription. type Subscription struct { ActivationKey string `json:"activation_key"` BaseUrl string `json:"base_url"` Insights bool `json:"insights"` Organization string `json:"organization"` // Rhc Optional flag to use rhc to register the system, which also always enables Insights. Rhc *bool `json:"rhc,omitempty"` ServerUrl string `json:"server_url"` } // Timezone Timezone configuration type Timezone struct { // Ntpservers List of ntp servers Ntpservers *[]string `json:"ntpservers,omitempty"` // Timezone Name of the timezone, defaults to UTC Timezone *string `json:"timezone,omitempty"` } // UploadOptions Options for a given upload destination. // This should really be oneOf but AWSS3UploadOptions is a subset of // AWSEC2UploadOptions. This means that all AWSEC2UploadOptions objects // are also valid AWSS3UploadOptions objects which violates the oneOf // rules. Therefore, we have to use anyOf here but be aware that it isn't // possible to mix and match more schemas together. type UploadOptions struct { union json.RawMessage } // UploadStatus defines model for UploadStatus. type UploadStatus struct { Options UploadStatus_Options `json:"options"` Status UploadStatusValue `json:"status"` Type UploadTypes `json:"type"` } // UploadStatus_Options defines model for UploadStatus.Options. type UploadStatus_Options struct { union json.RawMessage } // UploadStatusValue defines model for UploadStatusValue. type UploadStatusValue string // UploadTarget defines model for UploadTarget. type UploadTarget struct { Type UploadTypes `json:"type"` // UploadOptions Options for a given upload destination. // This should really be oneOf but AWSS3UploadOptions is a subset of // AWSEC2UploadOptions. This means that all AWSEC2UploadOptions objects // are also valid AWSS3UploadOptions objects which violates the oneOf // rules. Therefore, we have to use anyOf here but be aware that it isn't // possible to mix and match more schemas together. UploadOptions UploadOptions `json:"upload_options"` } // UploadTypes defines model for UploadTypes. type UploadTypes string // User defines model for User. type User struct { Groups *[]string `json:"groups,omitempty"` Key *string `json:"key,omitempty"` Name string `json:"name"` // Password If the password starts with $6$, $5$, or $2b$ it will be stored as // an encrypted password. Otherwise it will be treated as a plain text // password. Password *string `json:"password,omitempty"` } // Page defines model for page. type Page = string // Size defines model for size. type Size = string // GetErrorListParams defines parameters for GetErrorList. type GetErrorListParams struct { // Page Page index Page *Page `form:"page,omitempty" json:"page,omitempty"` // Size Number of items in each page Size *Size `form:"size,omitempty" json:"size,omitempty"` } // PostComposeJSONRequestBody defines body for PostCompose for application/json ContentType.
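// Illustrative usage sketch (not generated code): the *_Options unions carry
// raw JSON and expose one As*/From*/Merge* accessor per variant (generated
// below). The Type field tells the caller which accessor applies, shown here
// for CloneStatus.
func exampleCloneStatusOptionsSketch(cs CloneStatus) {
	switch cs.Type {
	case UploadTypesAws:
		if opts, err := cs.Options.AsAWSEC2UploadStatus(); err == nil {
			fmt.Printf("cloned AMI %s in region %s\n", opts.Ami, opts.Region)
		}
	case UploadTypesAwsS3:
		if opts, err := cs.Options.AsAWSS3UploadStatus(); err == nil {
			fmt.Printf("clone available at %s\n", opts.Url)
		}
	default:
		fmt.Printf("unhandled upload type %q\n", cs.Type)
	}
}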
type PostComposeJSONRequestBody = ComposeRequest // PostCloneComposeJSONRequestBody defines body for PostCloneCompose for application/json ContentType. type PostCloneComposeJSONRequestBody = CloneComposeBody // PostDepsolveBlueprintJSONRequestBody defines body for PostDepsolveBlueprint for application/json ContentType. type PostDepsolveBlueprintJSONRequestBody = DepsolveRequest // PostSearchPackagesJSONRequestBody defines body for PostSearchPackages for application/json ContentType. type PostSearchPackagesJSONRequestBody = SearchPackagesRequest // AsBlueprintFileGroup0 returns the union data inside the BlueprintFile_Group as a BlueprintFileGroup0 func (t BlueprintFile_Group) AsBlueprintFileGroup0() (BlueprintFileGroup0, error) { var body BlueprintFileGroup0 err := json.Unmarshal(t.union, &body) return body, err } // FromBlueprintFileGroup0 overwrites any union data inside the BlueprintFile_Group as the provided BlueprintFileGroup0 func (t *BlueprintFile_Group) FromBlueprintFileGroup0(v BlueprintFileGroup0) error { b, err := json.Marshal(v) t.union = b return err } // MergeBlueprintFileGroup0 performs a merge with any union data inside the BlueprintFile_Group, using the provided BlueprintFileGroup0 func (t *BlueprintFile_Group) MergeBlueprintFileGroup0(v BlueprintFileGroup0) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsBlueprintFileGroup1 returns the union data inside the BlueprintFile_Group as a BlueprintFileGroup1 func (t BlueprintFile_Group) AsBlueprintFileGroup1() (BlueprintFileGroup1, error) { var body BlueprintFileGroup1 err := json.Unmarshal(t.union, &body) return body, err } // FromBlueprintFileGroup1 overwrites any union data inside the BlueprintFile_Group as the provided BlueprintFileGroup1 func (t *BlueprintFile_Group) FromBlueprintFileGroup1(v BlueprintFileGroup1) error { b, err := json.Marshal(v) t.union = b return err } // MergeBlueprintFileGroup1 performs a merge with any union data inside the BlueprintFile_Group, using the provided BlueprintFileGroup1 func (t *BlueprintFile_Group) MergeBlueprintFileGroup1(v BlueprintFileGroup1) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } func (t BlueprintFile_Group) MarshalJSON() ([]byte, error) { b, err := t.union.MarshalJSON() return b, err } func (t *BlueprintFile_Group) UnmarshalJSON(b []byte) error { err := t.union.UnmarshalJSON(b) return err } // AsBlueprintFileUser0 returns the union data inside the BlueprintFile_User as a BlueprintFileUser0 func (t BlueprintFile_User) AsBlueprintFileUser0() (BlueprintFileUser0, error) { var body BlueprintFileUser0 err := json.Unmarshal(t.union, &body) return body, err } // FromBlueprintFileUser0 overwrites any union data inside the BlueprintFile_User as the provided BlueprintFileUser0 func (t *BlueprintFile_User) FromBlueprintFileUser0(v BlueprintFileUser0) error { b, err := json.Marshal(v) t.union = b return err } // MergeBlueprintFileUser0 performs a merge with any union data inside the BlueprintFile_User, using the provided BlueprintFileUser0 func (t *BlueprintFile_User) MergeBlueprintFileUser0(v BlueprintFileUser0) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsBlueprintFileUser1 returns the union data inside the BlueprintFile_User as a BlueprintFileUser1 func (t BlueprintFile_User) AsBlueprintFileUser1() 
(BlueprintFileUser1, error) { var body BlueprintFileUser1 err := json.Unmarshal(t.union, &body) return body, err } // FromBlueprintFileUser1 overwrites any union data inside the BlueprintFile_User as the provided BlueprintFileUser1 func (t *BlueprintFile_User) FromBlueprintFileUser1(v BlueprintFileUser1) error { b, err := json.Marshal(v) t.union = b return err } // MergeBlueprintFileUser1 performs a merge with any union data inside the BlueprintFile_User, using the provided BlueprintFileUser1 func (t *BlueprintFile_User) MergeBlueprintFileUser1(v BlueprintFileUser1) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } func (t BlueprintFile_User) MarshalJSON() ([]byte, error) { b, err := t.union.MarshalJSON() return b, err } func (t *BlueprintFile_User) UnmarshalJSON(b []byte) error { err := t.union.UnmarshalJSON(b) return err } // AsAWSEC2CloneCompose returns the union data inside the CloneComposeBody as a AWSEC2CloneCompose func (t CloneComposeBody) AsAWSEC2CloneCompose() (AWSEC2CloneCompose, error) { var body AWSEC2CloneCompose err := json.Unmarshal(t.union, &body) return body, err } // FromAWSEC2CloneCompose overwrites any union data inside the CloneComposeBody as the provided AWSEC2CloneCompose func (t *CloneComposeBody) FromAWSEC2CloneCompose(v AWSEC2CloneCompose) error { b, err := json.Marshal(v) t.union = b return err } // MergeAWSEC2CloneCompose performs a merge with any union data inside the CloneComposeBody, using the provided AWSEC2CloneCompose func (t *CloneComposeBody) MergeAWSEC2CloneCompose(v AWSEC2CloneCompose) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } func (t CloneComposeBody) MarshalJSON() ([]byte, error) { b, err := t.union.MarshalJSON() return b, err } func (t *CloneComposeBody) UnmarshalJSON(b []byte) error { err := t.union.UnmarshalJSON(b) return err } // AsAWSEC2UploadStatus returns the union data inside the CloneStatus_Options as a AWSEC2UploadStatus func (t CloneStatus_Options) AsAWSEC2UploadStatus() (AWSEC2UploadStatus, error) { var body AWSEC2UploadStatus err := json.Unmarshal(t.union, &body) return body, err } // FromAWSEC2UploadStatus overwrites any union data inside the CloneStatus_Options as the provided AWSEC2UploadStatus func (t *CloneStatus_Options) FromAWSEC2UploadStatus(v AWSEC2UploadStatus) error { b, err := json.Marshal(v) t.union = b return err } // MergeAWSEC2UploadStatus performs a merge with any union data inside the CloneStatus_Options, using the provided AWSEC2UploadStatus func (t *CloneStatus_Options) MergeAWSEC2UploadStatus(v AWSEC2UploadStatus) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsAWSS3UploadStatus returns the union data inside the CloneStatus_Options as a AWSS3UploadStatus func (t CloneStatus_Options) AsAWSS3UploadStatus() (AWSS3UploadStatus, error) { var body AWSS3UploadStatus err := json.Unmarshal(t.union, &body) return body, err } // FromAWSS3UploadStatus overwrites any union data inside the CloneStatus_Options as the provided AWSS3UploadStatus func (t *CloneStatus_Options) FromAWSS3UploadStatus(v AWSS3UploadStatus) error { b, err := json.Marshal(v) t.union = b return err } // MergeAWSS3UploadStatus performs a merge with any union data inside the CloneStatus_Options, using the provided AWSS3UploadStatus func (t *CloneStatus_Options) MergeAWSS3UploadStatus(v 
AWSS3UploadStatus) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsGCPUploadStatus returns the union data inside the CloneStatus_Options as a GCPUploadStatus func (t CloneStatus_Options) AsGCPUploadStatus() (GCPUploadStatus, error) { var body GCPUploadStatus err := json.Unmarshal(t.union, &body) return body, err } // FromGCPUploadStatus overwrites any union data inside the CloneStatus_Options as the provided GCPUploadStatus func (t *CloneStatus_Options) FromGCPUploadStatus(v GCPUploadStatus) error { b, err := json.Marshal(v) t.union = b return err } // MergeGCPUploadStatus performs a merge with any union data inside the CloneStatus_Options, using the provided GCPUploadStatus func (t *CloneStatus_Options) MergeGCPUploadStatus(v GCPUploadStatus) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsAzureUploadStatus returns the union data inside the CloneStatus_Options as a AzureUploadStatus func (t CloneStatus_Options) AsAzureUploadStatus() (AzureUploadStatus, error) { var body AzureUploadStatus err := json.Unmarshal(t.union, &body) return body, err } // FromAzureUploadStatus overwrites any union data inside the CloneStatus_Options as the provided AzureUploadStatus func (t *CloneStatus_Options) FromAzureUploadStatus(v AzureUploadStatus) error { b, err := json.Marshal(v) t.union = b return err } // MergeAzureUploadStatus performs a merge with any union data inside the CloneStatus_Options, using the provided AzureUploadStatus func (t *CloneStatus_Options) MergeAzureUploadStatus(v AzureUploadStatus) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsContainerUploadStatus returns the union data inside the CloneStatus_Options as a ContainerUploadStatus func (t CloneStatus_Options) AsContainerUploadStatus() (ContainerUploadStatus, error) { var body ContainerUploadStatus err := json.Unmarshal(t.union, &body) return body, err } // FromContainerUploadStatus overwrites any union data inside the CloneStatus_Options as the provided ContainerUploadStatus func (t *CloneStatus_Options) FromContainerUploadStatus(v ContainerUploadStatus) error { b, err := json.Marshal(v) t.union = b return err } // MergeContainerUploadStatus performs a merge with any union data inside the CloneStatus_Options, using the provided ContainerUploadStatus func (t *CloneStatus_Options) MergeContainerUploadStatus(v ContainerUploadStatus) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsOCIUploadStatus returns the union data inside the CloneStatus_Options as a OCIUploadStatus func (t CloneStatus_Options) AsOCIUploadStatus() (OCIUploadStatus, error) { var body OCIUploadStatus err := json.Unmarshal(t.union, &body) return body, err } // FromOCIUploadStatus overwrites any union data inside the CloneStatus_Options as the provided OCIUploadStatus func (t *CloneStatus_Options) FromOCIUploadStatus(v OCIUploadStatus) error { b, err := json.Marshal(v) t.union = b return err } // MergeOCIUploadStatus performs a merge with any union data inside the CloneStatus_Options, using the provided OCIUploadStatus func (t *CloneStatus_Options) MergeOCIUploadStatus(v OCIUploadStatus) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) 
t.union = merged return err } // AsPulpOSTreeUploadStatus returns the union data inside the CloneStatus_Options as a PulpOSTreeUploadStatus func (t CloneStatus_Options) AsPulpOSTreeUploadStatus() (PulpOSTreeUploadStatus, error) { var body PulpOSTreeUploadStatus err := json.Unmarshal(t.union, &body) return body, err } // FromPulpOSTreeUploadStatus overwrites any union data inside the CloneStatus_Options as the provided PulpOSTreeUploadStatus func (t *CloneStatus_Options) FromPulpOSTreeUploadStatus(v PulpOSTreeUploadStatus) error { b, err := json.Marshal(v) t.union = b return err } // MergePulpOSTreeUploadStatus performs a merge with any union data inside the CloneStatus_Options, using the provided PulpOSTreeUploadStatus func (t *CloneStatus_Options) MergePulpOSTreeUploadStatus(v PulpOSTreeUploadStatus) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsLocalUploadStatus returns the union data inside the CloneStatus_Options as a LocalUploadStatus func (t CloneStatus_Options) AsLocalUploadStatus() (LocalUploadStatus, error) { var body LocalUploadStatus err := json.Unmarshal(t.union, &body) return body, err } // FromLocalUploadStatus overwrites any union data inside the CloneStatus_Options as the provided LocalUploadStatus func (t *CloneStatus_Options) FromLocalUploadStatus(v LocalUploadStatus) error { b, err := json.Marshal(v) t.union = b return err } // MergeLocalUploadStatus performs a merge with any union data inside the CloneStatus_Options, using the provided LocalUploadStatus func (t *CloneStatus_Options) MergeLocalUploadStatus(v LocalUploadStatus) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } func (t CloneStatus_Options) MarshalJSON() ([]byte, error) { b, err := t.union.MarshalJSON() return b, err } func (t *CloneStatus_Options) UnmarshalJSON(b []byte) error { err := t.union.UnmarshalJSON(b) return err } // AsDirectoryGroup0 returns the union data inside the Directory_Group as a DirectoryGroup0 func (t Directory_Group) AsDirectoryGroup0() (DirectoryGroup0, error) { var body DirectoryGroup0 err := json.Unmarshal(t.union, &body) return body, err } // FromDirectoryGroup0 overwrites any union data inside the Directory_Group as the provided DirectoryGroup0 func (t *Directory_Group) FromDirectoryGroup0(v DirectoryGroup0) error { b, err := json.Marshal(v) t.union = b return err } // MergeDirectoryGroup0 performs a merge with any union data inside the Directory_Group, using the provided DirectoryGroup0 func (t *Directory_Group) MergeDirectoryGroup0(v DirectoryGroup0) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsDirectoryGroup1 returns the union data inside the Directory_Group as a DirectoryGroup1 func (t Directory_Group) AsDirectoryGroup1() (DirectoryGroup1, error) { var body DirectoryGroup1 err := json.Unmarshal(t.union, &body) return body, err } // FromDirectoryGroup1 overwrites any union data inside the Directory_Group as the provided DirectoryGroup1 func (t *Directory_Group) FromDirectoryGroup1(v DirectoryGroup1) error { b, err := json.Marshal(v) t.union = b return err } // MergeDirectoryGroup1 performs a merge with any union data inside the Directory_Group, using the provided DirectoryGroup1 func (t *Directory_Group) MergeDirectoryGroup1(v DirectoryGroup1) error { b, err := json.Marshal(v) if err != nil { return 
err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } func (t Directory_Group) MarshalJSON() ([]byte, error) { b, err := t.union.MarshalJSON() return b, err } func (t *Directory_Group) UnmarshalJSON(b []byte) error { err := t.union.UnmarshalJSON(b) return err } // AsDirectoryUser0 returns the union data inside the Directory_User as a DirectoryUser0 func (t Directory_User) AsDirectoryUser0() (DirectoryUser0, error) { var body DirectoryUser0 err := json.Unmarshal(t.union, &body) return body, err } // FromDirectoryUser0 overwrites any union data inside the Directory_User as the provided DirectoryUser0 func (t *Directory_User) FromDirectoryUser0(v DirectoryUser0) error { b, err := json.Marshal(v) t.union = b return err } // MergeDirectoryUser0 performs a merge with any union data inside the Directory_User, using the provided DirectoryUser0 func (t *Directory_User) MergeDirectoryUser0(v DirectoryUser0) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsDirectoryUser1 returns the union data inside the Directory_User as a DirectoryUser1 func (t Directory_User) AsDirectoryUser1() (DirectoryUser1, error) { var body DirectoryUser1 err := json.Unmarshal(t.union, &body) return body, err } // FromDirectoryUser1 overwrites any union data inside the Directory_User as the provided DirectoryUser1 func (t *Directory_User) FromDirectoryUser1(v DirectoryUser1) error { b, err := json.Marshal(v) t.union = b return err } // MergeDirectoryUser1 performs a merge with any union data inside the Directory_User, using the provided DirectoryUser1 func (t *Directory_User) MergeDirectoryUser1(v DirectoryUser1) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } func (t Directory_User) MarshalJSON() ([]byte, error) { b, err := t.union.MarshalJSON() return b, err } func (t *Directory_User) UnmarshalJSON(b []byte) error { err := t.union.UnmarshalJSON(b) return err } // AsFileGroup0 returns the union data inside the File_Group as a FileGroup0 func (t File_Group) AsFileGroup0() (FileGroup0, error) { var body FileGroup0 err := json.Unmarshal(t.union, &body) return body, err } // FromFileGroup0 overwrites any union data inside the File_Group as the provided FileGroup0 func (t *File_Group) FromFileGroup0(v FileGroup0) error { b, err := json.Marshal(v) t.union = b return err } // MergeFileGroup0 performs a merge with any union data inside the File_Group, using the provided FileGroup0 func (t *File_Group) MergeFileGroup0(v FileGroup0) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsFileGroup1 returns the union data inside the File_Group as a FileGroup1 func (t File_Group) AsFileGroup1() (FileGroup1, error) { var body FileGroup1 err := json.Unmarshal(t.union, &body) return body, err } // FromFileGroup1 overwrites any union data inside the File_Group as the provided FileGroup1 func (t *File_Group) FromFileGroup1(v FileGroup1) error { b, err := json.Marshal(v) t.union = b return err } // MergeFileGroup1 performs a merge with any union data inside the File_Group, using the provided FileGroup1 func (t *File_Group) MergeFileGroup1(v FileGroup1) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } func (t File_Group) MarshalJSON() ([]byte, error) { b, err := 
t.union.MarshalJSON() return b, err } func (t *File_Group) UnmarshalJSON(b []byte) error { err := t.union.UnmarshalJSON(b) return err } // AsFileUser0 returns the union data inside the File_User as a FileUser0 func (t File_User) AsFileUser0() (FileUser0, error) { var body FileUser0 err := json.Unmarshal(t.union, &body) return body, err } // FromFileUser0 overwrites any union data inside the File_User as the provided FileUser0 func (t *File_User) FromFileUser0(v FileUser0) error { b, err := json.Marshal(v) t.union = b return err } // MergeFileUser0 performs a merge with any union data inside the File_User, using the provided FileUser0 func (t *File_User) MergeFileUser0(v FileUser0) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsFileUser1 returns the union data inside the File_User as a FileUser1 func (t File_User) AsFileUser1() (FileUser1, error) { var body FileUser1 err := json.Unmarshal(t.union, &body) return body, err } // FromFileUser1 overwrites any union data inside the File_User as the provided FileUser1 func (t *File_User) FromFileUser1(v FileUser1) error { b, err := json.Marshal(v) t.union = b return err } // MergeFileUser1 performs a merge with any union data inside the File_User, using the provided FileUser1 func (t *File_User) MergeFileUser1(v FileUser1) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } func (t File_User) MarshalJSON() ([]byte, error) { b, err := t.union.MarshalJSON() return b, err } func (t *File_User) UnmarshalJSON(b []byte) error { err := t.union.UnmarshalJSON(b) return err } // AsAWSEC2UploadOptions returns the union data inside the UploadOptions as a AWSEC2UploadOptions func (t UploadOptions) AsAWSEC2UploadOptions() (AWSEC2UploadOptions, error) { var body AWSEC2UploadOptions err := json.Unmarshal(t.union, &body) return body, err } // FromAWSEC2UploadOptions overwrites any union data inside the UploadOptions as the provided AWSEC2UploadOptions func (t *UploadOptions) FromAWSEC2UploadOptions(v AWSEC2UploadOptions) error { b, err := json.Marshal(v) t.union = b return err } // MergeAWSEC2UploadOptions performs a merge with any union data inside the UploadOptions, using the provided AWSEC2UploadOptions func (t *UploadOptions) MergeAWSEC2UploadOptions(v AWSEC2UploadOptions) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsAWSS3UploadOptions returns the union data inside the UploadOptions as a AWSS3UploadOptions func (t UploadOptions) AsAWSS3UploadOptions() (AWSS3UploadOptions, error) { var body AWSS3UploadOptions err := json.Unmarshal(t.union, &body) return body, err } // FromAWSS3UploadOptions overwrites any union data inside the UploadOptions as the provided AWSS3UploadOptions func (t *UploadOptions) FromAWSS3UploadOptions(v AWSS3UploadOptions) error { b, err := json.Marshal(v) t.union = b return err } // MergeAWSS3UploadOptions performs a merge with any union data inside the UploadOptions, using the provided AWSS3UploadOptions func (t *UploadOptions) MergeAWSS3UploadOptions(v AWSS3UploadOptions) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsGCPUploadOptions returns the union data inside the UploadOptions as a GCPUploadOptions func (t UploadOptions) AsGCPUploadOptions() (GCPUploadOptions, 
error) { var body GCPUploadOptions err := json.Unmarshal(t.union, &body) return body, err } // FromGCPUploadOptions overwrites any union data inside the UploadOptions as the provided GCPUploadOptions func (t *UploadOptions) FromGCPUploadOptions(v GCPUploadOptions) error { b, err := json.Marshal(v) t.union = b return err } // MergeGCPUploadOptions performs a merge with any union data inside the UploadOptions, using the provided GCPUploadOptions func (t *UploadOptions) MergeGCPUploadOptions(v GCPUploadOptions) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsAzureUploadOptions returns the union data inside the UploadOptions as a AzureUploadOptions func (t UploadOptions) AsAzureUploadOptions() (AzureUploadOptions, error) { var body AzureUploadOptions err := json.Unmarshal(t.union, &body) return body, err } // FromAzureUploadOptions overwrites any union data inside the UploadOptions as the provided AzureUploadOptions func (t *UploadOptions) FromAzureUploadOptions(v AzureUploadOptions) error { b, err := json.Marshal(v) t.union = b return err } // MergeAzureUploadOptions performs a merge with any union data inside the UploadOptions, using the provided AzureUploadOptions func (t *UploadOptions) MergeAzureUploadOptions(v AzureUploadOptions) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsContainerUploadOptions returns the union data inside the UploadOptions as a ContainerUploadOptions func (t UploadOptions) AsContainerUploadOptions() (ContainerUploadOptions, error) { var body ContainerUploadOptions err := json.Unmarshal(t.union, &body) return body, err } // FromContainerUploadOptions overwrites any union data inside the UploadOptions as the provided ContainerUploadOptions func (t *UploadOptions) FromContainerUploadOptions(v ContainerUploadOptions) error { b, err := json.Marshal(v) t.union = b return err } // MergeContainerUploadOptions performs a merge with any union data inside the UploadOptions, using the provided ContainerUploadOptions func (t *UploadOptions) MergeContainerUploadOptions(v ContainerUploadOptions) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsLocalUploadOptions returns the union data inside the UploadOptions as a LocalUploadOptions func (t UploadOptions) AsLocalUploadOptions() (LocalUploadOptions, error) { var body LocalUploadOptions err := json.Unmarshal(t.union, &body) return body, err } // FromLocalUploadOptions overwrites any union data inside the UploadOptions as the provided LocalUploadOptions func (t *UploadOptions) FromLocalUploadOptions(v LocalUploadOptions) error { b, err := json.Marshal(v) t.union = b return err } // MergeLocalUploadOptions performs a merge with any union data inside the UploadOptions, using the provided LocalUploadOptions func (t *UploadOptions) MergeLocalUploadOptions(v LocalUploadOptions) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsOCIUploadOptions returns the union data inside the UploadOptions as a OCIUploadOptions func (t UploadOptions) AsOCIUploadOptions() (OCIUploadOptions, error) { var body OCIUploadOptions err := json.Unmarshal(t.union, &body) return body, err } // FromOCIUploadOptions overwrites any union data inside the UploadOptions as the provided 
OCIUploadOptions func (t *UploadOptions) FromOCIUploadOptions(v OCIUploadOptions) error { b, err := json.Marshal(v) t.union = b return err } // MergeOCIUploadOptions performs a merge with any union data inside the UploadOptions, using the provided OCIUploadOptions func (t *UploadOptions) MergeOCIUploadOptions(v OCIUploadOptions) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsPulpOSTreeUploadOptions returns the union data inside the UploadOptions as a PulpOSTreeUploadOptions func (t UploadOptions) AsPulpOSTreeUploadOptions() (PulpOSTreeUploadOptions, error) { var body PulpOSTreeUploadOptions err := json.Unmarshal(t.union, &body) return body, err } // FromPulpOSTreeUploadOptions overwrites any union data inside the UploadOptions as the provided PulpOSTreeUploadOptions func (t *UploadOptions) FromPulpOSTreeUploadOptions(v PulpOSTreeUploadOptions) error { b, err := json.Marshal(v) t.union = b return err } // MergePulpOSTreeUploadOptions performs a merge with any union data inside the UploadOptions, using the provided PulpOSTreeUploadOptions func (t *UploadOptions) MergePulpOSTreeUploadOptions(v PulpOSTreeUploadOptions) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } func (t UploadOptions) MarshalJSON() ([]byte, error) { b, err := t.union.MarshalJSON() return b, err } func (t *UploadOptions) UnmarshalJSON(b []byte) error { err := t.union.UnmarshalJSON(b) return err } // AsAWSEC2UploadStatus returns the union data inside the UploadStatus_Options as a AWSEC2UploadStatus func (t UploadStatus_Options) AsAWSEC2UploadStatus() (AWSEC2UploadStatus, error) { var body AWSEC2UploadStatus err := json.Unmarshal(t.union, &body) return body, err } // FromAWSEC2UploadStatus overwrites any union data inside the UploadStatus_Options as the provided AWSEC2UploadStatus func (t *UploadStatus_Options) FromAWSEC2UploadStatus(v AWSEC2UploadStatus) error { b, err := json.Marshal(v) t.union = b return err } // MergeAWSEC2UploadStatus performs a merge with any union data inside the UploadStatus_Options, using the provided AWSEC2UploadStatus func (t *UploadStatus_Options) MergeAWSEC2UploadStatus(v AWSEC2UploadStatus) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsAWSS3UploadStatus returns the union data inside the UploadStatus_Options as a AWSS3UploadStatus func (t UploadStatus_Options) AsAWSS3UploadStatus() (AWSS3UploadStatus, error) { var body AWSS3UploadStatus err := json.Unmarshal(t.union, &body) return body, err } // FromAWSS3UploadStatus overwrites any union data inside the UploadStatus_Options as the provided AWSS3UploadStatus func (t *UploadStatus_Options) FromAWSS3UploadStatus(v AWSS3UploadStatus) error { b, err := json.Marshal(v) t.union = b return err } // MergeAWSS3UploadStatus performs a merge with any union data inside the UploadStatus_Options, using the provided AWSS3UploadStatus func (t *UploadStatus_Options) MergeAWSS3UploadStatus(v AWSS3UploadStatus) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsGCPUploadStatus returns the union data inside the UploadStatus_Options as a GCPUploadStatus func (t UploadStatus_Options) AsGCPUploadStatus() (GCPUploadStatus, error) { var body GCPUploadStatus err := json.Unmarshal(t.union, &body) return 
body, err } // FromGCPUploadStatus overwrites any union data inside the UploadStatus_Options as the provided GCPUploadStatus func (t *UploadStatus_Options) FromGCPUploadStatus(v GCPUploadStatus) error { b, err := json.Marshal(v) t.union = b return err } // MergeGCPUploadStatus performs a merge with any union data inside the UploadStatus_Options, using the provided GCPUploadStatus func (t *UploadStatus_Options) MergeGCPUploadStatus(v GCPUploadStatus) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsAzureUploadStatus returns the union data inside the UploadStatus_Options as a AzureUploadStatus func (t UploadStatus_Options) AsAzureUploadStatus() (AzureUploadStatus, error) { var body AzureUploadStatus err := json.Unmarshal(t.union, &body) return body, err } // FromAzureUploadStatus overwrites any union data inside the UploadStatus_Options as the provided AzureUploadStatus func (t *UploadStatus_Options) FromAzureUploadStatus(v AzureUploadStatus) error { b, err := json.Marshal(v) t.union = b return err } // MergeAzureUploadStatus performs a merge with any union data inside the UploadStatus_Options, using the provided AzureUploadStatus func (t *UploadStatus_Options) MergeAzureUploadStatus(v AzureUploadStatus) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsContainerUploadStatus returns the union data inside the UploadStatus_Options as a ContainerUploadStatus func (t UploadStatus_Options) AsContainerUploadStatus() (ContainerUploadStatus, error) { var body ContainerUploadStatus err := json.Unmarshal(t.union, &body) return body, err } // FromContainerUploadStatus overwrites any union data inside the UploadStatus_Options as the provided ContainerUploadStatus func (t *UploadStatus_Options) FromContainerUploadStatus(v ContainerUploadStatus) error { b, err := json.Marshal(v) t.union = b return err } // MergeContainerUploadStatus performs a merge with any union data inside the UploadStatus_Options, using the provided ContainerUploadStatus func (t *UploadStatus_Options) MergeContainerUploadStatus(v ContainerUploadStatus) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsOCIUploadStatus returns the union data inside the UploadStatus_Options as a OCIUploadStatus func (t UploadStatus_Options) AsOCIUploadStatus() (OCIUploadStatus, error) { var body OCIUploadStatus err := json.Unmarshal(t.union, &body) return body, err } // FromOCIUploadStatus overwrites any union data inside the UploadStatus_Options as the provided OCIUploadStatus func (t *UploadStatus_Options) FromOCIUploadStatus(v OCIUploadStatus) error { b, err := json.Marshal(v) t.union = b return err } // MergeOCIUploadStatus performs a merge with any union data inside the UploadStatus_Options, using the provided OCIUploadStatus func (t *UploadStatus_Options) MergeOCIUploadStatus(v OCIUploadStatus) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsPulpOSTreeUploadStatus returns the union data inside the UploadStatus_Options as a PulpOSTreeUploadStatus func (t UploadStatus_Options) AsPulpOSTreeUploadStatus() (PulpOSTreeUploadStatus, error) { var body PulpOSTreeUploadStatus err := json.Unmarshal(t.union, &body) return body, err } // FromPulpOSTreeUploadStatus overwrites any union data 
inside the UploadStatus_Options as the provided PulpOSTreeUploadStatus func (t *UploadStatus_Options) FromPulpOSTreeUploadStatus(v PulpOSTreeUploadStatus) error { b, err := json.Marshal(v) t.union = b return err } // MergePulpOSTreeUploadStatus performs a merge with any union data inside the UploadStatus_Options, using the provided PulpOSTreeUploadStatus func (t *UploadStatus_Options) MergePulpOSTreeUploadStatus(v PulpOSTreeUploadStatus) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } // AsLocalUploadStatus returns the union data inside the UploadStatus_Options as a LocalUploadStatus func (t UploadStatus_Options) AsLocalUploadStatus() (LocalUploadStatus, error) { var body LocalUploadStatus err := json.Unmarshal(t.union, &body) return body, err } // FromLocalUploadStatus overwrites any union data inside the UploadStatus_Options as the provided LocalUploadStatus func (t *UploadStatus_Options) FromLocalUploadStatus(v LocalUploadStatus) error { b, err := json.Marshal(v) t.union = b return err } // MergeLocalUploadStatus performs a merge with any union data inside the UploadStatus_Options, using the provided LocalUploadStatus func (t *UploadStatus_Options) MergeLocalUploadStatus(v LocalUploadStatus) error { b, err := json.Marshal(v) if err != nil { return err } merged, err := runtime.JSONMerge(t.union, b) t.union = merged return err } func (t UploadStatus_Options) MarshalJSON() ([]byte, error) { b, err := t.union.MarshalJSON() return b, err } func (t *UploadStatus_Options) UnmarshalJSON(b []byte) error { err := t.union.UnmarshalJSON(b) return err } // ServerInterface represents all server handlers. type ServerInterface interface { // The status of a cloned compose // (GET /clones/{id}) GetCloneStatus(ctx echo.Context, id openapi_types.UUID) error // Create compose // (POST /compose) PostCompose(ctx echo.Context) error // The list of composes // (GET /composes/) GetComposeList(ctx echo.Context) error // The status of a compose // (GET /composes/{id}) GetComposeStatus(ctx echo.Context, id openapi_types.UUID) error // Clone an existing compose // (POST /composes/{id}/clone) PostCloneCompose(ctx echo.Context, id openapi_types.UUID) error // Download the artifact for a compose. // (GET /composes/{id}/download) GetComposeDownload(ctx echo.Context, id openapi_types.UUID) error // Get logs for a compose. // (GET /composes/{id}/logs) GetComposeLogs(ctx echo.Context, id openapi_types.UUID) error // Get the manifests for a compose. // (GET /composes/{id}/manifests) GetComposeManifests(ctx echo.Context, id openapi_types.UUID) error // Get the metadata for a compose. // (GET /composes/{id}/metadata) GetComposeMetadata(ctx echo.Context, id openapi_types.UUID) error // Get the SBOMs for a compose. 
// (GET /composes/{id}/sboms) GetComposeSBOMs(ctx echo.Context, id openapi_types.UUID) error // Depsolve one or more blueprints // (POST /depsolve/blueprint) PostDepsolveBlueprint(ctx echo.Context) error // Get all of the supported distribution repository details // (GET /distributions) GetDistributionList(ctx echo.Context) error // Get a list of all possible errors // (GET /errors) GetErrorList(ctx echo.Context, params GetErrorListParams) error // Get error description // (GET /errors/{id}) GetError(ctx echo.Context, id string) error // Get the openapi spec in json format // (GET /openapi) GetOpenapi(ctx echo.Context) error // Search for detailed information on a list of package names // (POST /search/packages) PostSearchPackages(ctx echo.Context) error } // ServerInterfaceWrapper converts echo contexts to parameters. type ServerInterfaceWrapper struct { Handler ServerInterface } // GetCloneStatus converts echo context to params. func (w *ServerInterfaceWrapper) GetCloneStatus(ctx echo.Context) error { var err error // ------------- Path parameter "id" ------------- var id openapi_types.UUID err = runtime.BindStyledParameterWithOptions("simple", "id", ctx.Param("id"), &id, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter id: %s", err)) } ctx.Set(BearerScopes, []string{}) // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetCloneStatus(ctx, id) return err } // PostCompose converts echo context to params. func (w *ServerInterfaceWrapper) PostCompose(ctx echo.Context) error { var err error ctx.Set(BearerScopes, []string{}) // Invoke the callback with all the unmarshaled arguments err = w.Handler.PostCompose(ctx) return err } // GetComposeList converts echo context to params. func (w *ServerInterfaceWrapper) GetComposeList(ctx echo.Context) error { var err error ctx.Set(BearerScopes, []string{}) // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetComposeList(ctx) return err } // GetComposeStatus converts echo context to params. func (w *ServerInterfaceWrapper) GetComposeStatus(ctx echo.Context) error { var err error // ------------- Path parameter "id" ------------- var id openapi_types.UUID err = runtime.BindStyledParameterWithOptions("simple", "id", ctx.Param("id"), &id, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter id: %s", err)) } ctx.Set(BearerScopes, []string{}) // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetComposeStatus(ctx, id) return err } // PostCloneCompose converts echo context to params. func (w *ServerInterfaceWrapper) PostCloneCompose(ctx echo.Context) error { var err error // ------------- Path parameter "id" ------------- var id openapi_types.UUID err = runtime.BindStyledParameterWithOptions("simple", "id", ctx.Param("id"), &id, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter id: %s", err)) } // Invoke the callback with all the unmarshaled arguments err = w.Handler.PostCloneCompose(ctx, id) return err } // GetComposeDownload converts echo context to params. 
func (w *ServerInterfaceWrapper) GetComposeDownload(ctx echo.Context) error { var err error // ------------- Path parameter "id" ------------- var id openapi_types.UUID err = runtime.BindStyledParameterWithOptions("simple", "id", ctx.Param("id"), &id, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter id: %s", err)) } ctx.Set(BearerScopes, []string{}) // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetComposeDownload(ctx, id) return err } // GetComposeLogs converts echo context to params. func (w *ServerInterfaceWrapper) GetComposeLogs(ctx echo.Context) error { var err error // ------------- Path parameter "id" ------------- var id openapi_types.UUID err = runtime.BindStyledParameterWithOptions("simple", "id", ctx.Param("id"), &id, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter id: %s", err)) } // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetComposeLogs(ctx, id) return err } // GetComposeManifests converts echo context to params. func (w *ServerInterfaceWrapper) GetComposeManifests(ctx echo.Context) error { var err error // ------------- Path parameter "id" ------------- var id openapi_types.UUID err = runtime.BindStyledParameterWithOptions("simple", "id", ctx.Param("id"), &id, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter id: %s", err)) } // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetComposeManifests(ctx, id) return err } // GetComposeMetadata converts echo context to params. func (w *ServerInterfaceWrapper) GetComposeMetadata(ctx echo.Context) error { var err error // ------------- Path parameter "id" ------------- var id openapi_types.UUID err = runtime.BindStyledParameterWithOptions("simple", "id", ctx.Param("id"), &id, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter id: %s", err)) } ctx.Set(BearerScopes, []string{}) // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetComposeMetadata(ctx, id) return err } // GetComposeSBOMs converts echo context to params. func (w *ServerInterfaceWrapper) GetComposeSBOMs(ctx echo.Context) error { var err error // ------------- Path parameter "id" ------------- var id openapi_types.UUID err = runtime.BindStyledParameterWithOptions("simple", "id", ctx.Param("id"), &id, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter id: %s", err)) } // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetComposeSBOMs(ctx, id) return err } // PostDepsolveBlueprint converts echo context to params. 
func (w *ServerInterfaceWrapper) PostDepsolveBlueprint(ctx echo.Context) error { var err error ctx.Set(BearerScopes, []string{}) // Invoke the callback with all the unmarshaled arguments err = w.Handler.PostDepsolveBlueprint(ctx) return err } // GetDistributionList converts echo context to params. func (w *ServerInterfaceWrapper) GetDistributionList(ctx echo.Context) error { var err error ctx.Set(BearerScopes, []string{}) // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetDistributionList(ctx) return err } // GetErrorList converts echo context to params. func (w *ServerInterfaceWrapper) GetErrorList(ctx echo.Context) error { var err error ctx.Set(BearerScopes, []string{}) // Parameter object where we will unmarshal all parameters from the context var params GetErrorListParams // ------------- Optional query parameter "page" ------------- err = runtime.BindQueryParameter("form", true, false, "page", ctx.QueryParams(), &params.Page) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter page: %s", err)) } // ------------- Optional query parameter "size" ------------- err = runtime.BindQueryParameter("form", true, false, "size", ctx.QueryParams(), &params.Size) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter size: %s", err)) } // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetErrorList(ctx, params) return err } // GetError converts echo context to params. func (w *ServerInterfaceWrapper) GetError(ctx echo.Context) error { var err error // ------------- Path parameter "id" ------------- var id string err = runtime.BindStyledParameterWithOptions("simple", "id", ctx.Param("id"), &id, runtime.BindStyledParameterOptions{ParamLocation: runtime.ParamLocationPath, Explode: false, Required: true}) if err != nil { return echo.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Invalid format for parameter id: %s", err)) } ctx.Set(BearerScopes, []string{}) // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetError(ctx, id) return err } // GetOpenapi converts echo context to params. func (w *ServerInterfaceWrapper) GetOpenapi(ctx echo.Context) error { var err error ctx.Set(BearerScopes, []string{}) // Invoke the callback with all the unmarshaled arguments err = w.Handler.GetOpenapi(ctx) return err } // PostSearchPackages converts echo context to params.
func (w *ServerInterfaceWrapper) PostSearchPackages(ctx echo.Context) error { var err error ctx.Set(BearerScopes, []string{}) // Invoke the callback with all the unmarshaled arguments err = w.Handler.PostSearchPackages(ctx) return err } // This is a simple interface which specifies echo.Route addition functions which // are present on both echo.Echo and echo.Group, since we want to allow using // either of them for path registration type EchoRouter interface { CONNECT(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route DELETE(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route GET(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route HEAD(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route OPTIONS(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route PATCH(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route POST(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route PUT(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route TRACE(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route } // RegisterHandlers adds each server route to the EchoRouter. func RegisterHandlers(router EchoRouter, si ServerInterface) { RegisterHandlersWithBaseURL(router, si, "") } // Registers handlers, and prepends BaseURL to the paths, so that the paths // can be served under a prefix. func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL string) { wrapper := ServerInterfaceWrapper{ Handler: si, } router.GET(baseURL+"/clones/:id", wrapper.GetCloneStatus) router.POST(baseURL+"/compose", wrapper.PostCompose) router.GET(baseURL+"/composes/", wrapper.GetComposeList) router.GET(baseURL+"/composes/:id", wrapper.GetComposeStatus) router.POST(baseURL+"/composes/:id/clone", wrapper.PostCloneCompose) router.GET(baseURL+"/composes/:id/download", wrapper.GetComposeDownload) router.GET(baseURL+"/composes/:id/logs", wrapper.GetComposeLogs) router.GET(baseURL+"/composes/:id/manifests", wrapper.GetComposeManifests) router.GET(baseURL+"/composes/:id/metadata", wrapper.GetComposeMetadata) router.GET(baseURL+"/composes/:id/sboms", wrapper.GetComposeSBOMs) router.POST(baseURL+"/depsolve/blueprint", wrapper.PostDepsolveBlueprint) router.GET(baseURL+"/distributions", wrapper.GetDistributionList) router.GET(baseURL+"/errors", wrapper.GetErrorList) router.GET(baseURL+"/errors/:id", wrapper.GetError) router.GET(baseURL+"/openapi", wrapper.GetOpenapi) router.POST(baseURL+"/search/packages", wrapper.PostSearchPackages) } // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ "H4sIAAAAAAAC/+x9eXPbOPLoV0HppSqTF923XTX1e7J8ybZ8yUfsVcoLkZAEiwQYAJQszy/f/RUOUqRE", "XXEyu9n1H7sTizgaDXSjb/yVsqjrUYKI4Kndv1IeZNBFAjHz1wDJ/9qIWwx7AlOS2k1dwgECmNjoJZVO", "oRfoeg6KNR9Dx0ep3VQh9f17OoVln28+YtNUOkWgK7+olukUt4bIhbKLmHrydy4YJgPVjePXhLnPfbeH", "GKB9gAVyOcAEIGgNgRkwCk0wQAhNPr8UHtV2FTzfg49q6MZ956BZbDqUoKZEH1cTQdvGEkzoXDLqISaw", "BKQPHY7SKS/y018phgZqPQsTpVN8CBl6mmAxfIKWRX2zMWZlqd1/pArFUrlSrdV38oVi6ms6pTCROJb5", "ATIGp2rtDH3zMUO2HMbA8DVsRnvPyBKyn17fredQaF8o1PMfXmAIeAr5mQniIlNIpf/OZadTnECPD6l4", "0rsdhcmdZoKvi1AlIywZ1nVo7AgofE0lMURBF8chgi7O5K16KV/bKdVqlcpOxS73kjC2JYrnFiPnTa85", "A53SW46A5/ccbGkS7kPfEWG7OEm3+oAjAQQF6jP4QwwRMF2AIt5PaQCBQ8kgDWiv73MLCmSD2+uzLsEc", "MCR8RpCdBS3BAXrxMINyaODiwVCAHgKcUoIYEENIQJ8yQMUQMeCrtXWJgGyABM92SZfMYBHMR3JaPqRM", "ICZnA5HJACR2l+D4hJgDCTuHLgKQq6nk39HpwGy22Rb1KHUQJG/f1M22c9lR9JmTzIqjU8hGieO/+gy9", 
"5bgMpx5iT+OnASJI4zN2dFJ3cvnxk9McUsqRwvFdG7RceS8dy2HuwGyUNLBxv48YIgL0ERQ+QxxQAhTA", "AMr/jSF2YM9BXWIjDxEbk4FsIcddGE5vHCK+K7GhgLorRjAyo08s4QlZztw1Jo8I7asp9MFANlAd5CkG", "rs/VwfUJ/ubLu1Y1HOAxIoAhTn1mITBg1Pey6szKSeTpoy4WkjT6jLqqi9w5xIU8yAwSm7qAEgR6kCNb", "rhCC29vWPsC8S8wKkW0WGOWQCrAkFuRQK7JT0QWemS/BIj1Gx1guMgD/SYGfBpMhYnoL1SyS3nzHVosP", "8AKJ7DbAXCCm4DumE0miDuYCQMcBARh8t0uGQnh8N5ezqcWzLrYY5bQvshZ1c4hkfJ6zHJyDcu9zhnf/", "zxijyZ/qp4zl4IwDBeLi/8DXgLk/yYmewkk+KpRLiIOfJOoJFYB7yMJ9jOw0wEL+aCPbt2IbsgQP80iX", "9I58SR/JnD/ad/Xpih+XDdA9D8oN9S1Irs0wR2rGpPvb74UgPGF7EajWvgQp2uwHgCmjil3vFa0M7BXL", "mXK5UMrs5K1KploolvJVVM/voGISdAIRSMQKuCQQutFmUJkj2MfEVnutKVTzlEvKBHQ2OYvBORR4jDI2", "ZsgSlE1zfZ/Y0EVEQIcvfM0M6SQjaEZOndEgzyGpYtVQv9KrZgpWqZ8p2zCfgdViMZPv5av5YmnHrtm1", "tVfJDGOLe7twAtdcCMsunDiH3ITlzAEZGSAJhD3HRx7DRGx5FVmUCIiJUYLm7pzgmz4dXJ4C5PYk+yZS", "bBgieSigAyATfWhJqTIUVD8w1E/tpv5PbqZz5YxWkQvHTRJgLZ8L6uJXGF6sq4YKl92Md/s+d38mSM42", "5oLRxVXfSJFMfsM9X5GuoMDnKBRxLK0FZUGrDxzUFwC5npiqT0PKRZfogcEEO46iJL5I231kUwYzpZ0k", "AkZEXtD2k0tt3+h3G6G1rdon4VSdXJ6k3VojSfb6u1xoT97AXEDHQfam22lG0ewyYfbIOuLTNwiADjbS", "o6dH4Wkpd8rTYaufe9AaTSCzucI7FLCHHSymCp/bQJcEWECNCzsQwLIUY2/FVRI0Y8R4onzRABy5Y8SA", "aQGIMgzEDlQtW8vW8muZyHr20Vwgv22YCbQQE+vpv9GUzWJTaYrUfB8nYX5/9lEi32IIilBcDNkQ3oYP", "BUNOk7ajb9N1/Q/3L1RLnHi6D+XPPwvUcH/kqIngytmmXCA3QUCVwiPtg1kb4Ephz6OYiAiIPwSMmTQR", "pCSec6C4GzhsXXaAS22UqBr2MUMT6DhbQGI6BNxuORZmzG67VS/lb5LrJ6s+TUr6eKC0sOB6UA2TNKgB", "wcFVtQqKVtBOWfcU91H082SjMbbWqF/RDkB3SAPLZ1JhdKaAEmcqr6u+74S3HbIHKMOx6zlK2s8EHI8B", "uYS5ay1no3GO2zBxgUHHtSsMG35Pp0aIEbT2GJzqVkZLc9C69me61fd0inqIcAt6Gx+0Cw+RTrNxqa8J", "JtRmYDJ4Umc5psVDX9CMM3YXdPkOcpAlwFDK1VrYGBn5O5AZwpGRnQUfg4E+6u9SGGFwAnziIM67RCgh", "Xmr3UuGlDLiUoRiFY6l/YGsILMiRlOHDcc7u2lnwUY0NnQmc8i7xOeLy9zRAUgefDJFiXGYKQgF6EQxG", "x8+CjwxOPgLVU0IWgs+7JGmQJXDG7Q0MTlLplMZfiMqviSqiRzledm9cR75Kop8wLJD8Rw4JKzf13azq", "n7VzcQ5tLBTnVCCJYijkNx4gQSixDkABej52bCCwi7KbCyXhcQqhS7yD2JC764a6Pu60F25S5q3vd7nY", "jSMmecJa8DtBO9mHD0doupzdcj4EIzTlm6Km0zk+RYnYkDh+pWQtdd8E7b6nUz7XDCcZNvn1LfffLU/S", "Yb6vkq/U/Z0g4mm1R13R62QGfc7ikpcNBUxW4CTkAf9Xo0MOPAfKkdGLSOLUS+5Pdf/NjwTBANuSlqEx", "upj7bXYnMKrcDZSgi35q9x+L0nb4CyYCDSRCv2qlIckdh5iLuRSCOdADhBeVgggTQC0B1fXlQhEDJF8t", "l5OW60ExTBL0xRCESq4TX5NiHe7U/L4wYvKhu5gQ7c2L488P8Cd7/ST0zUn9aoVf153KmfQYP1ouJsn+", "SflrdD1GtMQE9KYC8egyioVyrVwvVcv1dOolM6AZA4qPiaiWtZIYXANx+0huDNlatSbSOR3Cu2bBMwFz", "pYIzL9PrbjawjGineeeCP4gaNSiZ76jP4A+p9VImAINkgPgnZfz1GBXUoo5iS1I6iaLxH6licVdYXiqd", "qufNP7ALPfXP7dyEG3L6YMFRji956+ZWiWCER9VrO2YZClsLh1LyOy4Ygm7icp85JU8CYoeqX9aAGExz", "0rk4vwk7SdZAHWxNE02pl76Q1BuawYFuC1r7AdOWFzOQ/JqnAZeMBAoAyVQL4cSSolJo6AeCdok8t4Oh", "4KEUKKUeFwpsQceZyhNHkLKwG7YkV+JgOVQwuZnZooRTx8gjhhPupnxfmTMX+R+jknrNKhdPzrZYjGBw", "ng/NZlpJnBGhaGHje5Ajnznx8zdjF4EZ2rJJliF7CLUJ2tIXYc7GXOTYEDn1XD33Uq8+Vcs5OSLlOcpz", "MWwxnGhcn6MjY6uLYC6mxTpoqY1p4A2sIbJGyV0H3kAJTdFVrgVmyQ66SEAHk1EyplzMGGU8q02SHqNy", "O7KUDXJBv/+RAvKfgcmy2PXz+WIVMmv4p8bgBmjTkziYi0UgQhjk56yFiKBczf8/DDkIcvRnPaNJPTIz", "lP9fLetfFHx7kKOLziawKHPk05CKPn5JtjRxuakcqJaQYTGV95tAEXlDuceDU7rMwb3cvsgwlcNGPoa3", "t9ZnnlYfD86dMWK4P036PO84WENtt0Za2cLOt860PkjimFp+xHZgT5d8EEE7kCACvTmdgJFl9uuG9ovS", "PpgBH7HvQNvWXmcpWQkaFe9nR1A1L2xC60OaZOO5MRN85EA2AKHzKmnIRE1Jakg6gEQqSjHpj/NhBtnF", "SqWwAxqNRqNZOn+FzYLzuN8qnN8cVORvrXN2dHrA2g/4c7t9O/GP4XXjxL0+o63X637x237R3q+85vdu", "XnLVlySYFn1ScjmFZFGZ8wllSZ5F4/o2DQAXkKmbTAzBh+qHNPhQ+ZCWcu6HYu9DaIHoIcAFlfcf5F0C", "CUDEYlNP3nHBSFlwIYaITXDEcNFDQCj9yNYi9Eyd6ZKwX5Qmo6FXSAt98y78ASZAfTTHM1GuTzrWknx+", "5FRvaplPNJZvGZ2E3KfQMB8RJjOZzN7BUescNA+ub1qHrWbj5iCTyXS7pN1qNfP7zWajhweNSWuvMWjd", 
"trLZbLdLMpnMwfn+XJc3hObNgEtcfSTucI/ainpmutAqcSQhblEpltFfrhH3KDERjY6zwagXCrJrpIJs", "LKT0rzlHrx0npkKxhMqVai2D6ju9TKFolzKwXKlmysVqtVIpl/P5fH69mLYJTw9XN/NB//iiVrWPebr1", "tBqfLfs/CJN6SWdGZtlsUap1wkoC0tjQPa5mDvC7hn70kKvXQAf8p26MCkVQhs9ECdWAEFf1JeNjfWih", "v74nXa4j+ozXmvnpM1ZrSY6NMACtREUbEtxHXPxUfLjRQd+OjHnjRjj66pUhAQMr4M9aGJVyN3qyqOti", "kRhO9McQ8uGn4LaTOyCAaZ7+Ab+6lucwsRxfRSaeH9xdN7b0rYeISDKo6xjBDSnw2rROMFJEEH89G3Pl", "nUyo0NHis62di3VJp3phFM/X7/O3eC8a4bOReXr7cJqEKJpIJEycHUs1OlNPLY0G3RDXKpQ1xPRc582Z", "5vwwP8pfFsgvhoDItnf2Lto/l6sGy1zUL+RcwKaW7yprvlRBVSaIdhpqqgttPzo6KUo0Gw04i7o1nq+D", "2Qw+95UJaqjEdAGkWi6AmFA1EE8rh14wiHaIITLGjBI5vrJrRlp0CbSEDx1grDKhd1rNuym1qw2X0ye6", "it54Y/4MCSrpzuThuOuXFl7/0a5oS4pYJkRogtgQHkkXs4E26xND5J1KS5rfBzNQfIGb7MsBY5Ql2ISR", "gFhZBudtYTHjCuSJVotFuTBsvACAXo/khsZXzX3LQlyupQ+x4zOpPproermgiEIfNlzgmrOoyIWVrQis", "XwhODEI2wzDspRHtOqw1yWNujvHM0BwMGgSAxl1UyozNplnzk7K4qll3BRwkWlYc/jSzXy36LBl1wM1Z", "B6g2uI+twMsSTqpSVtZZvswCE/XKYElvSeNYsS3hfhg7hRUPpZ2Lm6FcMc1EVMFBAguHgy1n0IH+idrQ", "OtxEeOEWFkI8MHf/vG1V/h5w/EC0XUgPmS3G5KYEZyzZqmOyeeZ8dVf758l5J3O4+ebDaRbTnDs1SRA5", "sx+7K7A2nyeUDpaceNqUWLWBW+PfxKuhLM9PA2+QbH3WnwMzdXKbNzlGjJn03fPxyz0fP81pwbnz9FaX", "xL8y9jieB/Gz0hieVsemHahIumibWCh8xLuMCYhrcllwM0QcdUmsdzTnQF7WNvI4dcbI5JUJhtEYheNn", "QSPErzNNq0hCPvs8s9HDsUlNw65HWcQF/c+FILp/zhwgXWKY94zpbobXeW6ZgN65UPG3h3v//KSLHwgg", "3zCgYpMI8I2HWh+/vXKE1mVnm4DtIBpkgf6Wufj+raK2o2lb78Hcv20wdzyGe2aSjLjGPMrFgCG+XVTV", "e0D4v0VAuAenUtz/l1y+iuw2voG7JCDNiw7AgiOnr2pCTPVghKrU+DBpf87GxigVgLIugWRqKi9IREet", "8Sqg0EKcf1IwBxM/cSQ46GPk2MGYC8vBHOABoSzIXtyI3f4HxLNHEoDX9ou2fUOE+uaX/+YR5/vnh5eO", "P8BEX2eLKucKDS1xPHOQN3V3zJV+YdYQC2QJn80Fm4Sa1AIZ/5jD40ccFvOM4s1HfU7fma1kDr50HDFf", "Y3iexSbMhXP8nV60JnVdc7hXRnEEMCXpdTPBe3niQ6g1/Ej2AyLcZ+jJgyyo7bW6DM+Bag+CrB6gO4KI", "UgHQC45afqKhmRukR8xWo3MkwtQIkyqB7X9JjsQMrJWJErVK5ccSJaKxbwvZEjZmP5gsMYfNMFHC5E38", "DGRumjGxH6HeIDRkPhjAW84b3/p5SWrbVIsbYWiqrkCwK3nLrpIjddzBz01Om4sFUOKqbAPiWlGAu4Ue", "jQjn27RPFP3L+nxPp0IH0c/y3lmG0haLSER9TrIHjBSFSbhnNnM+WTov2jSfGzjZQaaW/C8IVtKofovL", "9XD/YtssnNb+hdF4ASU9Ctm6fBwbP7n9wZNG95ME4smF1pO8UJbsK/bJk+f3nkZo+jSEfLi+FSYcWUa8", "Wd1ScqhZQOaimRYSX950vgJWyrCIPS0tWLbAopRJZjuEdnTmVpiTDzgSqvTRUolx3f2qA+hVhbi5sVPp", "jcTN3yBP8hdKHWv8nO85mv89OZprUjOffrfczKelyZnJtuH3BM0tEzS/r0BtJzLqD2E1AEtFguhKLpRJ", "eVP+M+Ha5ZEbI7E6QGS82SgRfArkECS2w13snlo3q24cn7Qv5MYR4W2Z17AU749ByuwWSN/DxAYwzHAi", "SEwoGwEd1KLzm8ArJeoaZEhCZQkgGOz3saWif7pEDClHYY+wFKa6lpEQmAzCK0+OlHRhJrtISMQPIXum", "AV4ohhZMqyyZ0POcqcpyjVa8nU26JDhpBYkGwwd3i7KnLQ167Pr5fMnSfdS/0T9y+jcX8pH+5ev/6l/a", "jab+4X+xx5HY1b+qf+vf14dQJJ2Fo+blW4KNer41QmK5SwgSLT3I+7Zz0zjfb1zvg46gTOpllgM5B3tq", "iOx8yVPzR8bMsGV515uh1t7mI9FCV7FkmqqKtA2a1PV8gcABGWASBHx2yU1Yf1INNFcRdoLF0Mh3R81L", "YOI00saBgbnSfeOGdB20qosQz9zWqmJfrHZpWCq2Sz6awFmWgR7O6C33fWzrHf8YSDJmOikWiBjU25SS", "nRU+XkSlXKL+HinOGa4pcAdF/fAR/EqqN/hUxaRDVEL5N7bV6EEl1yzoIATC0CKH+nZ2QOnABPBxfXRU", "Qc9cWBDW1OCNF4BVoVy+I3DGQB4Wi7UcyhEXgZBm6I/8Yeq0BsdTH8yw2yeJZkvyLhLPgZ9HMvK3KK+e", "zEYMXtS6QdBcwqtGiZ/kpOOrjme2S1S0tDkkCusmoCRSIyAULM00ylqSBXcKAi0McwAZ2u0SADLgoxQ2", "d/9CLsQOtr9/3AUNAtRfANo2Q5xrVYIhjyGu1JdwLksOAeaWlQWHlAGDvTT4CB1sof8XCdr8mDUzm/ux", "ofttCYOe2gyxbG53mlEOqwz0vP8HPY97VGQHplPQJwqS0ly2xYZZf1B2WMI1hwLbxYQn4sCmLsRk9y/9", "XzmhIk/Q8bFAQP8K/vAYdiGbflqc3HH0hEECsrlpoTB95zEyI72PUqT6OAdTMtWtPppBqWbNHFQuLiTT", "Lgnw252TXdWBWzgVqVAYDc7DppuXMnrq7iKaU+mUQXD0x1/ywEN47/680rzqbpbjP83nKEJuIWJDIjI9", "BrGdKeVLlUJprZIUGS69rtLvUaD6byE8rE7IN2xJGwdmRpU/qKeH/5SYlL++2vvcgD9enbQViafZQoIO", "uq3RBVVEu631hU2idQ6C9jruiYsepWLTzodhh0QhcWGOras5G1/vOgOzarcK14fRlW0BQmIs9iWjY8x1", 
"YAy4vT7bKKQ6Ebpomtevd0FrUtQ/b5BtczP1dBCBTpxcG4fUuZGtfoUDOvqmkLFJ5hesvsZGpBaZDm1D", "WXA/RCR4GyQfrQ4vO2B5sbqYYNd3u8RGfVWquTeNtFNyTfxyKRd3yjvVWnGnuszIpMX1J+ptlKgY16Rm", "3c2TI8mytfJOqWQ03U/pKkpw9Rw0/2iJyX8TyAV6kbxLIODIg0wyR9PaRlLj0sKuumCx4IBOSDBFFrTN", "+F0ye5vDzCG1iAmS2jGfgRF8MzxUPbAyUqYAhrqE+56+8beIydG4ulHjrr1IY1QSI4C5U/o1oEaVg7cY", "pIA95GCyVms0yzQZFyDoZrS7odGzwqAsPUpPKnxGJ1SpjGEd7mziZR3A4vkseMBqERzzMXzFw3TScVP/", "VOAxSsU/IzBCHjpetWFjMffR9pHkeTPiUE3MoOqX2YAqACsQILWisDxPEuz7YYIWUU+9ANrvEk7dKBny", "tImyc6EKSwuPWTBn7KB1iUFCNhJ2F648OA6JMXe8R90Nck0DT81H2V6dq49G9ckuuHJXpsyH/VeQullZ", "DIAsaMZDZDuX+18kU5tRVmTt3LNfEpY7n2fW01JyCFJ67vgnHMEZ+SyRSlHgw944yzJ0xW6dZWryM0Mu", "utkA8TIgc523uMfmx1nJn4Is0Tj6tkrITOsjrf+pgdb/DurxmazNhTMeueMjU8GJnAZOeGYIM2zoY/NX", "5J8ceuGfrxoY/QYPgl4t9iX+R6SfCt0OKyuYv4L8E/PDLCo7nRoo38PACgcYSJEpVGh0VEi0A6YiI8U9", "2HPiQ8sP4cT6j/jH+VEYnMzmoSIx4DyVTjl4HIdACRXQyeg4X2pJqMfck+xr9q8MHcNUOjXhzpItkkR8", "akrnxSlqMc3iB6y4rWjke3x87ts0Q6iqQGVvl7/mEygEIvbm4ZinYSz9NiKwJ2kgwa2ufucAsoGpCGAu", "FnkgVOYkAzp4XxX3kCKUZGYxWxyh3BV/9imz0KoqY8u1RTNBWH5rNrT+krFRzx9slsJ6aqo9/EAy72za", "Q53313Sob2f2IF9iy1WZevGexXwxn9/J17L5RPukiuhIzkkc0WeckJAofx76vU1SOSEfzVslysUk/T3y", "3sgMjtL6h/cM+LOpzObORpxh5euSvQkqEs0bYiTxmvx9oirOLAQSEc2GdMtlwy+7UxXf3wQ7SWcqOdxP", "iuZL3pAZoCWpkkYzW/wiqIBO0qc5LKhJ0+HLsvpBV905vTTWKq0eqnO280GtGmPpM58m/uYpiNpY8zpn", "rPlSuNGWxh/daY3pZ4SmKnxskTN1kJHBgybAgVPqx8NV/MRCDg4kAz85QDvwOujMpoVXhNImXoPJVgSB", "HrKoizgwVua0elIOffOVMiBld8gQ4MiixIYmNz5izkXk6baTvb05zNTf6sc2WY4/rTxCkP+vhp154ef9", "O4Ta6DkRx7Paz3Pbpn5fPmKxuJkVMpwh6TxeNFtvpKJwhF/yPqmxK+3+lfDWGyIi0ULXUK++Kq1bOfM5", "EunQ29mnDPSRsIZSlTWjZEFLinXI+F3+6TPnn0qPQiKwa6S7RKvxsbRkZXQx2QVKBVsSAaDD4xKC9SCR", "YyGsEp6gKTcG/jCbvAvyxWq+3CvasIp2KuWeXSr36r16EdZLFVSBtZpd7FXz/T78lNZBXT0GiTXMOHiE", "AAtrn8zGY0PkzAorSEn709ypWmyRLCr0FxNSNuhmkqZWBxfuI4GYqxT6yRAZ1GjnZuwdShcSOEAM/GFB", "YjvIw+QTwDYiAoupfvRXny8VqwGVErVQ7hg0KeG+ixiw5OFS9Vnmk88hB5aDJaOKtxki0iXhWQrPgRQp", "g4O1pJry5hGw8/HcC4QwNFuxaO9NvsuXXPJJJYPM1axmSKTNpSXt32vW/4Y165O3IVH1XCINrVnMcnDS", "s1FXQbYCKq7SjtHWuuqP9Eui0+AxxJ8tVhhrsTxzRrTKgkPsIDBwaK9nwnhCe1+6S9AgCz6q7HE+zPzf", "j3PcXbh+YuHspQ82Xhi/afhg4wq4gkdEew4kI10dT1cdimT9BsPEXhQF99ixLfUyppICg+WY1ZSzhUJ2", "YSmlbAn+uBvW7Nd+mPOS4GFLPApK7xLYXZZDs7qaPPLoknEdbCGTIrnU4LBKY0942dmVgnbit+T7J3YM", "NhIsF7VknQW6CuU/4vNPppPYk6+LoeyQQKVNZASlDn/zUdm+KO2y9NMF3oUHrl1Zj3TTLjn1KHmyzc+1", "KoTFfXcFCwhoPmgKfC4ZUOPs6GL3uNE5Vt6PeM3/ISxWqruVYqVWr9uoZNvlcnmnZhVrdrlQK1aq9VK1", "2ivmS/U8rPaqtXytn4eFnVq+XCuhsi3/UYXlfmLGylJK+jFqwQPteVrB/99CMMaXso5u0uEmyy31HU/r", "P28KtoUcJeec7JkvSo2Z5XIbF99MSF6ZZj5dmcsprwdl3zKeUcEQCrQc9YrFEovikwmEUwLHSjPhfHp6", "sNpESp5D6DKNVZWD20htDVsmTafKPyypWGCT/pOnahrwDSoztCEJayBwM+T84/1GG9pstGCM78vAns9b", "WRabs7b8xaqJLtfNo8/OU/B80GoXYOgtSZ5sswMbs7Flu6QRVAhWZV207PPRVDj8mAYfZ0Xv1F+m2N5H", "MFuHCqHtkh6aKStKVFIFYvSIrhZ74vGQlNk6zNZjyEK2UuSxroijIykgVwkVUkHt0XFixkOkFOPfV4Fx", "64qLmyVgDryBKaJqArLNbsw4UaiCL9G6Z9UY54IHL4/ACE3DOjfyLpiFaSilLm40iN1xmUz49sjl0SW4", "vN07azXB6cED2Du7aJ6qz13SJe5V63zvqGF1LLp30Ng/69cfjkfo9aQKbaf9MKnBo6OWcwIdUT95Lr7k", "9oqnn4etfst/ORLe3XMNdcnZ9WD/tlZ9hjcV726/4h62T0reCBF0nbNu3G/frkbn0ys+/FKkV18mB6+3", "nV6hed5u9ptHg9GX+lWxS14fR6xlNdlh/qo4Yac9B/r28PYzvoOksc/dQv3h4BvvVRq3pZotblm7dPVg", "3w92rj9/wZf9u/p1l5zuPd/kS+O7vQu73eEPpZ0z2CTVlle4GHv11gHNtdDB3UPhm9u8uGzA03zv5Ljk", "9wflpo9G/PNNp0smV/c3qHn24j+eVS/aX+jF5elk3L7qv/QGhS/79bH/mD8Vzznr/Lj4Av38i8sb/s7x", "iYdG44vL6xenS6bfxPP0sc/oHUaHU2/yOBhfTQQh7Xpu0Dnwcyd3N+whXym6B7c3tabVq5VH1vHhzWG/", "PXLI6CjXJfn+bblxDSv58nHp5Tk/Ej1UGp9al1/o5YV/unfHjzvjfP726KExvUT+9HO9Zt3mHg6G7dqo", 
"1Lk7fe6SKmo9Dqa4fZGfOIWHo/3rU8t3JiO+0/jsO6NBgd70yrz06j6OL/O1I3rzcl8uPsPTyn3n8/nw", "EaEuqVfzX+jdsGcVTr3O5+f+I33m7EA81i97t4+fH8aH9WuP2fcN9nzcOxkVT7zr08bLzfCFXzX43vCo", "0CX5M/+leA/be/lBsVW5tNr2Sc769kzzdctiz3tffPxyz3AF+zvtL179202u33k9d7ndGpB67tvjaZfg", "+pXv9P1azf82vM9NRLEnCBaDa/7tefjS9p8fbsuPvfJwJA7rw9Pb3JcvtXLx2/CscjppXDeuGntdIvYP", "jx7vr8eWezA43W8XTjuN+qN7N+qVToZnN+3C2Ze9KbwvDC3iNILfreOTMXTvnu1mZdwllmt9xlcnF3t7", "7b1mo1E+xAcH6LjqsuHhcc2/41dn7XYx/1CxHofk5aF+2HAVDTWPJvXD5mTU6pK9Sevo8IqeNBu8ubf3", "0GxMDprHg4PmYbnRaA5GV7Pen88fGrna3oM3cKadxuPD8fB5ejrsktznfvX1sn837h0X8wffSqNW7eJw", "7zxPzr583rstuP648/nbjd8p3Z+xvZJbOvId4Z1eH5ycngm3crDfJQV29PqlQW8KU2/noVU/a+zb7Wbz", "YvrceOb0/rZee7j1m59zPfLMbtB18ez6otmfXjZr1fudegVf3HWJW+l87vGr/UmtWTxjjt1ol9v7Pp0+", "FjpYHMHH8unV2Z34fHMAC2XMHzpHzedXWrt8qN+VTi5GlXyXDL7dD+rF81zPLR68dmo39dL9wX6v4Iyf", "yy1n/DJofTtFg0Lh9cvDi8seOo8nJ83++LX/2TnvVP2XwXGXPL/kTvJT57F4hntHrHrUaEwvdm7vWeOx", "M+m08wfW8019ctAkL6POvj/95t5P7sbne1/8g9Zd/QKVHrqkjW8L/ZPzOrdr+x4/fKm0P3+xSZtcdT4f", "s+eby9P9knvPnIZNDm6G9sNd/flx5N0P96e8lNvZQRddMhzl2RmZ5p/PJyPo93P4tn5hVb+M26Pns+v2", "yaByu3N3Oj3x7+/F6+QLeW6fV+6vD/e+nZb5I3Xb7S7pi97NceFzZdq7vs81SuO9Hny5vi+K2u3r+bP1", "ikadxwMMz853znLH1kmzdV24OqxX68V9u+EcHO7YXTIqDq7wQ+eqAeFJ/uSk8Xo8vh5dn5ydDU6LD1cP", "+Pj8bloUpZPpYZ8z6FYmneb9RX94iVrTs72bx5MuGTPv3LnsoT6/2anUbvrFvfOWP3h9ZM3K3ct+53T0", "OLgeFu6Oxp3WFWlOX0dX0+rBbfHbpYfvKzuSRw0vW18e2Sm1TkunZ52dHH49ubq5dsRzu/Fnl/x52b+p", "qRes9BtWK66eJVUdKUNPnDvJl/R7teH17yyudGH8rHcXo0XuEi2FcrxAMdeV8JQjKCIVQS4FGg6UyhXJ", "q1IF9rrkjyCe71Nisb2FzJqgvDvdsqDkz/X9xN07YIl3Z8MCHead9u306kRRsmHboas98BOYFxWhL4aU", "4VdkK31mscrDRg8kNjr3WIwujsu39Vr5wOZ7t2QqeqXeZHw9GBw7V07v4YtTI4X8eGdJffnEYhG3+lnJ", "UP3R2YLmvXt5pOKWNdvFZH3eA1ehNRJPSdpxB0l6NGYr/vflQfxIUb3lZeoa8ngrLc3QIIGuDobgan0S", "d1nQ0RZtDv4vmISmbhVprZqnQc8XKr9NEq8xDvG5sPj1BPaLK/+FSFhf+G9+b7cv/6cN8+rxTI1XTDST", "lsSv3gPZPz/csg5gYOx/UwHAjctO/ITyEaA3jby6mfCYQlCq2k6+QElLdyn8lLoSa6EhfRVxy7cGxoV8", "tCkssu1aSHSljW2xkng1RM1li4bCDYrA6hGitjB9F1qICXuLzrL5KmvaEjPhIs0xavthuufKIvNzZVd/", "0OK4MMxy6OcXuui+8AV9Mg+owDkv2+orfn4XEoodEv1am0f5FqPG/H1zwFoCj3WtOyMsxBJyObIYEhn9", "TnIogYbvEidVkYUcPSXa8xbNeRtItUHAQ2y4ZVW6KBtAEjHURiOEy/lSsZwc72OtF/lC/1LfgYOg9AYb", "WroYjA7RiJRfC6plQIdTU9fbMCgOWmZFc0LrsjXF69FFX7aabWtW0moEsWvxOneZxPCWnj8TMRgiGxzZ", "nKRL6CZSnHmLONKg25pIUiI8DdWKqE8iPBA0iqkH+SyhTAwz0EUMWzDrUepkifCkepZKpwqrPm+lT0QL", "VC+PnwhapYMLQ10itzfNmEB728kdQHnOyGYx+Yv+ODLd+B3n+XzPtX06pe26LFTnWTvHq8/Qdl2WPDq2", "rltC2Pa6LgsRqus6LHObfv+azHkClVm/GLmYDKuq0GAO+JD6jg0YUuFgPVXp/6KvRPfFTdK5xSrGWahk", "xoS9zwI1rosgMZGn0HFAQkOgTx7vEsiQZnxaJV6YF4ZtDZccY6picLRLSQLcJcx3kK77z1CfMpQGEwSG", "cBzWPVKnGahcS7m6HgJwAoNyj1gAzMlH0SUe5Rz3dNCzi19U4KMLhTXUvi2zH0DQgVLkJVMOaWeZ6zWS", "M73N2+hzeXcbk9SGPebrbmxBUBv2SH6obmPa2LD9Egf4FlQbfSl9+9TKMDlzkzIEJtdb1yFY9t6miasI", "js3XuQO2ZTIl8wlZljEZSz1fOLdbL+iNVQKSw0vmhvy69OpanvmZ5aUw5TJI8IxmSVILZw2L0RWvJAJ9", "x8uaOhHmqZtkFBrb0zbVXsK3lBLUPPWxsMkrgAvy90bGtnN2dHrA2g/4c7t9O/GP4XXjxL0+o63X637x", "237R3q+85vduXnLVl1V5gtEkG8QKyQYmI/UvPkwexFvqBoALyFSMshiCD9UPafCh8kEF/38o9j6Ez9/0", "EJDbo4LXuwQSgIjFpp5QBhU9UhZcSK48wZFXc3oICPMGpyoWOysa3CVhv3hY83J9ZbP4OiWGWz7DYtqR", "R11v+x6CTJ+VnvrXYTDdyf2NFJpVSynM63bhqFIX0nXTMenTpFQoXcpLUGPlViX1dACrLjXAs6lYMKbe", "xFTDg9YQgaJKg1QKQ+gNmUwmWag+KxeE6ctzZ63mwXnnIFPM5rND4TpaaBXqIFx09tT0JumdAVWzDkAP", "R6LDdlPF4Cko+WE3Vcrms4WULiGs0JSzHEoQz/2F7e+KWpKqKh4hHX2leaaqrwgMo5PnRoWxIxE8KKqN", "azB8fNsIP/oFjohVnjJlsJwVg1CFkTAlQLFYZOvqA2G595atQWlKiDsB+/Yggy4SStX4R8Kb/EFJlwB4", "QcFA1XXERJ09MQyC6naDZ46DE6eVPs0+41RYKJZQuVKtZVB9p5cpFO1SBpYr1Uy5WK1WKuVyPp/Pr4/c", 
"l+IkM5ZNtRnFfD6SmWRylB0TrZJ7NtXyZwCtFAciWPq++HZAFCfyiJR/4tSm9MLipC2ihU5zMgC29dSF", "Xz91w1fVsUdIOX6wBkTPXvr1s9+Sme9GnkAPMXk2QHi2NSTlvwOSEaETMrcFlb9j928JevF0/osq5wGo", "pR7os2MsXFFxwLz/8VXSSBiHroqZRJmQYl7heVLj5II/VF3rpFeXm7qgGwQETYKuaeBRuXQcJOpwUzxW", "2Y7HiMGAuSt+b1Q8BK2g5DpmUYWPLzKuS8qF4dWGySAu9qg9/XkUr0cP/GDf49enZGbfF/hN4WfP3rKT", "tt58VBWMlPiB7H8Z02EBft45zzvn2ZjzGKaRxGl4bq3gFJQtD3oo08k0KEMVyk/p8Gn2dJcoJciZ6veG", "lXzeV/7MZJlID3ymH8z4dUJFZJoEPM8v853G3mlsy9t98QjFKO3nqClbaCYBJteoJNHKY5spJeHA/2Vq", "SQxTCecojpd31eSdef2mqkmipCD5lza5RPWTBE1BNpmpCxvwkwiz+jfiIr9Ay4lgRg38d+s5kfnDsLSE", "I6VquqLJrPh8T5U+1cW/l2g/Ar2InLKYxuGZR+3G3Kv8syZIos3vMflYoiX27MoKArDphEg5d+lNvm8a", "qFMdPmGnCauPCebDyC2+4kIOxtnuShYU2LOOv92FTC2BRGZWE2oGWDhPDxOYlJWbfIzDujTBqzLazx3i", "//2Ofr+jf4s7OsZWQq6iYzdmp3mRXzmmQOOPaB0L7Aqs1DmwmKkaaWVrVMEakgRjAcywR31hsle574iV", "VgEJ/rtSst6sIfG0hAfKI5DM/9RbAYSqXBhs+Q5kpoY3+EMMqT8Ymkibk87F+afsf9zFf6SeqhrwDcjI", "hQT3ERfraSlsuQE5XSPhM8JVen3QTwGjrPNG/CKGVJQ8al4zCBtbVBFWWOfXbF/wmgMUIOqoNcX5dbIa", "JDnzdyYYLltZQYrtEAXv9LiWHmfIWiaYRLd7U8HkN6e1OHlsQHSRokOraS6sQpkoZeuH9NCLvDGjFxFT", "5IdsYCNdNpvGaC0MClDPoKyijADOd8JYTxgBrt4F9neB/T9ZYF/gTev5He9Rd7mAEQgLEOio6vhzKHyN", "3NAlc80hC9uol1Nmj7csdRHsXbS3vPwlTDoUW7M5EIzxX+IqUKtdwunUx/+263+26HlSsJHHqTNGuZ7j", "I4+ZN+qXm5n3Tfu9sPmvMdoG82wVm5L/BdMvt9cGbWb5wqpwxN99VQY7+B6msnhh/jZmpmAPVcU3pnNZ", "Qoo0Lu1oAnz0vlq4OPYjDX91fMfCXEmEEmkDYhUDfjPBAjpO+NxbUC8Y2ImrmwI7yPyXe6dmXblpCnaz", "W3NXfdKSZ01y6hmTZek0kXbqnZNfev/O1pDELsJIDYOMdz71rxHsNQX8fmI9DA+QpMMwETA4TTMyWx/z", "A0lYkj8gaA3ZrLZ/bwqU/JpMqJt72JFp/ibRu/Q3C9JLt1J9ANHf3qn4nYq3oWK0eIIk5YYJR8tvyAvT", "5I3nfj4XbGGhBhTFC6SuLocwevrvaAlZuRyJel00Kheti7Rc/4tXWfpFyl9yma6/WQVcUk8qYbN0SxBA", "ovMkA53QnhWA+lvVQh4A9a4U/qZKYScs5mYOEbJjfhRKIiJRrBScBiisp7IgnbQhJuAPU5QJU/IJhI9S", "xtNMoYez6l2XIe7rEj/Qwzn9+K3yYSKWMfYklhsXlRYy/6AcHGAyWDUBF3CA3jiNpR+lBjZ1oSoTqKdZ", "N87X7/8/AAD//86RvuTV6AAA", } // GetSwagger returns the content of the embedded swagger specification file // or error if failed to decode func decodeSpec() ([]byte, error) { zipped, err := base64.StdEncoding.DecodeString(strings.Join(swaggerSpec, "")) if err != nil { return nil, fmt.Errorf("error base64 decoding spec: %w", err) } zr, err := gzip.NewReader(bytes.NewReader(zipped)) if err != nil { return nil, fmt.Errorf("error decompressing spec: %w", err) } var buf bytes.Buffer _, err = buf.ReadFrom(zr) if err != nil { return nil, fmt.Errorf("error decompressing spec: %w", err) } return buf.Bytes(), nil } var rawSpec = decodeSpecCached() // a naive cached of a decoded swagger spec func decodeSpecCached() func() ([]byte, error) { data, err := decodeSpec() return func() ([]byte, error) { return data, err } } // Constructs a synthetic filesystem for resolving external references when loading openapi specifications. func PathToRawSpec(pathToFile string) map[string]func() ([]byte, error) { res := make(map[string]func() ([]byte, error)) if len(pathToFile) > 0 { res[pathToFile] = rawSpec } return res } // GetSwagger returns the Swagger specification corresponding to the generated code // in this file. The external references of Swagger specification are resolved. // The logic of resolving external references is tightly connected to "import-mapping" feature. // Externally referenced files must be embedded in the corresponding golang packages. // Urls can be supported but this task was out of the scope. 
func GetSwagger() (swagger *openapi3.T, err error) { resolvePath := PathToRawSpec("") loader := openapi3.NewLoader() loader.IsExternalRefsAllowed = true loader.ReadFromURIFunc = func(loader *openapi3.Loader, url *url.URL) ([]byte, error) { pathToFile := url.String() pathToFile = path.Clean(pathToFile) getSpec, ok := resolvePath[pathToFile] if !ok { err1 := fmt.Errorf("path not found: %s", pathToFile) return nil, err1 } return getSpec() } var specData []byte specData, err = rawSpec() if err != nil { return } swagger, err = loader.LoadFromData(specData) if err != nil { return } return }
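// NOTE: the examples below are editorial sketches added for illustration; they
// are not emitted by oapi-codegen. This one shows the intended round trip
// through the generated union helpers on UploadOptions: FromX overwrites the
// stored JSON with one variant, AsX decodes it back, and MergeX (not shown)
// deep-merges a partial variant via runtime.JSONMerge. The region value is a
// placeholder.
func exampleUploadOptionsUnion() error {
	var opts UploadOptions

	// Store the AWS S3 variant in the union.
	if err := opts.FromAWSS3UploadOptions(AWSS3UploadOptions{Region: "us-east-1"}); err != nil {
		return err
	}

	// Read it back as the variant we expect the union to hold.
	s3, err := opts.AsAWSS3UploadOptions()
	if err != nil {
		return err
	}
	fmt.Println(s3.Region)
	return nil
}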
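// Sketch of wiring the generated routes to an echo server. apiServer and the
// ":8080" address are hypothetical; a real implementation must provide every
// ServerInterface method. Embedding the interface lets this partial example
// compile, but any method that is not overridden panics when called.
type apiServer struct {
	ServerInterface
}

// GetOpenapi serves the spec that is embedded (base64 + gzip) in this file.
func (s *apiServer) GetOpenapi(ctx echo.Context) error {
	spec, err := GetSwagger()
	if err != nil {
		return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
	}
	return ctx.JSON(http.StatusOK, spec)
}

func exampleServe() {
	e := echo.New()
	// An empty base URL registers the paths as-is; pass a prefix such as "/v2"
	// to serve them under it.
	RegisterHandlersWithBaseURL(e, &apiServer{}, "")
	e.Logger.Fatal(e.Start(":8080"))
}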
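// Sketch of loading the embedded OpenAPI document. GetSwagger decodes the
// base64 strings above, gunzips them, and parses the result with kin-openapi;
// the Info fields printed here exist on openapi3.T, but their contents depend
// on the spec this file was generated from.
func exampleInspectSpec() error {
	spec, err := GetSwagger()
	if err != nil {
		return err
	}
	if spec.Info != nil {
		fmt.Printf("%s %s\n", spec.Info.Title, spec.Info.Version)
	}
	return nil
}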