From 2356048b9a307050aa410d44527a9406bc01bad6 Mon Sep 17 00:00:00 2001 From: Piotr Rygielski <114479+vikin91@users.noreply.github.com> Date: Mon, 26 Jan 2026 09:12:39 +0100 Subject: [PATCH 001/232] ROX-32813: Update banner text for VM vulnerabilities view (#18651) Co-authored-by: Guzman --- .../components/VirtualMachineScanScopeAlert.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/components/VirtualMachineScanScopeAlert.tsx b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/components/VirtualMachineScanScopeAlert.tsx index 9188430e410b0..225fe15ea3d95 100644 --- a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/components/VirtualMachineScanScopeAlert.tsx +++ b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/components/VirtualMachineScanScopeAlert.tsx @@ -6,7 +6,7 @@ function VirtualMachineScanScopeAlert() { isInline component="p" variant="info" - title="The results only show vulnerabilities for DNF packages, that come from Red Hat repositories. The scan doesn't include System packages, which are preinstalled with the VM image and aren't tracked by the DNF database. Any DNF update could impact this default behavior." + title="The results show only DNF packages from Red Hat repositories. System packages preinstalled with the VM image are scanned only after registration (for example, 'subscription-manager register' or 'rhc connect') and at least one DNF transaction (for example, 'dnf install' or 'dnf update'). Scan results refresh every 4 hours by default." /> ); } From 162736ede39126fff3eabb51b1eef36853033ae5 Mon Sep 17 00:00:00 2001 From: Piotr Rygielski <114479+vikin91@users.noreply.github.com> Date: Mon, 26 Jan 2026 09:14:10 +0100 Subject: [PATCH 002/232] ROX-32685: Use camelCase for VM fact keys (#18654) --- .../internaltostorage/virtual_machine_test.go | 20 +++++++++---------- .../storagetov2/virtual_machine_test.go | 12 +++++------ .../dispatcher/virtualmachines.go | 15 +++++++------- 3 files changed, 24 insertions(+), 23 deletions(-) diff --git a/central/convert/internaltostorage/virtual_machine_test.go b/central/convert/internaltostorage/virtual_machine_test.go index 14234ff71fc89..989ce36234aed 100644 --- a/central/convert/internaltostorage/virtual_machine_test.go +++ b/central/convert/internaltostorage/virtual_machine_test.go @@ -29,7 +29,7 @@ func TestVirtualMachine(t *testing.T) { Name: "virtual-machine-name", ClusterId: uuid.NewTestUUID(1).String(), Facts: map[string]string{ - "Guest OS": "Red Hat Enterprise Linux", + "guestOS": "Red Hat Enterprise Linux", }, VsockCid: 42, State: virtualMachineV1.VirtualMachine_RUNNING, @@ -40,7 +40,7 @@ func TestVirtualMachine(t *testing.T) { Name: "virtual-machine-name", ClusterId: uuid.NewTestUUID(1).String(), Facts: map[string]string{ - "Guest OS": "Red Hat Enterprise Linux", + "guestOS": "Red Hat Enterprise Linux", }, VsockCid: 42, State: storage.VirtualMachine_RUNNING, @@ -54,10 +54,10 @@ func TestVirtualMachine(t *testing.T) { Name: "virtual-machine-name-2", ClusterId: uuid.NewTestUUID(2).String(), Facts: map[string]string{ - "Guest OS": "Red Hat Enterprise Linux", - "Node Name": "node-1", - "IP Address": "10.0.0.1", - "Description": "test description", + "guestOS": "Red Hat Enterprise Linux", + "nodeName": "node-1", + "ipAddresses": "10.0.0.1", + "description": "test description", }, VsockCid: 84, State: virtualMachineV1.VirtualMachine_STOPPED, @@ -68,10 +68,10 @@ func TestVirtualMachine(t 
*testing.T) { Name: "virtual-machine-name-2", ClusterId: uuid.NewTestUUID(2).String(), Facts: map[string]string{ - "Guest OS": "Red Hat Enterprise Linux", - "Node Name": "node-1", - "IP Address": "10.0.0.1", - "Description": "test description", + "guestOS": "Red Hat Enterprise Linux", + "nodeName": "node-1", + "ipAddresses": "10.0.0.1", + "description": "test description", }, VsockCid: 84, State: storage.VirtualMachine_STOPPED, diff --git a/central/convert/storagetov2/virtual_machine_test.go b/central/convert/storagetov2/virtual_machine_test.go index ac057de4fb0cc..04076952ee5ac 100644 --- a/central/convert/storagetov2/virtual_machine_test.go +++ b/central/convert/storagetov2/virtual_machine_test.go @@ -42,9 +42,9 @@ func TestVirtualMachine(t *testing.T) { ClusterId: "cluster-456", ClusterName: "test-cluster", Facts: map[string]string{ - "Guest OS": "Red Hat Enterprise Linux", - "Node Name": "node-1", - "IP Addresses": "10.0.0.1, 10.0.0.2", + "guestOS": "Red Hat Enterprise Linux", + "nodeName": "node-1", + "ipAddresses": "10.0.0.1, 10.0.0.2", }, VsockCid: int32(42), State: storage.VirtualMachine_RUNNING, @@ -58,9 +58,9 @@ func TestVirtualMachine(t *testing.T) { ClusterId: "cluster-456", ClusterName: "test-cluster", Facts: map[string]string{ - "Guest OS": "Red Hat Enterprise Linux", - "Node Name": "node-1", - "IP Addresses": "10.0.0.1, 10.0.0.2", + "guestOS": "Red Hat Enterprise Linux", + "nodeName": "node-1", + "ipAddresses": "10.0.0.1, 10.0.0.2", }, VsockCid: int32(42), State: v2.VirtualMachine_RUNNING, diff --git a/sensor/kubernetes/listener/resources/virtualmachine/dispatcher/virtualmachines.go b/sensor/kubernetes/listener/resources/virtualmachine/dispatcher/virtualmachines.go index ad1e813ca3fa4..fb8884eedb417 100644 --- a/sensor/kubernetes/listener/resources/virtualmachine/dispatcher/virtualmachines.go +++ b/sensor/kubernetes/listener/resources/virtualmachine/dispatcher/virtualmachines.go @@ -11,14 +11,15 @@ import ( kubeVirtV1 "kubevirt.io/api/core/v1" ) +// Keep the facts keys camelCase to match the style used elsewhere in the UI. 
const ( - GuestOSKey = "Guest OS" - DescriptionKey = "Description" - IPAddressesKey = "IP Addresses" - ActivePodsKey = "Active Pods" - NodeNameKey = "Node Name" - BootOrderKey = "Boot Order" - CDRomDisksKey = "CD-ROM Disks" + GuestOSKey = "guestOS" + DescriptionKey = "description" + IPAddressesKey = "ipAddresses" + ActivePodsKey = "activePods" + NodeNameKey = "nodeName" + BootOrderKey = "bootOrder" + CDRomDisksKey = "cdRomDisks" UnknownGuestOS = "unknown" ) From a8b9d34905325bcf43f79a5c15163f4688d6b3e3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Jan 2026 09:24:58 +0000 Subject: [PATCH 003/232] chore(deps): bump github.com/openshift-online/ocm-sdk-go from 0.1.491 to 0.1.493 (#18667) --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 92c5588580c3b..271f946182140 100644 --- a/go.mod +++ b/go.mod @@ -94,7 +94,7 @@ require ( github.com/onsi/gomega v1.39.0 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.1 - github.com/openshift-online/ocm-sdk-go v0.1.491 + github.com/openshift-online/ocm-sdk-go v0.1.493 github.com/openshift/api v0.0.0-20251122153900-88cca31a44c9 github.com/openshift/client-go v0.0.0-20251123231646-4685125c2287 github.com/openshift/runtime-utils v0.0.0-20230921210328-7bdb5b9c177b @@ -422,8 +422,8 @@ require ( github.com/olekukonko/errors v1.1.0 // indirect github.com/olekukonko/ll v0.1.4-0.20260115111900-9e59c2286df0 // indirect github.com/opencontainers/runtime-spec v1.2.1 // indirect - github.com/openshift-online/ocm-api-model/clientapi v0.0.446 // indirect - github.com/openshift-online/ocm-api-model/model v0.0.446 // indirect + github.com/openshift-online/ocm-api-model/clientapi v0.0.448 // indirect + github.com/openshift-online/ocm-api-model/model v0.0.448 // indirect github.com/openshift/custom-resource-status v1.1.2 // indirect github.com/operator-framework/operator-lib v0.17.0 // indirect github.com/package-url/packageurl-go v0.1.3 // indirect diff --git a/go.sum b/go.sum index 9c2047aa990b8..6b2a28769652c 100644 --- a/go.sum +++ b/go.sum @@ -1272,12 +1272,12 @@ github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJw github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww= github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/openshift-online/ocm-api-model/clientapi v0.0.446 h1:NqKdgNQZF8D57diDx4nMAyFBptT1Xg8DBD/51ni79TM= -github.com/openshift-online/ocm-api-model/clientapi v0.0.446/go.mod h1:fZwy5HY2URG9nrExvQeXrDU/08TGqZ16f8oymVEN5lo= -github.com/openshift-online/ocm-api-model/model v0.0.446 h1:FbkSxBzoeC4NGJmCGRmoBtO1AX+Y7acepLPn3LtQCcE= -github.com/openshift-online/ocm-api-model/model v0.0.446/go.mod h1:PQIoq6P8Vlb7goOdRMLK8nJY+B7HH0RTqYAa4kyidTE= -github.com/openshift-online/ocm-sdk-go v0.1.491 h1:gkj6YP5EACzjEyBLPzso4pPo2kb1/igTkID10XaX9sw= -github.com/openshift-online/ocm-sdk-go v0.1.491/go.mod h1:2tO4+e1UzOpSCS7CSMOiB+tA2EIPdT91XMyw7mcKR+k= +github.com/openshift-online/ocm-api-model/clientapi v0.0.448 h1:XMsU5Rk5zwWNlXgTeEv9uEXLrLbEVIz2YzE/6XCQgFw= +github.com/openshift-online/ocm-api-model/clientapi v0.0.448/go.mod h1:fZwy5HY2URG9nrExvQeXrDU/08TGqZ16f8oymVEN5lo= +github.com/openshift-online/ocm-api-model/model v0.0.448 
h1:TbLHcMsyX/4UQtUlKIt1qGbE+CaIqmF0O2Z2vokvNh0= +github.com/openshift-online/ocm-api-model/model v0.0.448/go.mod h1:PQIoq6P8Vlb7goOdRMLK8nJY+B7HH0RTqYAa4kyidTE= +github.com/openshift-online/ocm-sdk-go v0.1.493 h1:+889zmbwN0guA8LFRr5WHpH2+VJNq8+r0fvrXY+x/6E= +github.com/openshift-online/ocm-sdk-go v0.1.493/go.mod h1:ThqKHtIyvTvDA5AxGFZph80sllVr63lZ+sb4qQP57+o= github.com/openshift/api v0.0.0-20251122153900-88cca31a44c9 h1:RKbCmhOI6XOKMjoXLjANJ1ic7wd4dVV7nSfrn3csEuQ= github.com/openshift/api v0.0.0-20251122153900-88cca31a44c9/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY= github.com/openshift/client-go v0.0.0-20251123231646-4685125c2287 h1:Spullg4rMMWUjYiBMvYMhyeZ+j36mYOrkSO7ad43xrA= From 4bcd99805d868f6334353da7c561f10947dece05 Mon Sep 17 00:00:00 2001 From: Mark Pedrotti Date: Mon, 26 Jan 2026 08:36:37 -0500 Subject: [PATCH 004/232] ROX-32795: Add link from NoClusters to cluster registrations secrets (#18635) --- ...sterRegistrationSecretTechPreviewAlert.tsx | 36 --------- .../ClusterRegistrationSecretsPage.tsx | 8 +- .../SecureClusterModal.tsx | 4 +- .../SecureClusterPage.tsx | 2 - .../Clusters/ClustersTablePanel.tsx | 18 ++--- .../Containers/Clusters/NoClustersPage.tsx | 73 ++++++++++++------- ui/apps/platform/src/hooks/useAnalytics.ts | 12 +++ 7 files changed, 69 insertions(+), 84 deletions(-) delete mode 100644 ui/apps/platform/src/Containers/Clusters/ClusterRegistrationSecrets/ClusterRegistrationSecretTechPreviewAlert.tsx rename ui/apps/platform/src/Containers/Clusters/{InitBundles => ClusterRegistrationSecrets}/SecureClusterModal.tsx (93%) diff --git a/ui/apps/platform/src/Containers/Clusters/ClusterRegistrationSecrets/ClusterRegistrationSecretTechPreviewAlert.tsx b/ui/apps/platform/src/Containers/Clusters/ClusterRegistrationSecrets/ClusterRegistrationSecretTechPreviewAlert.tsx deleted file mode 100644 index 5299cde932a58..0000000000000 --- a/ui/apps/platform/src/Containers/Clusters/ClusterRegistrationSecrets/ClusterRegistrationSecretTechPreviewAlert.tsx +++ /dev/null @@ -1,36 +0,0 @@ -import { Alert } from '@patternfly/react-core'; - -import ExternalLink from 'Components/PatternFly/IconText/ExternalLink'; -import { getVersionedDocs } from 'utils/versioning'; -import useMetadata from 'hooks/useMetadata'; - -export default function ClusterRegistrationSecretTechPreviewAlert() { - const { version } = useMetadata(); - return ( - - Cluster registration secrets (or “CRS” for short) are a modern alternative - to init bundles and will at some point replace init bundles entirely. Cluster - registration secrets and init bundles differ in their specific usage semantics — - please consult the{' '} - - - RHACS documentation - - {' '} - for details. In any case, you only need an init bundle or a CRS to secure a new cluster, - not both. 
- - ); -} diff --git a/ui/apps/platform/src/Containers/Clusters/ClusterRegistrationSecrets/ClusterRegistrationSecretsPage.tsx b/ui/apps/platform/src/Containers/Clusters/ClusterRegistrationSecrets/ClusterRegistrationSecretsPage.tsx index 77927add44412..0889f0250a56c 100644 --- a/ui/apps/platform/src/Containers/Clusters/ClusterRegistrationSecrets/ClusterRegistrationSecretsPage.tsx +++ b/ui/apps/platform/src/Containers/Clusters/ClusterRegistrationSecrets/ClusterRegistrationSecretsPage.tsx @@ -1,6 +1,6 @@ import { useState } from 'react'; import type { ReactElement } from 'react'; -import { Alert, Bullseye, Button, Divider, PageSection, Spinner } from '@patternfly/react-core'; +import { Alert, Bullseye, Button, PageSection, Spinner } from '@patternfly/react-core'; import LinkShim from 'Components/PatternFly/LinkShim'; import useAnalytics, { CREATE_CLUSTER_REGISTRATION_SECRET_CLICKED } from 'hooks/useAnalytics'; @@ -15,7 +15,6 @@ import ClusterRegistrationSecretsHeader, { } from './ClusterRegistrationSecretsHeader'; import ClusterRegistrationSecretsTable from './ClusterRegistrationSecretsTable'; import RevokeClusterRegistrationSecretModal from './RevokeClusterRegistrationSecretModal'; -import ClusterRegistrationSecretTechPreviewAlert from './ClusterRegistrationSecretTechPreviewAlert'; export type ClusterRegistrationSecretsPageProps = { hasWriteAccessForClusterRegistrationSecrets: boolean; @@ -63,11 +62,6 @@ function ClusterRegistrationSecretsPage({ headerActions={headerActions} title={titleClusterRegistrationSecrets} /> - - - - - {isFetching ? ( diff --git a/ui/apps/platform/src/Containers/Clusters/InitBundles/SecureClusterModal.tsx b/ui/apps/platform/src/Containers/Clusters/ClusterRegistrationSecrets/SecureClusterModal.tsx similarity index 93% rename from ui/apps/platform/src/Containers/Clusters/InitBundles/SecureClusterModal.tsx rename to ui/apps/platform/src/Containers/Clusters/ClusterRegistrationSecrets/SecureClusterModal.tsx index cb94f6b9796d6..386e8a91e1c60 100644 --- a/ui/apps/platform/src/Containers/Clusters/InitBundles/SecureClusterModal.tsx +++ b/ui/apps/platform/src/Containers/Clusters/ClusterRegistrationSecrets/SecureClusterModal.tsx @@ -42,7 +42,7 @@ function SecureClusterModal({ isModalOpen, setIsModalOpen }): ReactElement { return ( diff --git a/ui/apps/platform/src/Containers/Clusters/ClusterRegistrationSecrets/SecureClusterPage.tsx b/ui/apps/platform/src/Containers/Clusters/ClusterRegistrationSecrets/SecureClusterPage.tsx index f0fdb57bc0496..e1fd1e2d5f8b8 100644 --- a/ui/apps/platform/src/Containers/Clusters/ClusterRegistrationSecrets/SecureClusterPage.tsx +++ b/ui/apps/platform/src/Containers/Clusters/ClusterRegistrationSecrets/SecureClusterPage.tsx @@ -21,7 +21,6 @@ import { clustersSecureClusterCrsPath, } from 'routePaths'; -import ClusterRegistrationSecretTechPreviewAlert from './ClusterRegistrationSecretTechPreviewAlert'; import SecureClusterUsingHelmChart from './SecureClusterUsingHelmChart'; import SecureClusterUsingOperator from './SecureClusterUsingOperator'; @@ -66,7 +65,6 @@ function SecureClusterPage(): ReactElement { Secure a cluster with a cluster registration secret - { analyticsTrack({ - event: SECURE_A_CLUSTER_LINK_CLICKED, + event: CRS_SECURE_A_CLUSTER_LINK_CLICKED, properties: { source: 'Secure a Cluster Dropdown', }, }); - navigate(clustersSecureClusterPath); + navigate(clustersSecureClusterCrsPath); }} > - Init bundle installation methods + Cluster registration secret installation methods { analyticsTrack({ - event: CRS_SECURE_A_CLUSTER_LINK_CLICKED, + 
event: SECURE_A_CLUSTER_LINK_CLICKED, properties: { source: 'Secure a Cluster Dropdown', }, }); - navigate(clustersSecureClusterCrsPath); + navigate(clustersSecureClusterPath); }} > - Cluster registration secret installation methods + Init bundle installation methods { // TODO after 4.4 release: if (hasAdminRole) { setIsLoading(true); - fetchClusterInitBundles() - .then(({ response }) => { + fetchClusterRegistrationSecrets() + .then(({ items }) => { setErrorMessage(''); - setInitBundlesCount(response.items.length); + setRegistrationSecretsCount(items.length); }) .catch((error) => { setErrorMessage(getAxiosErrorMessage(error)); @@ -106,7 +111,7 @@ function NoClustersPage({ isModalOpen, setIsModalOpen }): ReactElement { ) : errorMessage ? ( @@ -115,7 +120,7 @@ function NoClustersPage({ isModalOpen, setIsModalOpen }): ReactElement { ) : ( } headingLevel={headingLevel} /> @@ -124,13 +129,14 @@ function NoClustersPage({ isModalOpen, setIsModalOpen }): ReactElement { direction={{ default: 'column' }} spaceItems={{ default: 'spaceItemsLg' }} > - {initBundlesCount === 0 ? ( + {registrationSecretsCount === 0 ? ( {`You have successfully deployed a ${basePageTitle} platform.`} - Before you can secure clusters, create an init bundle. + Before you can secure clusters, create a registration + secret. ) : ( @@ -148,20 +154,20 @@ function NoClustersPage({ isModalOpen, setIsModalOpen }): ReactElement { - {initBundlesCount === 0 ? ( + {registrationSecretsCount === 0 ? ( ) : ( - )} - - ); -} - -export default SearchFilterChips; diff --git a/ui/apps/platform/src/Components/CompoundSearchFilter/components/SearchFilterSelectInclusive.tsx b/ui/apps/platform/src/Components/CompoundSearchFilter/components/SearchFilterSelectInclusive.tsx index 07833a6eaf98d..8e5daf55431fc 100644 --- a/ui/apps/platform/src/Components/CompoundSearchFilter/components/SearchFilterSelectInclusive.tsx +++ b/ui/apps/platform/src/Components/CompoundSearchFilter/components/SearchFilterSelectInclusive.tsx @@ -7,7 +7,6 @@ import type { SearchFilter } from 'types/search'; import { searchValueAsArray } from 'utils/searchUtils'; import type { OnSearchCallback, SelectSearchFilterAttribute } from '../types'; -import { hasGroupedSelectOptions, hasSelectOptions } from '../utils/utils'; export type SearchFilterSelectInclusiveProps = { attribute: SelectSearchFilterAttribute; @@ -32,7 +31,7 @@ function SearchFilterSelectInclusive({ ); - if (hasGroupedSelectOptions(inputProps) && inputProps.groupOptions.length !== 0) { + if ('groupOptions' in inputProps && inputProps.groupOptions.length !== 0) { content = inputProps.groupOptions.map(({ name, options }, index) => { return ( @@ -54,7 +53,7 @@ function SearchFilterSelectInclusive({ ); }); - } else if (hasSelectOptions(inputProps) && inputProps.options.length !== 0) { + } else if ('options' in inputProps && inputProps.options.length !== 0) { content = ( {inputProps.options.map((option) => ( diff --git a/ui/apps/platform/src/Components/CompoundSearchFilter/utils/utils.test.ts b/ui/apps/platform/src/Components/CompoundSearchFilter/utils/utils.test.ts index c5fa9d3fb9b70..0d201b1490d64 100644 --- a/ui/apps/platform/src/Components/CompoundSearchFilter/utils/utils.test.ts +++ b/ui/apps/platform/src/Components/CompoundSearchFilter/utils/utils.test.ts @@ -1,33 +1,4 @@ -import { deploymentAttributes } from '../attributes/deployment'; -import { imageAttributes } from '../attributes/image'; -import { imageCVEAttributes } from '../attributes/imageCVE'; -import { makeFilterChipDescriptors } from 
'../components/SearchFilterChips'; -import { - convertFromInternalToExternalDatePicker, - getDefaultAttributeName, - getDefaultEntityName, - getEntityAttributes, - getSearchFilterConfigWithFeatureFlagDependency, -} from './utils'; -import type { CompoundSearchFilterEntity } from '../types'; - -const imageSearchFilterConfig: CompoundSearchFilterEntity = { - displayName: 'Image', - searchCategory: 'IMAGES', - attributes: imageAttributes, -}; - -const deploymentSearchFilterConfig: CompoundSearchFilterEntity = { - displayName: 'Deployment', - searchCategory: 'DEPLOYMENTS', - attributes: deploymentAttributes, -}; - -const imageCVESearchFilterConfig: CompoundSearchFilterEntity = { - displayName: 'Image CVE', - searchCategory: 'IMAGE_VULNERABILITIES_V2', // flat CVE data model - attributes: imageCVEAttributes, -}; +import { convertFromInternalToExternalDatePicker } from './utils'; describe('utils', () => { describe('convertFromInternalToExternalDatePicker', () => { @@ -70,92 +41,4 @@ describe('utils', () => { expect(convertFromInternalToExternalDatePicker('>2024-13-50')).toEqual('>2024-13-50'); }); }); - - describe('getEntityAttributes', () => { - it('should get the attributes of an entity in a config object', () => { - // Omit EPSSProbability object that has been added to imageCVE attributes. - const config = getSearchFilterConfigWithFeatureFlagDependency( - () => false, - [imageSearchFilterConfig, deploymentSearchFilterConfig, imageCVESearchFilterConfig] - ); - - const result = getEntityAttributes(config, 'Image CVE'); - - expect(result).toStrictEqual([ - { - displayName: 'CVSS', - filterChipLabel: 'CVSS', - searchTerm: 'CVSS', - inputType: 'condition-number', - }, - { - displayName: 'Discovered time', - filterChipLabel: 'Image CVE discovered time', - searchTerm: 'CVE Created Time', - inputType: 'date-picker', - }, - { - displayName: 'Name', - filterChipLabel: 'Image CVE', - searchTerm: 'CVE', - inputType: 'autocomplete', - }, - ]); - }); - }); - - describe('getDefaultEntity', () => { - it('should get the default (first) entity in a config object', () => { - const config = [ - imageSearchFilterConfig, - deploymentSearchFilterConfig, - imageCVESearchFilterConfig, - ]; - - const result = getDefaultEntityName(config); - - expect(result).toStrictEqual('Image'); - }); - }); - - describe('getDefaultAttribute', () => { - it('should get the default (first) attribute of a specific entity in a config object', () => { - const config = [ - imageSearchFilterConfig, - deploymentSearchFilterConfig, - imageCVESearchFilterConfig, - ]; - - const result = getDefaultAttributeName(config, 'Image CVE'); - - expect(result).toStrictEqual('CVSS'); - }); - }); - - describe('makeFilterChipDescriptors', () => { - it('should create an array of FilterChipGroupDescriptor objects from a config object', () => { - // Omit EPSSProbability object that has been added to imageCVE attributes. 
- const config = getSearchFilterConfigWithFeatureFlagDependency( - () => false, - [imageCVESearchFilterConfig] - ); - - const result = makeFilterChipDescriptors(config); - - expect(result).toStrictEqual([ - { - displayName: 'CVSS', - searchFilterName: 'CVSS', - }, - { - displayName: 'Image CVE discovered time', - searchFilterName: 'CVE Created Time', - }, - { - displayName: 'Image CVE', - searchFilterName: 'CVE', - }, - ]); - }); - }); }); diff --git a/ui/apps/platform/src/Components/CompoundSearchFilter/utils/utils.tsx b/ui/apps/platform/src/Components/CompoundSearchFilter/utils/utils.tsx index d895384dde9f6..b55cfe43fe650 100644 --- a/ui/apps/platform/src/Components/CompoundSearchFilter/utils/utils.tsx +++ b/ui/apps/platform/src/Components/CompoundSearchFilter/utils/utils.tsx @@ -12,10 +12,7 @@ import type { OnSearchPayload, OnSearchPayloadItem, OnSearchPayloadItemAdd, - SelectSearchFilterAttribute, - SelectSearchFilterGroupedOptions, SelectSearchFilterOption, - SelectSearchFilterOptions, } from '../types'; export const conditionMap = { @@ -102,111 +99,6 @@ export function getAttributeFromEntity( return attributeFound ?? entity?.attributes?.[0]; // default to first attribute } -export function getEntity( - config: CompoundSearchFilterConfig, - entityName: string -): CompoundSearchFilterEntity | undefined { - if (!config || !Array.isArray(config)) { - return undefined; - } - const entity = config.find((entity) => { - return entity.displayName === entityName; - }); - return entity; -} - -export function getAttribute( - config: CompoundSearchFilterConfig, - entityName: string, - attributeName: string -): CompoundSearchFilterAttribute | undefined { - const entity = getEntity(config, entityName); - return entity?.attributes?.find((attribute) => { - return attribute.displayName === attributeName; - }); -} - -export function getDefaultEntityName(config: CompoundSearchFilterConfig): string | undefined { - if (!config || !Array.isArray(config)) { - return undefined; - } - return config?.[0]?.displayName; -} - -export function getEntityAttributes( - config: CompoundSearchFilterConfig, - entityName: string -): CompoundSearchFilterAttribute[] { - const entity = getEntity(config, entityName); - return entity?.attributes ?? 
[]; -} - -export function getDefaultAttributeName( - config: CompoundSearchFilterConfig, - entityName: string -): string | undefined { - const attributes = getEntityAttributes(config, entityName); - return attributes?.[0]?.displayName; -} - -export function ensureConditionNumber(value: unknown): { condition: string; number: number } { - if ( - typeof value === 'object' && - value !== null && - 'condition' in value && - 'number' in value && - typeof value.condition === 'string' && - typeof value.number === 'number' - ) { - return { - condition: value.condition, - number: value.number, - }; - } - return { - condition: conditions[0], - number: 0, - }; -} - -export function ensureConditionDate(value: unknown): { condition: string; date: string } { - if ( - typeof value === 'object' && - value !== null && - 'condition' in value && - 'date' in value && - typeof value.condition === 'string' && - typeof value.date === 'string' - ) { - return { - condition: value.condition, - date: value.date, - }; - } - return { - condition: dateConditions[1], - date: '', - }; -} - -export function isSelectType( - attribute: CompoundSearchFilterAttribute -): attribute is SelectSearchFilterAttribute { - return attribute.inputType === 'select'; -} - -export function hasGroupedSelectOptions( - inputProps: SelectSearchFilterAttribute['inputProps'] -): inputProps is SelectSearchFilterGroupedOptions { - return 'groupOptions' in inputProps; -} - -export function hasSelectOptions( - inputProps: SelectSearchFilterAttribute['inputProps'] -): inputProps is SelectSearchFilterOptions { - return 'options' in inputProps; -} - // Pure function returns searchFilter updated according to payload from interactions. // Assume that update is needed because payload has already been filtered and is non-empty. export function updateSearchFilter( From e29f395d7631f290abcb205186947d60a94d2d7c Mon Sep 17 00:00:00 2001 From: Stephan Hesselmann Date: Mon, 26 Jan 2026 20:22:28 +0100 Subject: [PATCH 011/232] ROX-32453: add internal token api capability (#18642) --- central/sensor/service/service_impl.go | 3 + pkg/centralsensor/caps_list.go | 4 ++ sensor/common/centralproxy/authorizer.go | 4 +- sensor/common/centralproxy/handler.go | 56 +++++++++++------- sensor/common/centralproxy/handler_test.go | 69 ++++++++++++++++++++++ sensor/common/centralproxy/testutils.go | 47 ++++----------- 6 files changed, 127 insertions(+), 56 deletions(-) diff --git a/central/sensor/service/service_impl.go b/central/sensor/service/service_impl.go index 25ae9fcdbb7bc..0c179ae841efe 100644 --- a/central/sensor/service/service_impl.go +++ b/central/sensor/service/service_impl.go @@ -141,6 +141,9 @@ func (s *serviceImpl) Communicate(server central.SensorService_CommunicateServer if features.FlattenImageData.Enabled() { capabilities = append(capabilities, centralsensor.FlattenImageData) } + if features.OCPConsoleIntegration.Enabled() { + capabilities = append(capabilities, centralsensor.InternalTokenAPISupported.String()) + } preferences := s.manager.GetConnectionPreference(clusterID) diff --git a/pkg/centralsensor/caps_list.go b/pkg/centralsensor/caps_list.go index 90dce22980ef2..ffe95d4ffc10a 100644 --- a/pkg/centralsensor/caps_list.go +++ b/pkg/centralsensor/caps_list.go @@ -82,4 +82,8 @@ const ( // instead of the deprecated NodeInventoryACK message. This enables proper ACK/NACK handling for // VM index reports and future compliance-related messages. 
SensorACKSupport SensorCapability = "SensorACKSupport" + + // InternalTokenAPISupported identifies the capability of Central to issue internal tokens + // for authenticated proxy requests. + InternalTokenAPISupported CentralCapability = "InternalTokenAPISupported" ) diff --git a/sensor/common/centralproxy/authorizer.go b/sensor/common/centralproxy/authorizer.go index a6a1abbd411cd..dd418e0f34d07 100644 --- a/sensor/common/centralproxy/authorizer.go +++ b/sensor/common/centralproxy/authorizer.go @@ -103,7 +103,7 @@ func extractBearerToken(r *http.Request) (string, error) { headers := phonehome.Headers(r.Header) token := authn.ExtractToken(&headers, "Bearer") if token == "" { - return "", pkghttputil.Errorf(http.StatusUnauthorized, "missing or invalid bearer token") + return "", pkghttputil.NewError(http.StatusUnauthorized, "missing or invalid bearer token") } return token, nil } @@ -131,7 +131,7 @@ func (a *k8sAuthorizer) validateToken(ctx context.Context, token string) (*authe } if !result.Status.Authenticated { - return nil, pkghttputil.Errorf(http.StatusUnauthorized, "token authentication failed") + return nil, pkghttputil.NewError(http.StatusUnauthorized, "token authentication failed") } a.tokenCache.Add(token, &result.Status.User) diff --git a/sensor/common/centralproxy/handler.go b/sensor/common/centralproxy/handler.go index 06b16e40b0e8d..a59d6df632771 100644 --- a/sensor/common/centralproxy/handler.go +++ b/sensor/common/centralproxy/handler.go @@ -2,18 +2,21 @@ package centralproxy import ( "crypto/x509" + "fmt" "net/http" "net/http/httputil" "net/url" "sync/atomic" "github.com/pkg/errors" + "github.com/stackrox/rox/pkg/centralsensor" pkghttputil "github.com/stackrox/rox/pkg/httputil" "github.com/stackrox/rox/pkg/k8sutil" "github.com/stackrox/rox/pkg/logging" "github.com/stackrox/rox/pkg/retryablehttp" "github.com/stackrox/rox/pkg/urlfmt" "github.com/stackrox/rox/sensor/common" + "github.com/stackrox/rox/sensor/common/centralcaps" "google.golang.org/grpc" "k8s.io/client-go/kubernetes" ) @@ -25,6 +28,17 @@ var ( _ common.CentralGRPCConnAware = (*Handler)(nil) ) +// proxyErrorHandler is the error handler for the reverse proxy. +// It returns 503 for service unavailable errors and 500 for other errors. +func proxyErrorHandler(w http.ResponseWriter, _ *http.Request, err error) { + log.Errorf("Proxy error: %v", err) + if errors.Is(err, errServiceUnavailable) { + http.Error(w, fmt.Sprintf("proxy temporarily unavailable: %v", err), http.StatusServiceUnavailable) + return + } + http.Error(w, fmt.Sprintf("failed to contact central: %v", err), http.StatusInternalServerError) +} + // Handler handles HTTP proxy requests to Central. 
type Handler struct { centralReachable atomic.Bool @@ -51,22 +65,9 @@ func NewProxyHandler(centralEndpoint string, centralCertificates []*x509.Certifi transport := newScopedTokenTransport(baseTransport, clusterIDGetter) proxy := &httputil.ReverseProxy{ - Transport: transport, - Rewrite: func(r *httputil.ProxyRequest) { - r.SetURL(centralBaseURL) - }, - ErrorHandler: func(w http.ResponseWriter, r *http.Request, err error) { - log.Errorf("Proxy error: %v", err) - if errors.Is(err, errServiceUnavailable) { - pkghttputil.WriteError(w, - pkghttputil.Errorf(http.StatusServiceUnavailable, "proxy temporarily unavailable: %v", err), - ) - return - } - pkghttputil.WriteError(w, - pkghttputil.Errorf(http.StatusInternalServerError, "failed to contact central: %v", err), - ) - }, + Transport: transport, + Rewrite: func(r *httputil.ProxyRequest) { r.SetURL(centralBaseURL) }, + ErrorHandler: proxyErrorHandler, } restConfig, err := k8sutil.GetK8sInClusterConfig() @@ -122,27 +123,42 @@ func (h *Handler) validateRequest(request *http.Request) error { return nil } +// checkInternalTokenAPISupport checks if Central supports the internal token API capability. +// The proxy requires this capability to function; all requests are rejected if unsupported. +func checkInternalTokenAPISupport() error { + if !centralcaps.Has(centralsensor.InternalTokenAPISupported) { + return pkghttputil.NewError(http.StatusNotImplemented, + "proxy to Central is not available; Central does not support the internal token API required by this proxy") + } + return nil +} + // ServeHTTP handles incoming HTTP requests and proxies them to Central. func (h *Handler) ServeHTTP(writer http.ResponseWriter, request *http.Request) { + if err := checkInternalTokenAPISupport(); err != nil { + http.Error(writer, err.Error(), pkghttputil.StatusFromError(err)) + return + } + if err := h.validateRequest(request); err != nil { - pkghttputil.WriteError(writer, err) + http.Error(writer, err.Error(), pkghttputil.StatusFromError(err)) return } if h.authorizer == nil { log.Error("Authorizer is nil - this indicates a misconfiguration in the central proxy handler") - pkghttputil.WriteError(writer, pkghttputil.NewError(http.StatusInternalServerError, "authorizer not configured")) + http.Error(writer, "authorizer not configured", http.StatusInternalServerError) return } userInfo, err := h.authorizer.authenticate(request.Context(), request) if err != nil { - pkghttputil.WriteError(writer, err) + http.Error(writer, err.Error(), pkghttputil.StatusFromError(err)) return } if err := h.authorizer.authorize(request.Context(), userInfo, request); err != nil { - pkghttputil.WriteError(writer, err) + http.Error(writer, err.Error(), pkghttputil.StatusFromError(err)) return } diff --git a/sensor/common/centralproxy/handler_test.go b/sensor/common/centralproxy/handler_test.go index 12776408e0ba1..23cc84155738b 100644 --- a/sensor/common/centralproxy/handler_test.go +++ b/sensor/common/centralproxy/handler_test.go @@ -9,8 +9,10 @@ import ( "strings" "testing" + "github.com/stackrox/rox/pkg/centralsensor" pkghttputil "github.com/stackrox/rox/pkg/httputil" "github.com/stackrox/rox/sensor/common" + "github.com/stackrox/rox/sensor/common/centralcaps" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" authenticationv1 "k8s.io/api/authentication/v1" @@ -20,6 +22,17 @@ import ( k8sTesting "k8s.io/client-go/testing" ) +// setupCentralCapsForTest sets the Central capabilities required for the proxy to function. 
+// It returns a cleanup function that clears the capabilities. +func setupCentralCapsForTest(t *testing.T) { + centralcaps.Set([]centralsensor.CentralCapability{ + centralsensor.InternalTokenAPISupported, + }) + t.Cleanup(func() { + centralcaps.Set(nil) + }) +} + func TestValidateRequest(t *testing.T) { tests := []struct { name string @@ -100,6 +113,7 @@ func TestValidateRequest(t *testing.T) { func TestServeHTTP(t *testing.T) { t.Run("validation fails, proxy not called", func(t *testing.T) { + setupCentralCapsForTest(t) baseURL, err := url.Parse("https://central:443") require.NoError(t, err) @@ -116,6 +130,7 @@ func TestServeHTTP(t *testing.T) { }) t.Run("validation passes, request proxied", func(t *testing.T) { + setupCentralCapsForTest(t) var proxyCalled bool mockTransport := pkghttputil.RoundTripperFunc(func(req *http.Request) (*http.Response, error) { proxyCalled = true @@ -143,6 +158,8 @@ func TestServeHTTP(t *testing.T) { }) t.Run("proxy error handled by ErrorHandler", func(t *testing.T) { + setupCentralCapsForTest(t) + baseURL, err := url.Parse("https://central:443") require.NoError(t, err) @@ -158,6 +175,45 @@ func TestServeHTTP(t *testing.T) { assert.Equal(t, http.StatusInternalServerError, w.Code) assert.Contains(t, w.Body.String(), "failed to contact central") }) + + t.Run("request rejected when Central lacks internal token API capability", func(t *testing.T) { + // Explicitly clear central caps to simulate an older Central without the capability. + // Use cleanup to restore state and avoid cross-test interference. + centralcaps.Set(nil) + t.Cleanup(func() { + centralcaps.Set(nil) + }) + + var proxyCalled bool + mockTransport := pkghttputil.RoundTripperFunc(func(req *http.Request) (*http.Response, error) { + proxyCalled = true + return &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(strings.NewReader(`{"ok":true}`)), + Header: http.Header{"Content-Type": []string{"application/json"}}, + }, nil + }) + + baseURL, err := url.Parse("https://central:443") + require.NoError(t, err) + + h := &Handler{ + proxy: newReverseProxyForTest(baseURL, mockTransport), + authorizer: newAllowingAuthorizer(t), + } + h.centralReachable.Store(true) + + req := httptest.NewRequest(http.MethodGet, "/v1/alerts", nil) + req.Header.Set("Authorization", "Bearer test-token") + w := httptest.NewRecorder() + + h.ServeHTTP(w, req) + + assert.False(t, proxyCalled, "proxy should not be called when capability is missing") + assert.Equal(t, http.StatusNotImplemented, w.Code) + assert.Contains(t, w.Body.String(), "proxy to Central is not available") + assert.Contains(t, w.Body.String(), "internal token API") + }) } func TestServeHTTP_ConstructsAbsoluteURLs(t *testing.T) { @@ -197,6 +253,7 @@ func TestServeHTTP_ConstructsAbsoluteURLs(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + setupCentralCapsForTest(t) baseURL, err := url.Parse(tt.baseURL) assert.NoError(t, err) @@ -322,6 +379,7 @@ func TestExtractBearerToken(t *testing.T) { func TestServeHTTP_AuthorizationIntegration(t *testing.T) { t.Run("authorization failure prevents proxy call", func(t *testing.T) { + setupCentralCapsForTest(t) var proxyCalled bool mockTransport := pkghttputil.RoundTripperFunc(func(req *http.Request) (*http.Response, error) { proxyCalled = true @@ -355,6 +413,7 @@ func TestServeHTTP_AuthorizationIntegration(t *testing.T) { }) t.Run("no authorizer returns server error", func(t *testing.T) { + setupCentralCapsForTest(t) var proxyCalled bool mockTransport := 
pkghttputil.RoundTripperFunc(func(req *http.Request) (*http.Response, error) { proxyCalled = true @@ -382,6 +441,7 @@ func TestServeHTTP_AuthorizationIntegration(t *testing.T) { }) t.Run("authorization success allows proxy call", func(t *testing.T) { + setupCentralCapsForTest(t) var proxyCalled bool mockTransport := pkghttputil.RoundTripperFunc(func(req *http.Request) (*http.Response, error) { proxyCalled = true @@ -411,6 +471,7 @@ func TestServeHTTP_AuthorizationIntegration(t *testing.T) { func TestServeHTTP_NamespaceScopeBasedAuthorization(t *testing.T) { t.Run("empty namespace scope skips SAR check", func(t *testing.T) { + setupCentralCapsForTest(t) var proxyCalled bool mockTransport := pkghttputil.RoundTripperFunc(func(req *http.Request) (*http.Response, error) { proxyCalled = true @@ -443,6 +504,7 @@ func TestServeHTTP_NamespaceScopeBasedAuthorization(t *testing.T) { }) t.Run("specific namespace scope triggers SAR check", func(t *testing.T) { + setupCentralCapsForTest(t) var proxyCalled bool mockTransport := pkghttputil.RoundTripperFunc(func(req *http.Request) (*http.Response, error) { proxyCalled = true @@ -473,6 +535,7 @@ func TestServeHTTP_NamespaceScopeBasedAuthorization(t *testing.T) { }) t.Run("cluster-wide scope (*) triggers SAR check", func(t *testing.T) { + setupCentralCapsForTest(t) var proxyCalled bool mockTransport := pkghttputil.RoundTripperFunc(func(req *http.Request) (*http.Response, error) { proxyCalled = true @@ -503,6 +566,7 @@ func TestServeHTTP_NamespaceScopeBasedAuthorization(t *testing.T) { }) t.Run("namespace scope with valid permissions succeeds", func(t *testing.T) { + setupCentralCapsForTest(t) var proxyCalled bool mockTransport := pkghttputil.RoundTripperFunc(func(req *http.Request) (*http.Response, error) { proxyCalled = true @@ -556,6 +620,7 @@ func TestServeHTTP_NamespaceScopeBasedAuthorization(t *testing.T) { func TestServeHTTP_TransportFailure(t *testing.T) { t.Run("transport failure returns 500", func(t *testing.T) { + setupCentralCapsForTest(t) baseURL, err := url.Parse("https://central:443") require.NoError(t, err) @@ -573,6 +638,7 @@ func TestServeHTTP_TransportFailure(t *testing.T) { }) t.Run("initialization error returns 503", func(t *testing.T) { + setupCentralCapsForTest(t) baseURL, err := url.Parse("https://central:443") require.NoError(t, err) @@ -593,6 +659,7 @@ func TestServeHTTP_TransportFailure(t *testing.T) { func TestServeHTTP_TokenInjection(t *testing.T) { t.Run("token is injected into proxied request", func(t *testing.T) { + setupCentralCapsForTest(t) expectedToken := "dynamic-central-token-123" var capturedAuthHeader string @@ -624,6 +691,7 @@ func TestServeHTTP_TokenInjection(t *testing.T) { func TestServeHTTP_RequiresAuthentication(t *testing.T) { t.Run("missing token returns 401", func(t *testing.T) { + setupCentralCapsForTest(t) var proxyCalled bool mockTransport := pkghttputil.RoundTripperFunc(func(req *http.Request) (*http.Response, error) { proxyCalled = true @@ -652,6 +720,7 @@ func TestServeHTTP_RequiresAuthentication(t *testing.T) { }) t.Run("invalid token returns 401", func(t *testing.T) { + setupCentralCapsForTest(t) var proxyCalled bool mockTransport := pkghttputil.RoundTripperFunc(func(req *http.Request) (*http.Response, error) { proxyCalled = true diff --git a/sensor/common/centralproxy/testutils.go b/sensor/common/centralproxy/testutils.go index 38605a9344321..7f23e448faf00 100644 --- a/sensor/common/centralproxy/testutils.go +++ b/sensor/common/centralproxy/testutils.go @@ -8,7 +8,6 @@ import ( "testing" 
"github.com/pkg/errors" - pkghttputil "github.com/stackrox/rox/pkg/httputil" authenticationv1 "k8s.io/api/authentication/v1" authv1 "k8s.io/api/authorization/v1" "k8s.io/apimachinery/pkg/runtime" @@ -16,6 +15,17 @@ import ( k8sTesting "k8s.io/client-go/testing" ) +// newReverseProxyForTest creates a reverse proxy with the given transport and base URL. +// It uses the shared proxyErrorHandler from handler.go to ensure consistent error handling +// between production and test code. +func newReverseProxyForTest(baseURL *url.URL, transport http.RoundTripper) *httputil.ReverseProxy { + return &httputil.ReverseProxy{ + Transport: transport, + Rewrite: func(r *httputil.ProxyRequest) { r.SetURL(baseURL) }, + ErrorHandler: proxyErrorHandler, + } +} + // testClusterIDGetter is a test implementation of clusterIDGetter. type testClusterIDGetter struct { clusterID string @@ -53,22 +63,10 @@ func newTestHandler(t *testing.T, baseURL *url.URL, baseTransport http.RoundTrip token: token, } - proxy := &httputil.ReverseProxy{ - Transport: transport, - Rewrite: func(r *httputil.ProxyRequest) { - r.SetURL(baseURL) - }, - ErrorHandler: func(w http.ResponseWriter, r *http.Request, err error) { - pkghttputil.WriteError(w, - pkghttputil.Errorf(http.StatusInternalServerError, "failed to contact central: %v", err), - ) - }, - } - return &Handler{ clusterIDGetter: &testClusterIDGetter{clusterID: "test-cluster-id"}, authorizer: authorizer, - proxy: proxy, + proxy: newReverseProxyForTest(baseURL, transport), } } @@ -80,29 +78,10 @@ func newTestHandlerWithTransportError(t *testing.T, baseURL *url.URL, authorizer err: transportErr, } - proxy := &httputil.ReverseProxy{ - Transport: transport, - Rewrite: func(r *httputil.ProxyRequest) { - r.SetURL(baseURL) - }, - ErrorHandler: func(w http.ResponseWriter, r *http.Request, err error) { - // Match production error handling: return 503 for initialization errors - if errors.Is(err, errServiceUnavailable) { - pkghttputil.WriteError(w, - pkghttputil.Errorf(http.StatusServiceUnavailable, "proxy temporarily unavailable: %v", err), - ) - return - } - pkghttputil.WriteError(w, - pkghttputil.Errorf(http.StatusInternalServerError, "failed to contact central: %v", err), - ) - }, - } - return &Handler{ clusterIDGetter: &testClusterIDGetter{clusterID: "test-cluster-id"}, authorizer: authorizer, - proxy: proxy, + proxy: newReverseProxyForTest(baseURL, transport), } } From 3c56e7e9646ec9b1eaf2b16963196d2abe9ab5d9 Mon Sep 17 00:00:00 2001 From: Piotr Rygielski <114479+vikin91@users.noreply.github.com> Date: Mon, 26 Jan 2026 20:39:55 +0100 Subject: [PATCH 012/232] ROX-32638: Populate VMReport discovered data from RHEL VM (#18602) --- .../virtualmachines/roxagent/cmd/cmd.go | 7 +- .../virtualmachines/roxagent/vsock/client.go | 21 +- .../roxagent/vsock/discovery.go | 326 ++++++++++ .../roxagent/vsock/discovery_test.go | 559 ++++++++++++++++++ 4 files changed, 901 insertions(+), 12 deletions(-) create mode 100644 compliance/virtualmachines/roxagent/vsock/discovery.go create mode 100644 compliance/virtualmachines/roxagent/vsock/discovery_test.go diff --git a/compliance/virtualmachines/roxagent/cmd/cmd.go b/compliance/virtualmachines/roxagent/cmd/cmd.go index 197d55064ee37..6e4cd207927b5 100644 --- a/compliance/virtualmachines/roxagent/cmd/cmd.go +++ b/compliance/virtualmachines/roxagent/cmd/cmd.go @@ -45,7 +45,12 @@ func RootCmd(ctx context.Context) *cobra.Command { "VSock port to connect with the virtual machine host.", ) cmd.Run = func(cmd *cobra.Command, _ []string) { - client := 
&vsock.Client{Port: cfg.VsockPort, Timeout: cfg.Timeout, Verbose: cfg.Verbose} + client := &vsock.Client{ + Port: cfg.VsockPort, + HostPath: cfg.IndexHostPath, + Timeout: cfg.Timeout, + Verbose: cfg.Verbose, + } if cfg.DaemonMode { if err := index.RunDaemon(ctx, cfg, client); err != nil { log.Errorf("Running indexer daemon: %v", err) diff --git a/compliance/virtualmachines/roxagent/vsock/client.go b/compliance/virtualmachines/roxagent/vsock/client.go index b1def0c527ca4..10d523d07182b 100644 --- a/compliance/virtualmachines/roxagent/vsock/client.go +++ b/compliance/virtualmachines/roxagent/vsock/client.go @@ -17,9 +17,10 @@ import ( var log = logging.LoggerForModule() type Client struct { - Port uint32 - Timeout time.Duration - Verbose bool + Port uint32 + HostPath string + Timeout time.Duration + Verbose bool } func (c *Client) SendIndexReport(report *v4.IndexReport) error { @@ -32,15 +33,13 @@ func (c *Client) SendIndexReport(report *v4.IndexReport) error { IndexV4: report, } - // Create VMReport with placeholder discovered data values. + // Discover VM data from host system + discovered := DiscoverVMData(c.HostPath) + + // Create VMReport with discovered data values. vmReport := &v1.VMReport{ - IndexReport: wrappedReport, - DiscoveredData: &v1.DiscoveredData{ - DetectedOs: v1.DetectedOS_UNKNOWN, // TODO: get proper values from VM. - OsVersion: "", - ActivationStatus: v1.ActivationStatus_ACTIVATION_UNSPECIFIED, - DnfMetadataStatus: v1.DnfMetadataStatus_DNF_METADATA_UNSPECIFIED, - }, + IndexReport: wrappedReport, + DiscoveredData: discovered, } if c.Verbose { diff --git a/compliance/virtualmachines/roxagent/vsock/discovery.go b/compliance/virtualmachines/roxagent/vsock/discovery.go new file mode 100644 index 0000000000000..8631115bd80e3 --- /dev/null +++ b/compliance/virtualmachines/roxagent/vsock/discovery.go @@ -0,0 +1,326 @@ +package vsock + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/hashicorp/go-multierror" + v1 "github.com/stackrox/rox/generated/internalapi/virtualmachine/v1" + "github.com/stackrox/rox/pkg/set" +) + +const ( + osReleasePath = "/etc/os-release" + entitlementDirPath = "/etc/pki/entitlement" + dnfCacheDirPath = "/var/cache/dnf" + entitlementKeySuffix = "-key.pem" + entitlementCertSuffix = ".pem" + // osReleaseIDKey is the key name for the ID field in /etc/os-release. + osReleaseIDKey = "ID" + // osReleaseVersionIDKey is the key name for the VERSION_ID field in /etc/os-release. + osReleaseVersionIDKey = "VERSION_ID" + // rhelOSID is the value of the ID field in /etc/os-release for Red Hat Enterprise Linux. + rhelOSID = "rhel" +) + +var defaultReposDirs = []string{ + "/etc/yum.repos.d", + "/etc/yum/repos.d", + "/etc/distro.repos.d", +} + +// DiscoverVMData discovers VM metadata from the host system. +// Returns discovered data with defaults (UNKNOWN/UNSPECIFIED) if discovery fails. +func DiscoverVMData(hostPath string) *v1.DiscoveredData { + result := &v1.DiscoveredData{} + + // Discover OS and version from /etc/os-release. + // Currently assumes RHEL systems: reads /etc/os-release, checks if ID field equals "rhel" to detect RHEL, + // and extracts VERSION_ID field as the OS version. Falls back to UNKNOWN for non-RHEL systems. + // This behavior is based on assumptions about /etc/os-release format and RHEL-specific values. + // Future improvements may include support for other OS types and more robust version detection. 
+ detectedOS, osVersion, err := discoverOSAndVersionWithPath(hostPathFor(hostPath, osReleasePath)) + if err != nil { + log.Infof("Unable to discover OS and version: %v", err) + } else { + result.DetectedOs = detectedOS + result.OsVersion = osVersion + } + + // Discover activation status from /etc/pki/entitlement. + // Currently assumes RHEL entitlement certificates: checks for matching cert/key pairs by filename + // (e.g., "123-key.pem" and "123.pem"). System is considered ACTIVATED if at least one matching pair exists, + // otherwise INACTIVE. This behavior is based on assumptions about RHEL entitlement certificate naming + // conventions and file structure. Future improvements may include actual certificate validation and + // support for other activation mechanisms. + activationStatus, err := discoverActivationStatusWithPath(hostPathFor(hostPath, entitlementDirPath)) + if err != nil { + log.Infof("Observations during discovering activation status: %v", err) + } + // Some errors are of a warning nature, so we still set the discovery result. + result.ActivationStatus = activationStatus + + // Discover DNF metadata status. + dnfStatus, err := discoverDnfMetadataStatusWithPaths( + hostPath, + defaultReposDirs, + dnfCacheDirPath, + ) + if err != nil { + log.Infof("Observations during discovering DNF metadata status: %v", err) + } + // Some errors are of a warning nature, so we still set the discovery result. + result.DnfMetadataStatus = dnfStatus + + return result +} + +func hostPathFor(hostPath, path string) string { + if hostPath == "" || hostPath == string(os.PathSeparator) { + return path + } + // This join+clean approach is safe (no escape from hostPath) only when + // the input path is absolute (e.g., "/etc/os-release"). For example, + // hostPath="/host", path="/../etc/os-release" would clean to "/etc/os-release". + trimmedPath := strings.TrimPrefix(path, string(os.PathSeparator)) + return filepath.Clean(filepath.Join(hostPath, trimmedPath)) +} + +// discoverOSAndVersionWithPath reads os-release from the given path and returns DetectedOS and OSVersion. +func discoverOSAndVersionWithPath(path string) (v1.DetectedOS, string, error) { + file, err := os.Open(path) + if err != nil { + if os.IsNotExist(err) { + return v1.DetectedOS_UNKNOWN, "", fmt.Errorf("unsupported OS detected: missing %s: %w", path, err) + } + return v1.DetectedOS_UNKNOWN, "", fmt.Errorf("opening %s: %w", path, err) + } + defer func() { + if err := file.Close(); err != nil { + log.Warnf("Failed to close %s: %v", path, err) + } + }() + + osRelease, err := parseOSRelease(file) + if err != nil { + return v1.DetectedOS_UNKNOWN, "", fmt.Errorf("reading %s: %w", path, err) + } + + // Determine DetectedOS based on ID field + var detectedOS v1.DetectedOS + if id, ok := osRelease[osReleaseIDKey]; ok && strings.TrimSpace(id) == rhelOSID { + detectedOS = v1.DetectedOS_RHEL + } else { + detectedOS = v1.DetectedOS_UNKNOWN + } + + // Get OS version from VERSION_ID + var osVersion string + if versionID, ok := osRelease[osReleaseVersionIDKey]; ok { + if detectedOS != v1.DetectedOS_UNKNOWN { + return detectedOS, strings.TrimSpace(versionID), nil + } + // For non-RHEL systems, store the name of the OS (ID) and version (VERSION_ID) together. + // The version field is only informative and used for debugging in case of problems with scanning; + // we want to know which OS and version caused a potential issue. 
+ osID := strings.TrimSpace(osRelease[osReleaseIDKey]) + if osID == "" { + osID = "unknown-OS" + } + osVersion = fmt.Sprintf("%s %s", osID, versionID) + } + + return detectedOS, osVersion, nil +} + +// parseOSRelease parses /etc/os-release key-value pairs. +// +// We copy ClairCore's os-release parser instead of importing it to avoid pulling +// in heavy scanner/indexer dependencies into roxagent. As Rob Pike put it, +// "a little bit of copying is better than a little bit of dependency." +// +// Source (copied, adapted to our usage): +// https://github.com/quay/claircore/blob/9f69181a1555935c8840a9191c91567e55b9cf0c/osrelease/scanner.go +func parseOSRelease(r io.Reader) (map[string]string, error) { + osRelease := make(map[string]string) + scanner := bufio.NewScanner(r) + scanner.Split(bufio.ScanLines) + for scanner.Scan() { + b := bytes.TrimSpace(scanner.Bytes()) + switch { + case len(b) == 0: + continue + case b[0] == '#': + continue + } + if !bytes.ContainsRune(b, '=') { + return nil, fmt.Errorf("osrelease: malformed line %q", scanner.Text()) + } + key, value, _ := strings.Cut(string(b), "=") + key = strings.TrimSpace(key) + value = strings.TrimSpace(value) + switch { + case len(value) == 0: + case value[0] == '\'': + value = strings.TrimFunc(value, func(r rune) bool { return r == '\'' }) + value = strings.ReplaceAll(value, `'\''`, `'`) + case value[0] == '"': + value = strings.TrimFunc(value, func(r rune) bool { return r == '"' }) + value = osReleaseDQReplacer.Replace(value) + default: + } + osRelease[key] = value + } + if err := scanner.Err(); err != nil { + return nil, err + } + return osRelease, nil +} + +var osReleaseDQReplacer = strings.NewReplacer( + "\\`", "`", + `\\`, `\`, + `\"`, `"`, + `\$`, `$`, +) + +// discoverActivationStatusWithPath checks the given path for matching cert/key pairs. +func discoverActivationStatusWithPath(path string) (v1.ActivationStatus, error) { + entries, err := os.ReadDir(path) + if err != nil { + return v1.ActivationStatus_ACTIVATION_UNSPECIFIED, fmt.Errorf("reading %s: %w", path, err) + } + + // Build sets of base names (without suffix) for keys and certs + keyBases := set.NewStringSet() + certBases := set.NewStringSet() + + // The `entries` are already sorted by name, so optimistically we just need to check two files. + // We can stop when first matching pair is found. + for _, entry := range entries { + if entry.IsDir() { + continue + } + + name := entry.Name() + if strings.HasSuffix(name, entitlementKeySuffix) { + base := strings.TrimSuffix(name, entitlementKeySuffix) + keyBases.Add(base) + if certBases.Contains(base) { + return v1.ActivationStatus_ACTIVE, nil + } + } else if strings.HasSuffix(name, entitlementCertSuffix) { + base := strings.TrimSuffix(name, entitlementCertSuffix) + certBases.Add(base) + if keyBases.Contains(base) { + return v1.ActivationStatus_ACTIVE, nil + } + } + } + + return v1.ActivationStatus_INACTIVE, nil +} + +// discoverDnfMetadataStatusWithPaths checks both repos and cache directories. +// Currently assumes RHEL DNF setup: checks for both (1) at least one *.repo file in a default reposdir +// (/etc/yum.repos.d, /etc/yum/repos.d, /etc/distro.repos.d) and (2) at least one directory in +// /var/cache/dnf containing "-rpms-" in its name. Metadata is considered AVAILABLE only if both +// conditions are met. 
+func discoverDnfMetadataStatusWithPaths(hostPath string, reposDirPaths []string, cacheDirPath string) (v1.DnfMetadataStatus, error) { + hasRepoDir, hasRepoFile, err := discoverDnfRepoFilePresence(hostPath, reposDirPaths) + if !hasRepoDir { + return v1.DnfMetadataStatus_DNF_METADATA_UNSPECIFIED, err + } + if !hasRepoFile { + return v1.DnfMetadataStatus_UNAVAILABLE, err + } + + hasCacheRepoDir, err := discoverDnfCacheRepoDirPresence(hostPath, cacheDirPath) + if err != nil { + return v1.DnfMetadataStatus_DNF_METADATA_UNSPECIFIED, err + } + + if hasCacheRepoDir { + return v1.DnfMetadataStatus_AVAILABLE, nil + } + return v1.DnfMetadataStatus_UNAVAILABLE, nil +} + +// discoverDnfRepoFilePresence reports whether any default reposdir contains a *.repo file. +// Assumptions and behavior: +// - Uses the default DNF reposdir locations: /etc/yum.repos.d, /etc/yum/repos.d, /etc/distro.repos.d. +// - Returns hasDir=true when at least one reposdir is readable. +// - Returns hasRepo=true as soon as a *.repo file is found in any reposdir. +// - If reposdirs are unreadable, or contain no *.repo files, returns an aggregated error describing each failure. +// - There is no support for DNF 5 defaults currently. +// - Tested against DNF 4 defaults (libdnf ConfigMain.cpp): +// https://github.com/rpm-software-management/libdnf/blob/53839f5bd88f378e57a1f1671b3db48d29984e24/libdnf/conf/ConfigMain.cpp +// - DNF 5 uses a different reposdir list (/etc/yum.repos.d, /etc/distro.repos.d, /usr/share/dnf5/repos.d), +// so this logic may miss repos configured only in the DNF 5 default path: +// https://github.com/rpm-software-management/dnf5/blob/185eaef1e0ad663bdb827a2179ab1df574a27d88/include/libdnf5/conf/const.hpp +func discoverDnfRepoFilePresence(hostPath string, reposDirPaths []string) (hasDir, hasRepo bool, err error) { + if len(reposDirPaths) == 0 { + return false, false, errors.New("missing repository directories") + } + + // Check for repo files in default reposdir locations. + var repoDirErrs *multierror.Error + for _, reposDirPath := range reposDirPaths { + reposPath := hostPathFor(hostPath, reposDirPath) + repoEntries, err := os.ReadDir(reposPath) + if err != nil { + repoDirErrs = multierror.Append(repoDirErrs, fmt.Errorf("reading %q: %w", reposPath, err)) + continue + } + // If at least one directory exists and is readable, we don't need to return DNF_METADATA_UNSPECIFIED. + hasDir = true + + for _, entry := range repoEntries { + if !entry.IsDir() && strings.HasSuffix(entry.Name(), ".repo") { + return true, true, nil + } + } + repoDirErrs = multierror.Append(repoDirErrs, fmt.Errorf("no .repo files found in %q", reposPath)) + + } + + return hasDir, hasRepo, repoDirErrs.ErrorOrNil() +} + +// discoverDnfCacheRepoDirPresence reports whether the DNF cache contains repo directories. +// Assumptions and behavior: +// - Uses the default system cachedir at /var/cache/dnf (libdnf Const.hpp): +// https://github.com/rpm-software-management/libdnf/blob/53839f5bd88f378e57a1f1671b3db48d29984e24/libdnf/conf/Const.hpp +// - Treats any subdirectory containing "-rpms-" as a repo cache directory. +// - Returns true as soon as a matching directory is found. +// - If the cache directory is missing, returns an "unsupported OS detected" error. +// - If the cache directory exists but cannot be read, returns a read error. +// - There is no support for DNF 5 defaults currently. 
+// - Tested against DNF 4 defaults; DNF 5 uses /var/cache/libdnf5: +// https://github.com/rpm-software-management/dnf5/blob/185eaef1e0ad663bdb827a2179ab1df574a27d88/include/libdnf5/conf/const.hpp +func discoverDnfCacheRepoDirPresence(hostPath, cacheDirPath string) (bool, error) { + cachePath := hostPathFor(hostPath, cacheDirPath) + cacheEntries, err := os.ReadDir(cachePath) + if err != nil { + if os.IsNotExist(err) { + return false, fmt.Errorf("unsupported OS detected: missing %s: %w", cachePath, err) + } + return false, fmt.Errorf("reading %s: %w", cachePath, err) + } + + for _, entry := range cacheEntries { + if entry.IsDir() { + // Check if it looks like a repo directory (contains "-rpms-" pattern) + if strings.Contains(entry.Name(), "-rpms-") { + return true, nil + } + } + } + return false, nil +} diff --git a/compliance/virtualmachines/roxagent/vsock/discovery_test.go b/compliance/virtualmachines/roxagent/vsock/discovery_test.go new file mode 100644 index 0000000000000..a4454326544c3 --- /dev/null +++ b/compliance/virtualmachines/roxagent/vsock/discovery_test.go @@ -0,0 +1,559 @@ +package vsock + +import ( + "os" + "path/filepath" + "strings" + "testing" + + v1 "github.com/stackrox/rox/generated/internalapi/virtualmachine/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDiscoverOSAndVersion(t *testing.T) { + tests := []struct { + name string + osRelease string + expectedOS v1.DetectedOS + expectedVersion string + }{ + { + name: "RHEL 8.10", + osRelease: `NAME="Red Hat Enterprise Linux" +VERSION="8.10 (Ootpa)" +ID="rhel" +ID_LIKE="fedora" +VERSION_ID="8.10" +PLATFORM_ID="platform:el8" +PRETTY_NAME="Red Hat Enterprise Linux 8.10 (Ootpa)" +ANSI_COLOR="0;31" +CPE_NAME="cpe:/o:redhat:enterprise_linux:8::baseos" +HOME_URL="https://www.redhat.com/" +DOCUMENTATION_URL="https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8" +BUG_REPORT_URL="https://issues.redhat.com/" + +REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 8" +REDHAT_BUGZILLA_PRODUCT_VERSION=8.10 +REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux" +REDHAT_SUPPORT_PRODUCT_VERSION="8.10"`, + expectedOS: v1.DetectedOS_RHEL, + expectedVersion: "8.10", + }, + { + name: "RHEL 9", + osRelease: `ID="rhel" +VERSION_ID="9.2"`, + expectedOS: v1.DetectedOS_RHEL, + expectedVersion: "9.2", + }, + { + name: "Non-RHEL OS", + osRelease: `ID="debian" +VERSION_ID="12"`, + expectedOS: v1.DetectedOS_UNKNOWN, + expectedVersion: "debian 12", + }, + { + name: "ID field missing but VERSION_ID is present", + osRelease: `VERSION_ID="12"`, + expectedOS: v1.DetectedOS_UNKNOWN, + expectedVersion: "unknown-OS 12", + }, + { + name: "ID and VERSION_ID fields missing", + osRelease: ``, + expectedOS: v1.DetectedOS_UNKNOWN, + expectedVersion: "", + }, + { + name: "Unknown OS with version only", + osRelease: `VERSION_ID="10"`, + expectedOS: v1.DetectedOS_UNKNOWN, + expectedVersion: "unknown-OS 10", + }, + { + name: "Missing VERSION_ID", + osRelease: `ID="rhel"`, + expectedOS: v1.DetectedOS_RHEL, + expectedVersion: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tmpDir := t.TempDir() + testOsReleasePath := filepath.Join(tmpDir, "os-release") + + err := os.WriteFile(testOsReleasePath, []byte(tt.osRelease), 0644) + require.NoError(t, err) + + // Use a testable version that accepts a path + detectedOS, osVersion, err := discoverOSAndVersionWithPath(testOsReleasePath) + require.NoError(t, err) + assert.Equal(t, tt.expectedOS, detectedOS) + assert.Equal(t, 
tt.expectedVersion, osVersion) + }) + } +} + +func TestDiscoverOSAndVersion_MissingFile(t *testing.T) { + tmpDir := t.TempDir() + missingPath := filepath.Join(tmpDir, "nonexistent") + + detectedOS, osVersion, err := discoverOSAndVersionWithPath(missingPath) + assert.Error(t, err) + assert.Equal(t, v1.DetectedOS_UNKNOWN, detectedOS) + assert.Equal(t, "", osVersion) +} + +func TestDiscoverOSAndVersion_MalformedOSRelease(t *testing.T) { + tmpDir := t.TempDir() + testOsReleasePath := filepath.Join(tmpDir, "os-release") + + err := os.WriteFile(testOsReleasePath, []byte("ID=rhel\nMALFORMED_LINE"), 0644) + require.NoError(t, err) + + detectedOS, osVersion, err := discoverOSAndVersionWithPath(testOsReleasePath) + assert.Error(t, err) + assert.Equal(t, v1.DetectedOS_UNKNOWN, detectedOS) + assert.Equal(t, "", osVersion) +} + +func TestDiscoverDnfRepoFilePresence(t *testing.T) { + tests := map[string]struct { + setup func(t *testing.T) (string, []string) + expectedHasDir bool + expectedHasRepo bool + expectedErrParts []string + }{ + "should return true when repo file exists in reposdir": { + setup: func(t *testing.T) (string, []string) { + hostPath := t.TempDir() + reposDirPath := "/etc/yum.repos.d" + reposPath := hostPathFor(hostPath, reposDirPath) + require.NoError(t, os.MkdirAll(reposPath, 0750)) + repoFilePath := filepath.Join(reposPath, "test.repo") + require.NoError(t, os.WriteFile(repoFilePath, []byte("content"), 0600)) + return hostPath, []string{reposDirPath} + }, + expectedHasDir: true, + expectedHasRepo: true, + }, + "should return error when all reposdirs are unreadable": { + setup: func(t *testing.T) (string, []string) { + hostPath := t.TempDir() + return hostPath, []string{"/etc/yum.repos.d", "/etc/yum/repos.d"} + }, + expectedHasDir: false, + expectedHasRepo: false, + expectedErrParts: []string{"reading", "no such file or directory"}, + }, + "should return error when reposdirs are readable but no repo files exist": { + setup: func(t *testing.T) (string, []string) { + hostPath := t.TempDir() + reposDirPaths := []string{"/etc/yum.repos.d", "/etc/yum/repos.d"} + for _, reposDirPath := range reposDirPaths { + reposPath := hostPathFor(hostPath, reposDirPath) + require.NoError(t, os.MkdirAll(reposPath, 0750)) + require.NoError(t, os.WriteFile(filepath.Join(reposPath, "not-a-repo.txt"), []byte("content"), 0600)) + } + return hostPath, reposDirPaths + }, + expectedHasDir: true, + expectedHasRepo: false, + expectedErrParts: []string{"no .repo files found"}, + }, + "should return true when repo file exists even if other reposdir is missing": { + setup: func(t *testing.T) (string, []string) { + hostPath := t.TempDir() + reposDirPaths := []string{"/etc/yum.repos.d", "/etc/yum/repos.d"} + reposPath := hostPathFor(hostPath, reposDirPaths[0]) + require.NoError(t, os.MkdirAll(reposPath, 0750)) + require.NoError(t, os.WriteFile(filepath.Join(reposPath, "example.repo"), []byte("content"), 0600)) + return hostPath, reposDirPaths + }, + expectedHasDir: true, + expectedHasRepo: true, + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + hostPath, reposDirPaths := tt.setup(t) + hasDir, hasRepo, err := discoverDnfRepoFilePresence(hostPath, reposDirPaths) + assert.Equal(t, tt.expectedHasDir, hasDir) + assert.Equal(t, tt.expectedHasRepo, hasRepo) + if len(tt.expectedErrParts) == 0 { + assert.NoError(t, err) + return + } + require.Error(t, err) + for _, part := range tt.expectedErrParts { + assert.Contains(t, err.Error(), part) + } + }) + } +} + +func 
TestDiscoverDnfCacheRepoDirPresence(t *testing.T) { + tests := map[string]struct { + setup func(t *testing.T) (string, string) + expectedFound bool + expectedErrParts []string + }{ + "should return true when cache dir contains repo directory": { + setup: func(t *testing.T) (string, string) { + hostPath := t.TempDir() + cacheDirPath := "/var/cache/dnf" + cachePath := hostPathFor(hostPath, cacheDirPath) + require.NoError(t, os.MkdirAll(filepath.Join(cachePath, "rhel-9-for-x86_64-appstream-rpms-123"), 0750)) + return hostPath, cacheDirPath + }, + expectedFound: true, + }, + "should return false when cache dir has no repo directories": { + setup: func(t *testing.T) (string, string) { + hostPath := t.TempDir() + cacheDirPath := "/var/cache/dnf" + cachePath := hostPathFor(hostPath, cacheDirPath) + require.NoError(t, os.MkdirAll(filepath.Join(cachePath, "some-other-dir"), 0750)) + return hostPath, cacheDirPath + }, + expectedFound: false, + }, + "should return error when cache dir is missing": { + setup: func(t *testing.T) (string, string) { + hostPath := t.TempDir() + return hostPath, "/var/cache/dnf" + }, + expectedFound: false, + expectedErrParts: []string{"unsupported OS detected: missing"}, + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + hostPath, cacheDirPath := tt.setup(t) + found, err := discoverDnfCacheRepoDirPresence(hostPath, cacheDirPath) + assert.Equal(t, tt.expectedFound, found) + if len(tt.expectedErrParts) == 0 { + assert.NoError(t, err) + return + } + require.Error(t, err) + for _, part := range tt.expectedErrParts { + assert.Contains(t, err.Error(), part) + } + }) + } +} + +func TestParseOSRelease_QuotedValues(t *testing.T) { + input := strings.NewReader(`# comment +ID='rhel' +VERSION_ID="9.4" +NAME="Red Hat \$NAME" +`) + + values, err := parseOSRelease(input) + require.NoError(t, err) + assert.Equal(t, "rhel", values["ID"]) + assert.Equal(t, "9.4", values["VERSION_ID"]) + assert.Equal(t, "Red Hat $NAME", values["NAME"]) +} + +func TestHostPathFor(t *testing.T) { + tests := []struct { + name string + hostPath string + path string + expected string + }{ + { + name: "Empty host path uses original path", + hostPath: "", + path: "/etc/os-release", + expected: "/etc/os-release", + }, + { + name: "Root host path uses original path", + hostPath: "/", + path: "/etc/os-release", + expected: "/etc/os-release", + }, + { + name: "Prefix host path joins with absolute path", + hostPath: "/host", + path: "/etc/os-release", + expected: "/host/etc/os-release", + }, + { + name: "Prefix host path joins with nested path", + hostPath: "/host/rootfs", + path: "/var/cache/dnf", + expected: "/host/rootfs/var/cache/dnf", + }, + { + name: "Cleaned path removes dot segments", + hostPath: "/root/../host", + path: "/var/lib/../cache//dnf/", + expected: "/host/var/cache/dnf", + }, + { + name: "Cleaned path collapses extra slashes", + hostPath: "/host//", + path: "/etc/os-release", + expected: "/host/etc/os-release", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.expected, hostPathFor(tt.hostPath, tt.path)) + }) + } +} + +func TestDiscoverActivationStatus(t *testing.T) { + tests := []struct { + name string + files []string + expectedStatus v1.ActivationStatus + }{ + { + name: "Activated - single pair", + files: []string{ + "3341241341658386286-key.pem", + "3341241341658386286.pem", + }, + expectedStatus: v1.ActivationStatus_ACTIVE, + }, + { + name: "Activated - multiple pairs", + files: []string{ + "111-key.pem", + "111.pem", 
+ "222-key.pem", + "222.pem", + }, + expectedStatus: v1.ActivationStatus_ACTIVE, + }, + { + name: "Unactivated - empty directory", + files: []string{}, + expectedStatus: v1.ActivationStatus_INACTIVE, + }, + { + name: "Unactivated - only key file", + files: []string{ + "3341241341658386286-key.pem", + }, + expectedStatus: v1.ActivationStatus_INACTIVE, + }, + { + name: "Unactivated - only cert file", + files: []string{ + "3341241341658386286.pem", + }, + expectedStatus: v1.ActivationStatus_INACTIVE, + }, + { + name: "Unactivated - mismatched names", + files: []string{ + "111-key.pem", + "222.pem", + }, + expectedStatus: v1.ActivationStatus_INACTIVE, + }, + { + name: "Unactivated - other files", + files: []string{ + "some-other-file.txt", + }, + expectedStatus: v1.ActivationStatus_INACTIVE, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tmpDir := t.TempDir() + + for _, filename := range tt.files { + filePath := filepath.Join(tmpDir, filename) + err := os.WriteFile(filePath, []byte("test content"), 0644) + require.NoError(t, err) + } + + activationStatus, err := discoverActivationStatusWithPath(tmpDir) + assert.NoError(t, err) + assert.Equal(t, tt.expectedStatus, activationStatus) + }) + } +} + +func TestDiscoverActivationStatus_MissingDir(t *testing.T) { + tmpDir := t.TempDir() + missingPath := filepath.Join(tmpDir, "nonexistent") + + activationStatus, err := discoverActivationStatusWithPath(missingPath) + assert.Error(t, err) + assert.Equal(t, v1.ActivationStatus_ACTIVATION_UNSPECIFIED, activationStatus) +} + +func TestDiscoverDnfMetadataStatus(t *testing.T) { + tests := map[string]struct { + reposDirs []string + repoDirFiles map[string][]string + cacheDirs []string + expectedStatus v1.DnfMetadataStatus + expectedErrs []string + }{ + "should report available when repo file and cache dir exist": { + reposDirs: []string{"yum.repos.d"}, + repoDirFiles: map[string][]string{"yum.repos.d": {"rhel9.repo"}}, + cacheDirs: []string{"rhel-9-for-x86_64-appstream-rpms-3dc6dc0880df5476"}, + expectedStatus: v1.DnfMetadataStatus_AVAILABLE, + }, + "should report available when repo file is in second reposdir": { + reposDirs: []string{"yum.repos.d", "yum/repos.d"}, + repoDirFiles: map[string][]string{ + "yum.repos.d": {}, + "yum/repos.d": {"rhel9.repo"}, + }, + cacheDirs: []string{"rhel-9-for-x86_64-appstream-rpms-3dc6dc0880df5476"}, + expectedStatus: v1.DnfMetadataStatus_AVAILABLE, + }, + "should report available with multiple repo files and cache dirs": { + reposDirs: []string{"yum.repos.d"}, + repoDirFiles: map[string][]string{ + "yum.repos.d": {"baseos.repo", "appstream.repo"}, + }, + cacheDirs: []string{ + "rhel-9-for-x86_64-appstream-rpms-3dc6dc0880df5476", + "rhel-9-for-x86_64-baseos-rpms-a2cdae14f4ed6c20", + }, + expectedStatus: v1.DnfMetadataStatus_AVAILABLE, + }, + "should report unavailable when no repo files exist": { + reposDirs: []string{"yum.repos.d"}, + repoDirFiles: map[string][]string{"yum.repos.d": {}}, + cacheDirs: []string{"rhel-9-for-x86_64-appstream-rpms-3dc6dc0880df5476"}, + expectedStatus: v1.DnfMetadataStatus_UNAVAILABLE, + expectedErrs: []string{"no .repo files found"}, + }, + "should report unavailable when no cache dirs exist": { + reposDirs: []string{"yum.repos.d"}, + repoDirFiles: map[string][]string{"yum.repos.d": {"rhel9.repo"}}, + cacheDirs: []string{}, + expectedStatus: v1.DnfMetadataStatus_UNAVAILABLE, + }, + "should report unavailable when cache dir lacks -rpms- pattern": { + reposDirs: []string{"yum.repos.d"}, + repoDirFiles: 
map[string][]string{"yum.repos.d": {"rhel9.repo"}}, + cacheDirs: []string{"some-other-dir"}, + expectedStatus: v1.DnfMetadataStatus_UNAVAILABLE, + }, + "should report unavailable for empty directories": { + reposDirs: []string{"yum.repos.d"}, + repoDirFiles: map[string][]string{"yum.repos.d": {}}, + cacheDirs: []string{}, + expectedStatus: v1.DnfMetadataStatus_UNAVAILABLE, + expectedErrs: []string{"no .repo files found"}, + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + tmpDir := t.TempDir() + cacheDir := filepath.Join(tmpDir, "dnf") + require.NoError(t, os.MkdirAll(cacheDir, 0755)) + + reposDirPaths := make([]string, 0, len(tt.reposDirs)) + for _, dir := range tt.reposDirs { + dirPath := filepath.Join(tmpDir, dir) + reposDirPaths = append(reposDirPaths, dirPath) + if files, ok := tt.repoDirFiles[dir]; ok { + require.NoError(t, os.MkdirAll(dirPath, 0755)) + for _, filename := range files { + filePath := filepath.Join(dirPath, filename) + err := os.WriteFile(filePath, []byte("[repo]\nname=test"), 0644) + require.NoError(t, err) + } + } + } + + for _, dirname := range tt.cacheDirs { + dirPath := filepath.Join(cacheDir, dirname) + err := os.MkdirAll(dirPath, 0755) + require.NoError(t, err) + } + + dnfStatus, err := discoverDnfMetadataStatusWithPaths("", reposDirPaths, cacheDir) + if len(tt.expectedErrs) == 0 { + assert.NoError(t, err) + } else { + require.Error(t, err) + for _, expectedErr := range tt.expectedErrs { + assert.ErrorContains(t, err, expectedErr) + } + } + assert.Equal(t, tt.expectedStatus, dnfStatus) + }) + } +} + +func TestDiscoverDnfMetadataStatus_MissingDirs(t *testing.T) { + tests := map[string]struct { + reposDirs []string + repoDirFiles map[string][]string + cacheDir string + setupCache func(string) error + errorContains string + }{ + "should return error when repos dir is missing": { + reposDirs: []string{"nonexistent-repos"}, + repoDirFiles: map[string][]string{}, + cacheDir: "dnf", + setupCache: func(path string) error { return os.MkdirAll(path, 0755) }, + errorContains: "reading", + }, + "should return error when cache dir is missing": { + reposDirs: []string{"yum.repos.d"}, + repoDirFiles: map[string][]string{ + "yum.repos.d": {"test.repo"}, + }, + cacheDir: "nonexistent-cache", + setupCache: nil, + errorContains: "unsupported OS detected: missing", + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + tmpDir := t.TempDir() + cacheDirPath := filepath.Join(tmpDir, tt.cacheDir) + if tt.setupCache != nil { + require.NoError(t, tt.setupCache(cacheDirPath)) + } + + reposDirPaths := make([]string, 0, len(tt.reposDirs)) + for _, dir := range tt.reposDirs { + dirPath := filepath.Join(tmpDir, dir) + reposDirPaths = append(reposDirPaths, dirPath) + if files, ok := tt.repoDirFiles[dir]; ok { + require.NoError(t, os.MkdirAll(dirPath, 0755)) + for _, filename := range files { + filePath := filepath.Join(dirPath, filename) + require.NoError(t, os.WriteFile(filePath, []byte("[repo]"), 0644)) + } + } + } + + dnfStatus, err := discoverDnfMetadataStatusWithPaths("", reposDirPaths, cacheDirPath) + assert.Error(t, err) + assert.ErrorContains(t, err, tt.errorContains) + assert.Equal(t, v1.DnfMetadataStatus_DNF_METADATA_UNSPECIFIED, dnfStatus) + }) + } +} From 2503e3ad8414c00cd43f5515e7aab5b30b21f282 Mon Sep 17 00:00:00 2001 From: AJ Heflin <77823405+ajheflin@users.noreply.github.com> Date: Mon, 26 Jan 2026 14:51:28 -0500 Subject: [PATCH 013/232] ROX-31575: Integrate ImageCVEInfo lookup table into image enrichment (#18572) 
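The core of this change is a two-step hook in UpsertImage, gated by
features.CVEFixTimestampCriteria. First, upsertImageCVEInfos records one
ImageCVEInfo row per (CVE, component, datasource); when the scanner supplies no
fix timestamp but the CVE has a FixedBy version, the current time is used as a
fallback. Second, enrichCVEsFromImageCVEInfo batch-fetches those rows and copies
the stored timestamps back onto each vulnerability. Condensed from the
enrichment loop in the diff below (infoMap is the batch-fetched ImageCVEInfo
lookup keyed by cve.ImageCVEInfoID):

    for _, component := range image.GetScan().GetComponents() {
        for _, vuln := range component.GetVulns() {
            id := cve.ImageCVEInfoID(vuln.GetCve(), component.GetName(), vuln.GetDatasource())
            if info, ok := infoMap[id]; ok {
                vuln.FixAvailableTimestamp = info.GetFixAvailableTimestamp()
                vuln.FirstSystemOccurrence = info.GetFirstSystemOccurrence()
            }
            // Datasource is an internal scanner detail; it is cleared after the lookup.
            vuln.Datasource = ""
        }
    }
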
Co-authored-by: David Caravello <119438707+dcaravel@users.noreply.github.com> --- .../cve/converter/utils/convert_utils_v2.go | 20 +- central/cve/image/info/datastore/datastore.go | 1 - .../image/info/datastore/datastore_impl.go | 23 +- .../v2/datastore/store/postgres/store.go | 5 +- central/graphdb/testutils/datastore.go | 3 + central/graphql/resolvers/test_setup_utils.go | 7 +- central/image/datastore/datastore.go | 5 +- .../datastore_bench_postgres_test.go | 7 +- central/image/datastore/datastore_impl.go | 102 +++++++- .../datastore_impl_flat_postgres_test.go | 218 +++++++++++++++++- .../datastore/datastore_test_constructors.go | 4 +- central/image/datastore/singleton.go | 3 +- .../datastore/store/v2/postgres/store.go | 5 +- central/imagev2/datastore/datastore.go | 5 +- .../datastore_bench_postgres_test.go | 7 +- central/imagev2/datastore/datastore_impl.go | 102 +++++++- .../datastore/datastore_test_constructors.go | 4 +- central/imagev2/datastore/singleton.go | 3 +- .../imagev2/datastore/store/postgres/store.go | 5 +- .../datastoretest/datastore_impl_test.go | 4 +- central/pruning/pruning_test.go | 4 + central/reprocessor/reprocessor_test.go | 7 +- central/views/deployments/view_test.go | 3 + central/views/imagecveflat/view_test.go | 3 + .../querymgr/query_manager_impl_test.go | 4 + .../manager_impl_flat_cve_data_test.go | 4 + generated/storage/cve.pb.go | 109 +++++---- generated/storage/cve_vtproto.pb.go | 92 ++++++++ .../violationmessages/printer.go | 1 + pkg/cve/cve.go | 8 + pkg/postgres/schema/image_cves_v2.go | 1 + proto/storage/cve.proto | 3 + proto/storage/proto.lock | 5 + 33 files changed, 674 insertions(+), 103 deletions(-) diff --git a/central/cve/converter/utils/convert_utils_v2.go b/central/cve/converter/utils/convert_utils_v2.go index 9379ec6e443e5..fe6546ec72a99 100644 --- a/central/cve/converter/utils/convert_utils_v2.go +++ b/central/cve/converter/utils/convert_utils_v2.go @@ -24,6 +24,7 @@ func ImageCVEV2ToEmbeddedVulnerability(vuln *storage.ImageCVEV2) *storage.Embedd PublishedOn: vuln.GetCveBaseInfo().GetPublishedOn(), LastModified: vuln.GetCveBaseInfo().GetLastModified(), FirstSystemOccurrence: vuln.GetCveBaseInfo().GetCreatedAt(), + FixAvailableTimestamp: vuln.GetFixAvailableTimestamp(), Severity: vuln.GetSeverity(), CvssMetrics: vuln.GetCveBaseInfo().GetCvssMetrics(), NvdCvss: vuln.GetNvdcvss(), @@ -94,15 +95,16 @@ func EmbeddedVulnerabilityToImageCVEV2(imageID string, componentID string, index Epss: from.GetEpss(), ScoreVersion: scoreVersion, }, - Cvss: from.GetCvss(), - Nvdcvss: nvdCvss, - NvdScoreVersion: nvdVersion, - Severity: from.GetSeverity(), - FirstImageOccurrence: from.GetFirstImageOccurrence(), - State: from.GetState(), - IsFixable: from.GetFixedBy() != "", - ImpactScore: impactScore, - Advisory: from.GetAdvisory(), + Cvss: from.GetCvss(), + Nvdcvss: nvdCvss, + NvdScoreVersion: nvdVersion, + Severity: from.GetSeverity(), + FirstImageOccurrence: from.GetFirstImageOccurrence(), + FixAvailableTimestamp: from.GetFixAvailableTimestamp(), + State: from.GetState(), + IsFixable: from.GetFixedBy() != "", + ImpactScore: impactScore, + Advisory: from.GetAdvisory(), } if !features.FlattenImageData.Enabled() { ret.ImageId = imageID diff --git a/central/cve/image/info/datastore/datastore.go b/central/cve/image/info/datastore/datastore.go index 669648c936f5a..745f3654df00f 100644 --- a/central/cve/image/info/datastore/datastore.go +++ b/central/cve/image/info/datastore/datastore.go @@ -6,7 +6,6 @@ import ( 
"github.com/stackrox/rox/central/cve/image/info/datastore/store" pgStore "github.com/stackrox/rox/central/cve/image/info/datastore/store/postgres" - v1 "github.com/stackrox/rox/generated/api/v1" "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/postgres" diff --git a/central/cve/image/info/datastore/datastore_impl.go b/central/cve/image/info/datastore/datastore_impl.go index 6fd45c3b082a3..58c5c2fff5ec7 100644 --- a/central/cve/image/info/datastore/datastore_impl.go +++ b/central/cve/image/info/datastore/datastore_impl.go @@ -2,14 +2,12 @@ package datastore import ( "context" - "math" "github.com/stackrox/rox/central/cve/image/info/datastore/store" - "github.com/stackrox/rox/pkg/protocompat" - "github.com/stackrox/rox/pkg/sliceutils" - v1 "github.com/stackrox/rox/generated/api/v1" "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/protocompat" + "github.com/stackrox/rox/pkg/sliceutils" ) type datastoreImpl struct { @@ -65,16 +63,13 @@ func (ds *datastoreImpl) UpsertMany(ctx context.Context, infos []*storage.ImageC } newInfoMap := make(map[string]*storage.ImageCVEInfo) oldInfoMap := make(map[string]*storage.ImageCVEInfo) - // Populate both maps at the same time by looping through up to the length of the longer list to save time - for i := range int(math.Max(float64(len(infos)), float64(len(existing)))) { - // Check if this was the shorter list - if i < len(infos) { - newInfoMap[infos[i].GetId()] = infos[i] - } - // Same as above - if i < len(existing) { - oldInfoMap[infos[i].GetId()] = existing[i] - } + // Populate both maps separately - can't use index-based loop because + // existing may not be in the same order as infos + for _, info := range infos { + newInfoMap[info.GetId()] = info + } + for _, info := range existing { + oldInfoMap[info.GetId()] = info } // Create our list that we're going to actually upsert toUpsert := make([]*storage.ImageCVEInfo, 0) diff --git a/central/cve/image/v2/datastore/store/postgres/store.go b/central/cve/image/v2/datastore/store/postgres/store.go index d0689c1074f50..38a1d0630bb95 100644 --- a/central/cve/image/v2/datastore/store/postgres/store.go +++ b/central/cve/image/v2/datastore/store/postgres/store.go @@ -119,10 +119,11 @@ func insertIntoImageCvesV2(batch *pgx.Batch, obj *storage.ImageCVEV2) error { obj.GetAdvisory().GetName(), obj.GetAdvisory().GetLink(), pgutils.NilOrString(obj.GetImageIdV2()), + protocompat.NilOrTime(obj.GetFixAvailableTimestamp()), serialized, } - finalStr := "INSERT INTO image_cves_v2 (Id, ImageId, CveBaseInfo_Cve, CveBaseInfo_PublishedOn, CveBaseInfo_CreatedAt, CveBaseInfo_Epss_EpssProbability, Cvss, Severity, ImpactScore, Nvdcvss, FirstImageOccurrence, State, IsFixable, FixedBy, ComponentId, Advisory_Name, Advisory_Link, ImageIdV2, serialized) VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19) ON CONFLICT(Id) DO UPDATE SET Id = EXCLUDED.Id, ImageId = EXCLUDED.ImageId, CveBaseInfo_Cve = EXCLUDED.CveBaseInfo_Cve, CveBaseInfo_PublishedOn = EXCLUDED.CveBaseInfo_PublishedOn, CveBaseInfo_CreatedAt = EXCLUDED.CveBaseInfo_CreatedAt, CveBaseInfo_Epss_EpssProbability = EXCLUDED.CveBaseInfo_Epss_EpssProbability, Cvss = EXCLUDED.Cvss, Severity = EXCLUDED.Severity, ImpactScore = EXCLUDED.ImpactScore, Nvdcvss = EXCLUDED.Nvdcvss, FirstImageOccurrence = EXCLUDED.FirstImageOccurrence, State = EXCLUDED.State, IsFixable = EXCLUDED.IsFixable, FixedBy = EXCLUDED.FixedBy, ComponentId = EXCLUDED.ComponentId, Advisory_Name = EXCLUDED.Advisory_Name, Advisory_Link = 
EXCLUDED.Advisory_Link, ImageIdV2 = EXCLUDED.ImageIdV2, serialized = EXCLUDED.serialized" + finalStr := "INSERT INTO image_cves_v2 (Id, ImageId, CveBaseInfo_Cve, CveBaseInfo_PublishedOn, CveBaseInfo_CreatedAt, CveBaseInfo_Epss_EpssProbability, Cvss, Severity, ImpactScore, Nvdcvss, FirstImageOccurrence, State, IsFixable, FixedBy, ComponentId, Advisory_Name, Advisory_Link, ImageIdV2, FixAvailableTimestamp, serialized) VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20) ON CONFLICT(Id) DO UPDATE SET Id = EXCLUDED.Id, ImageId = EXCLUDED.ImageId, CveBaseInfo_Cve = EXCLUDED.CveBaseInfo_Cve, CveBaseInfo_PublishedOn = EXCLUDED.CveBaseInfo_PublishedOn, CveBaseInfo_CreatedAt = EXCLUDED.CveBaseInfo_CreatedAt, CveBaseInfo_Epss_EpssProbability = EXCLUDED.CveBaseInfo_Epss_EpssProbability, Cvss = EXCLUDED.Cvss, Severity = EXCLUDED.Severity, ImpactScore = EXCLUDED.ImpactScore, Nvdcvss = EXCLUDED.Nvdcvss, FirstImageOccurrence = EXCLUDED.FirstImageOccurrence, State = EXCLUDED.State, IsFixable = EXCLUDED.IsFixable, FixedBy = EXCLUDED.FixedBy, ComponentId = EXCLUDED.ComponentId, Advisory_Name = EXCLUDED.Advisory_Name, Advisory_Link = EXCLUDED.Advisory_Link, ImageIdV2 = EXCLUDED.ImageIdV2, FixAvailableTimestamp = EXCLUDED.FixAvailableTimestamp, serialized = EXCLUDED.serialized" batch.Queue(finalStr, values...) return nil @@ -147,6 +148,7 @@ var copyColsImageCvesV2 = []string{ "advisory_name", "advisory_link", "imageidv2", + "fixavailabletimestamp", "serialized", } @@ -199,6 +201,7 @@ func copyFromImageCvesV2(ctx context.Context, s pgSearch.Deleter, tx *postgres.T obj.GetAdvisory().GetName(), obj.GetAdvisory().GetLink(), pgutils.NilOrString(obj.GetImageIdV2()), + protocompat.NilOrTime(obj.GetFixAvailableTimestamp()), serialized, }, nil }) diff --git a/central/graphdb/testutils/datastore.go b/central/graphdb/testutils/datastore.go index fda32e355feb0..64a757be049b7 100644 --- a/central/graphdb/testutils/datastore.go +++ b/central/graphdb/testutils/datastore.go @@ -7,6 +7,7 @@ import ( clusterDataStore "github.com/stackrox/rox/central/cluster/datastore" clusterCVEDataStore "github.com/stackrox/rox/central/cve/cluster/datastore" cveConverterV2 "github.com/stackrox/rox/central/cve/converter/v2" + imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" deploymentDataStore "github.com/stackrox/rox/central/deployment/datastore" imageDataStore "github.com/stackrox/rox/central/image/datastore" imagePostgresV2 "github.com/stackrox/rox/central/image/datastore/store/v2/postgres" @@ -307,11 +308,13 @@ func NewTestGraphDataStore(t *testing.T) (TestGraphDataStore, error) { s.pgtestbase = pgtest.ForT(t) s.nodeStore = nodeDataStore.GetTestPostgresDataStore(t, s.GetPostgresPool()) + imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(t, s.GetPostgresPool()) s.imageStore = imageDataStore.NewWithPostgres( imagePostgresV2.New(s.GetPostgresPool(), false, concurrency.NewKeyFence()), riskDS.GetTestPostgresDataStore(t, s.GetPostgresPool()), ranking.NewRanker(), ranking.NewRanker(), + imageCVEInfo, ) s.deploymentStore, err = deploymentDataStore.GetTestPostgresDataStore(t, s.GetPostgresPool()) if err != nil { diff --git a/central/graphql/resolvers/test_setup_utils.go b/central/graphql/resolvers/test_setup_utils.go index 56b485481dd8c..33c9b980f1acc 100644 --- a/central/graphql/resolvers/test_setup_utils.go +++ b/central/graphql/resolvers/test_setup_utils.go @@ -12,6 +12,7 @@ import ( clusterCVEEdgePostgres 
"github.com/stackrox/rox/central/clustercveedge/datastore/store/postgres" clusterCVEDataStore "github.com/stackrox/rox/central/cve/cluster/datastore" clusterCVEPostgres "github.com/stackrox/rox/central/cve/cluster/datastore/store/postgres" + imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" imageCVEV2DS "github.com/stackrox/rox/central/cve/image/v2/datastore" imageCVEV2Postgres "github.com/stackrox/rox/central/cve/image/v2/datastore/store/postgres" nodeCVEDataStore "github.com/stackrox/rox/central/cve/node/datastore" @@ -141,7 +142,7 @@ func SetupTestResolver(t testing.TB, datastores ...interface{}) (*Resolver, *gra } // CreateTestImageV2Datastore creates image datastore for testing -func CreateTestImageV2Datastore(_ testing.TB, testDB *pgtest.TestPostgres, ctrl *gomock.Controller) imageDS.DataStore { +func CreateTestImageV2Datastore(t testing.TB, testDB *pgtest.TestPostgres, ctrl *gomock.Controller) imageDS.DataStore { risks := mockRisks.NewMockDataStore(ctrl) risks.EXPECT().RemoveRisk(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() return imageDS.NewWithPostgres( @@ -149,11 +150,12 @@ func CreateTestImageV2Datastore(_ testing.TB, testDB *pgtest.TestPostgres, ctrl risks, ranking.NewRanker(), ranking.NewRanker(), + imageCVEInfoDS.GetTestPostgresDataStore(t, testDB.DB), ) } // CreateTestImageV2V2Datastore creates image datastore for testing -func CreateTestImageV2V2Datastore(_ testing.TB, testDB *pgtest.TestPostgres, ctrl *gomock.Controller) imageV2DS.DataStore { +func CreateTestImageV2V2Datastore(t testing.TB, testDB *pgtest.TestPostgres, ctrl *gomock.Controller) imageV2DS.DataStore { risks := mockRisks.NewMockDataStore(ctrl) risks.EXPECT().RemoveRisk(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() return imageV2DS.NewWithPostgres( @@ -161,6 +163,7 @@ func CreateTestImageV2V2Datastore(_ testing.TB, testDB *pgtest.TestPostgres, ctr risks, ranking.NewRanker(), ranking.NewRanker(), + imageCVEInfoDS.GetTestPostgresDataStore(t, testDB.DB), ) } diff --git a/central/image/datastore/datastore.go b/central/image/datastore/datastore.go index 5d4fee9f0987e..f16ac3d335117 100644 --- a/central/image/datastore/datastore.go +++ b/central/image/datastore/datastore.go @@ -3,6 +3,7 @@ package datastore import ( "context" + imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" "github.com/stackrox/rox/central/image/datastore/store" "github.com/stackrox/rox/central/ranking" riskDS "github.com/stackrox/rox/central/risk/datastore" @@ -40,8 +41,8 @@ type DataStore interface { // NewWithPostgres returns a new instance of DataStore using the input store, and searcher. // noUpdateTimestamps controls whether timestamps are automatically updated when upserting images. // This should be set to `false` except for some tests. 
-func NewWithPostgres(storage store.Store, risks riskDS.DataStore, imageRanker *ranking.Ranker, imageComponentRanker *ranking.Ranker) DataStore { - ds := newDatastoreImpl(storage, risks, imageRanker, imageComponentRanker) +func NewWithPostgres(storage store.Store, risks riskDS.DataStore, imageRanker *ranking.Ranker, imageComponentRanker *ranking.Ranker, imageCVEInfo imageCVEInfoDS.DataStore) DataStore { + ds := newDatastoreImpl(storage, risks, imageRanker, imageComponentRanker, imageCVEInfo) go ds.initializeRankers() return ds } diff --git a/central/image/datastore/datastore_bench_postgres_test.go b/central/image/datastore/datastore_bench_postgres_test.go index 123dfb8c5337c..e6019176f342f 100644 --- a/central/image/datastore/datastore_bench_postgres_test.go +++ b/central/image/datastore/datastore_bench_postgres_test.go @@ -9,6 +9,7 @@ import ( "fmt" "testing" + imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" "github.com/stackrox/rox/central/image/datastore/keyfence" pgStoreV2 "github.com/stackrox/rox/central/image/datastore/store/v2/postgres" "github.com/stackrox/rox/central/ranking" @@ -29,7 +30,8 @@ func BenchmarkImageGetMany(b *testing.B) { db := testDB.DB mockRisk := mockRisks.NewMockDataStore(gomock.NewController(b)) - datastore := NewWithPostgres(pgStoreV2.New(db, false, keyfence.ImageKeyFenceSingleton()), mockRisk, ranking.NewRanker(), ranking.NewRanker()) + imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(b, db) + datastore := NewWithPostgres(pgStoreV2.New(db, false, keyfence.ImageKeyFenceSingleton()), mockRisk, ranking.NewRanker(), ranking.NewRanker(), imageCVEInfo) ids := make([]string, 0, 100) images := make([]*storage.Image, 0, 100) @@ -68,7 +70,8 @@ func BenchmarkImageUpsert(b *testing.B) { db := testDB.DB mockRisk := mockRisks.NewMockDataStore(gomock.NewController(b)) - datastore := NewWithPostgres(pgStoreV2.New(db, false, keyfence.ImageKeyFenceSingleton()), mockRisk, ranking.NewRanker(), ranking.NewRanker()) + imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(b, db) + datastore := NewWithPostgres(pgStoreV2.New(db, false, keyfence.ImageKeyFenceSingleton()), mockRisk, ranking.NewRanker(), ranking.NewRanker(), imageCVEInfo) images := make([]*storage.Image, 0, 100) for i := 0; i < 100; i++ { diff --git a/central/image/datastore/datastore_impl.go b/central/image/datastore/datastore_impl.go index 7bf178ff7f2c7..d861084214664 100644 --- a/central/image/datastore/datastore_impl.go +++ b/central/image/datastore/datastore_impl.go @@ -6,6 +6,7 @@ import ( "time" "github.com/pkg/errors" + imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" "github.com/stackrox/rox/central/globaldb" "github.com/stackrox/rox/central/image/datastore/store" "github.com/stackrox/rox/central/image/views" @@ -15,10 +16,13 @@ import ( v1 "github.com/stackrox/rox/generated/api/v1" "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/concurrency" + "github.com/stackrox/rox/pkg/cve" "github.com/stackrox/rox/pkg/errorhelpers" + "github.com/stackrox/rox/pkg/features" "github.com/stackrox/rox/pkg/images/enricher" imageTypes "github.com/stackrox/rox/pkg/images/types" "github.com/stackrox/rox/pkg/logging" + "github.com/stackrox/rox/pkg/protocompat" "github.com/stackrox/rox/pkg/sac" "github.com/stackrox/rox/pkg/sac/resources" "github.com/stackrox/rox/pkg/scancomponent" @@ -40,10 +44,13 @@ type datastoreImpl struct { imageRanker *ranking.Ranker imageComponentRanker *ranking.Ranker + + imageCVEInfoDS imageCVEInfoDS.DataStore } func 
newDatastoreImpl(storage store.Store, risks riskDS.DataStore, - imageRanker *ranking.Ranker, imageComponentRanker *ranking.Ranker) *datastoreImpl { + imageRanker *ranking.Ranker, imageComponentRanker *ranking.Ranker, + imageCVEInfo imageCVEInfoDS.DataStore) *datastoreImpl { ds := &datastoreImpl{ storage: storage, @@ -52,6 +59,8 @@ func newDatastoreImpl(storage store.Store, risks riskDS.DataStore, imageRanker: imageRanker, imageComponentRanker: imageComponentRanker, + imageCVEInfoDS: imageCVEInfo, + keyedMutex: concurrency.NewKeyedMutex(globaldb.DefaultDataStorePoolSize), } return ds @@ -265,6 +274,20 @@ func (ds *datastoreImpl) UpsertImage(ctx context.Context, image *storage.Image) ds.keyedMutex.Lock(image.GetId()) defer ds.keyedMutex.Unlock(image.GetId()) + if features.CVEFixTimestampCriteria.Enabled() { + // Populate the ImageCVEInfo table with CVE timing metadata + if err := ds.upsertImageCVEInfos(ctx, image); err != nil { + log.Warnf("Failed to upsert ImageCVEInfo: %v", err) + // Non-fatal, continue with image upsert + } + + // Enrich the CVEs with accurate timestamps from lookup table + if err := ds.enrichCVEsFromImageCVEInfo(ctx, image); err != nil { + log.Warnf("Failed to enrich CVEs from ImageCVEInfo: %v", err) + // Non-fatal, continue with image upsert + } + } + ds.updateComponentRisk(image) enricher.FillScanStats(image) @@ -370,6 +393,83 @@ func (ds *datastoreImpl) updateComponentRisk(image *storage.Image) { } } +// upsertImageCVEInfos populates the ImageCVEInfo lookup table with CVE timing metadata. +func (ds *datastoreImpl) upsertImageCVEInfos(ctx context.Context, image *storage.Image) error { + if !features.CVEFixTimestampCriteria.Enabled() { + return nil + } + + infos := make([]*storage.ImageCVEInfo, 0) + now := protocompat.TimestampNow() + + for _, component := range image.GetScan().GetComponents() { + for _, vuln := range component.GetVulns() { + // Determine fix available timestamp: use scanner-provided value if available, + // otherwise fabricate from scan time if the CVE is fixable (has a fix version). + // This handles non-Red Hat data sources that don't provide fix timestamps. + fixAvailableTimestamp := vuln.GetFixAvailableTimestamp() + if fixAvailableTimestamp == nil && vuln.GetFixedBy() != "" { + fixAvailableTimestamp = now + } + + info := &storage.ImageCVEInfo{ + Id: cve.ImageCVEInfoID(vuln.GetCve(), component.GetName(), vuln.GetDatasource()), + FixAvailableTimestamp: fixAvailableTimestamp, + FirstSystemOccurrence: now, // Smart upsert in ImageCVEInfo datastore preserves existing + } + infos = append(infos, info) + } + } + + return ds.imageCVEInfoDS.UpsertMany(ctx, infos) +} + +// enrichCVEsFromImageCVEInfo enriches the image's CVEs with accurate timestamps from the lookup table. 
+func (ds *datastoreImpl) enrichCVEsFromImageCVEInfo(ctx context.Context, image *storage.Image) error { + if !features.CVEFixTimestampCriteria.Enabled() { + return nil + } + + // Collect all IDs + ids := make([]string, 0) + for _, component := range image.GetScan().GetComponents() { + for _, vuln := range component.GetVulns() { + ids = append(ids, cve.ImageCVEInfoID(vuln.GetCve(), component.GetName(), vuln.GetDatasource())) + } + } + + if len(ids) == 0 { + return nil + } + + // Batch fetch + infos, err := ds.imageCVEInfoDS.GetBatch(ctx, ids) + if err != nil { + return err + } + + // Build lookup map + infoMap := make(map[string]*storage.ImageCVEInfo) + for _, info := range infos { + infoMap[info.GetId()] = info + } + + // Enrich CVEs and blank out datasource after using it + for _, component := range image.GetScan().GetComponents() { + for _, vuln := range component.GetVulns() { + id := cve.ImageCVEInfoID(vuln.GetCve(), component.GetName(), vuln.GetDatasource()) + if info, ok := infoMap[id]; ok { + vuln.FixAvailableTimestamp = info.GetFixAvailableTimestamp() + vuln.FirstSystemOccurrence = info.GetFirstSystemOccurrence() + } + // Blank out datasource after using it - this is internal scanner data not meant for end users + vuln.Datasource = "" + } + } + + return nil +} + // ImageSearchResultConverter converts image search results to proto search results type ImageSearchResultConverter struct{} diff --git a/central/image/datastore/datastore_impl_flat_postgres_test.go b/central/image/datastore/datastore_impl_flat_postgres_test.go index 99e08af4a9c0e..678fce8472a51 100644 --- a/central/image/datastore/datastore_impl_flat_postgres_test.go +++ b/central/image/datastore/datastore_impl_flat_postgres_test.go @@ -8,6 +8,7 @@ import ( "sort" "testing" + imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" imageCVEDS "github.com/stackrox/rox/central/cve/image/v2/datastore" imageCVEPostgres "github.com/stackrox/rox/central/cve/image/v2/datastore/store/postgres" "github.com/stackrox/rox/central/image/datastore/keyfence" @@ -43,13 +44,14 @@ func TestImageFlatDataStoreWithPostgres(t *testing.T) { type ImageFlatPostgresDataStoreTestSuite struct { suite.Suite - ctx context.Context - testDB *pgtest.TestPostgres - db postgres.DB - datastore DataStore - mockRisk *mockRisks.MockDataStore - componentDataStore imageComponentDS.DataStore - cveDataStore imageCVEDS.DataStore + ctx context.Context + testDB *pgtest.TestPostgres + db postgres.DB + datastore DataStore + mockRisk *mockRisks.MockDataStore + componentDataStore imageComponentDS.DataStore + cveDataStore imageCVEDS.DataStore + imageCVEInfoDatastore imageCVEInfoDS.DataStore } func (s *ImageFlatPostgresDataStoreTestSuite) SetupSuite() { @@ -60,8 +62,9 @@ func (s *ImageFlatPostgresDataStoreTestSuite) SetupSuite() { func (s *ImageFlatPostgresDataStoreTestSuite) SetupTest() { s.mockRisk = mockRisks.NewMockDataStore(gomock.NewController(s.T())) + s.imageCVEInfoDatastore = imageCVEInfoDS.GetTestPostgresDataStore(s.T(), s.db) dbStore := pgStoreV2.New(s.db, false, keyfence.ImageKeyFenceSingleton()) - s.datastore = NewWithPostgres(dbStore, s.mockRisk, ranking.ImageRanker(), ranking.ComponentRanker()) + s.datastore = NewWithPostgres(dbStore, s.mockRisk, ranking.ImageRanker(), ranking.ComponentRanker(), s.imageCVEInfoDatastore) componentStorage := imageComponentPostgres.New(s.db) s.componentDataStore = imageComponentDS.New(componentStorage, s.mockRisk, ranking.NewRanker()) @@ -76,6 +79,7 @@ func (s *ImageFlatPostgresDataStoreTestSuite) TearDownTest() 
{ s.truncateTable(postgresSchema.ImagesTableName) s.truncateTable(postgresSchema.ImageComponentV2TableName) s.truncateTable(postgresSchema.ImageCvesV2TableName) + s.truncateTable(postgresSchema.ImageCveInfosTableName) } func (s *ImageFlatPostgresDataStoreTestSuite) TestSearchWithPostgres() { @@ -623,3 +627,201 @@ func cloneAndUpdateRiskPriority(image *storage.Image) *storage.Image { } return cloned } + +// TestImageCVEInfoIntegration tests the ImageCVEInfo lookup table integration +func (s *ImageFlatPostgresDataStoreTestSuite) TestImageCVEInfoIntegration_PopulatesTable() { + // Enable the feature flag + s.T().Setenv("ROX_CVE_FIX_TIMESTAMP", "true") + + ctx := sac.WithGlobalAccessScopeChecker(context.Background(), sac.AllowFixedScopes( + sac.AccessModeScopeKeys(storage.Access_READ_ACCESS, storage.Access_READ_WRITE_ACCESS), + sac.ResourceScopeKeys(resources.Image), + )) + + // Create an image with CVEs that have fix timestamps + fixTime := protocompat.TimestampNow() + image := &storage.Image{ + Id: "test-image-cve-info", + Name: &storage.ImageName{ + FullName: "test/image:tag", + }, + Scan: &storage.ImageScan{ + OperatingSystem: "debian", + ScanTime: protocompat.TimestampNow(), + Components: []*storage.EmbeddedImageScanComponent{ + { + Name: "openssl", + Version: "1.1.1", + Vulns: []*storage.EmbeddedVulnerability{ + { + Cve: "CVE-2021-1234", + VulnerabilityType: storage.EmbeddedVulnerability_IMAGE_VULNERABILITY, + Datasource: "debian-bookworm-updater::debian:12", + FixAvailableTimestamp: fixTime, + }, + }, + }, + }, + }, + } + + // Upsert the image + s.NoError(s.datastore.UpsertImage(ctx, image)) + + // Verify ImageCVEInfo was populated + expectedID := pkgCVE.ImageCVEInfoID("CVE-2021-1234", "openssl", "debian-bookworm-updater::debian:12") + info, found, err := s.imageCVEInfoDatastore.Get(ctx, expectedID) + s.NoError(err) + s.True(found, "ImageCVEInfo should be populated after image upsert") + s.NotNil(info.GetFirstSystemOccurrence(), "FirstSystemOccurrence should be set") + s.Equal(fixTime.GetSeconds(), info.GetFixAvailableTimestamp().GetSeconds(), "FixAvailableTimestamp should match") +} + +func (s *ImageFlatPostgresDataStoreTestSuite) TestImageCVEInfoIntegration_EnrichesFromLookupTable() { + // Enable the feature flag + s.T().Setenv("ROX_CVE_FIX_TIMESTAMP", "true") + + ctx := sac.WithGlobalAccessScopeChecker(context.Background(), sac.AllowFixedScopes( + sac.AccessModeScopeKeys(storage.Access_READ_ACCESS, storage.Access_READ_WRITE_ACCESS), + sac.ResourceScopeKeys(resources.Image), + )) + + // Pre-populate ImageCVEInfo with known timestamps + earlierTime := protocompat.TimestampNow() + earlierTime.Seconds -= 86400 // 1 day ago + + preExistingInfo := &storage.ImageCVEInfo{ + Id: pkgCVE.ImageCVEInfoID("CVE-2021-5678", "curl", "debian-updater::debian:11"), + FixAvailableTimestamp: earlierTime, + FirstSystemOccurrence: earlierTime, + } + s.NoError(s.imageCVEInfoDatastore.Upsert(ctx, preExistingInfo)) + + // Create an image with a CVE that matches the pre-existing info + image := &storage.Image{ + Id: "test-image-enrich", + Name: &storage.ImageName{ + FullName: "test/enrich:tag", + }, + Scan: &storage.ImageScan{ + OperatingSystem: "debian", + ScanTime: protocompat.TimestampNow(), + Components: []*storage.EmbeddedImageScanComponent{ + { + Name: "curl", + Version: "7.68.0", + Vulns: []*storage.EmbeddedVulnerability{ + { + Cve: "CVE-2021-5678", + VulnerabilityType: storage.EmbeddedVulnerability_IMAGE_VULNERABILITY, + Datasource: "debian-updater::debian:11", + // No fix timestamp set - should be 
enriched from lookup + }, + }, + }, + }, + }, + } + + // Upsert the image + s.NoError(s.datastore.UpsertImage(ctx, image)) + + // Retrieve the image and verify CVE was enriched + storedImage, found, err := s.datastore.GetImage(ctx, image.GetId()) + s.NoError(err) + s.True(found) + + // Check that the CVE was enriched with timestamps from the lookup table + vuln := storedImage.GetScan().GetComponents()[0].GetVulns()[0] + s.NotNil(vuln.GetFirstSystemOccurrence(), "FirstSystemOccurrence should be enriched") + s.Equal(earlierTime.GetSeconds(), vuln.GetFirstSystemOccurrence().GetSeconds(), "FirstSystemOccurrence should match pre-existing value") +} + +func (s *ImageFlatPostgresDataStoreTestSuite) TestImageCVEInfoIntegration_PreservesEarlierTimestamps() { + // Enable the feature flag + s.T().Setenv("ROX_CVE_FIX_TIMESTAMP", "true") + + ctx := sac.WithGlobalAccessScopeChecker(context.Background(), sac.AllowFixedScopes( + sac.AccessModeScopeKeys(storage.Access_READ_ACCESS, storage.Access_READ_WRITE_ACCESS), + sac.ResourceScopeKeys(resources.Image), + )) + + // First image upsert - establishes initial timestamps + firstFixTime := protocompat.TimestampNow() + firstFixTime.Seconds -= 86400 // 1 day ago + + image1 := &storage.Image{ + Id: "test-image-preserve-1", + Name: &storage.ImageName{ + FullName: "test/preserve:v1", + }, + Scan: &storage.ImageScan{ + OperatingSystem: "alpine", + ScanTime: protocompat.TimestampNow(), + Components: []*storage.EmbeddedImageScanComponent{ + { + Name: "busybox", + Version: "1.33.0", + Vulns: []*storage.EmbeddedVulnerability{ + { + Cve: "CVE-2021-9999", + VulnerabilityType: storage.EmbeddedVulnerability_IMAGE_VULNERABILITY, + Datasource: "alpine-updater::alpine:3.14", + FixAvailableTimestamp: firstFixTime, + }, + }, + }, + }, + }, + } + s.NoError(s.datastore.UpsertImage(ctx, image1)) + + // Get the first system occurrence time + infoID := pkgCVE.ImageCVEInfoID("CVE-2021-9999", "busybox", "alpine-updater::alpine:3.14") + info1, found, err := s.imageCVEInfoDatastore.Get(ctx, infoID) + s.NoError(err) + s.True(found) + firstOccurrence := info1.GetFirstSystemOccurrence() + + // Second image upsert with a later fix timestamp - should preserve earlier timestamps + laterFixTime := protocompat.TimestampNow() + + image2 := &storage.Image{ + Id: "test-image-preserve-2", + Name: &storage.ImageName{ + FullName: "test/preserve:v2", + }, + Scan: &storage.ImageScan{ + OperatingSystem: "alpine", + ScanTime: protocompat.TimestampNow(), + Components: []*storage.EmbeddedImageScanComponent{ + { + Name: "busybox", + Version: "1.33.1", + Vulns: []*storage.EmbeddedVulnerability{ + { + Cve: "CVE-2021-9999", + VulnerabilityType: storage.EmbeddedVulnerability_IMAGE_VULNERABILITY, + Datasource: "alpine-updater::alpine:3.14", + FixAvailableTimestamp: laterFixTime, // Later timestamp + }, + }, + }, + }, + }, + } + s.NoError(s.datastore.UpsertImage(ctx, image2)) + + // Verify earlier timestamps are preserved + info2, found, err := s.imageCVEInfoDatastore.Get(ctx, infoID) + s.NoError(err) + s.True(found) + + // FirstSystemOccurrence should remain the same (earlier value preserved) + s.Equal(firstOccurrence.GetSeconds(), info2.GetFirstSystemOccurrence().GetSeconds(), + "FirstSystemOccurrence should preserve the earlier timestamp") + + // FixAvailableTimestamp should also preserve the earlier value + s.Equal(firstFixTime.GetSeconds(), info2.GetFixAvailableTimestamp().GetSeconds(), + "FixAvailableTimestamp should preserve the earlier timestamp") +} diff --git 
a/central/image/datastore/datastore_test_constructors.go b/central/image/datastore/datastore_test_constructors.go index 092ee90487bd6..2b4b9d816ab7c 100644 --- a/central/image/datastore/datastore_test_constructors.go +++ b/central/image/datastore/datastore_test_constructors.go @@ -3,6 +3,7 @@ package datastore import ( "testing" + imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" "github.com/stackrox/rox/central/image/datastore/keyfence" pgStoreV2 "github.com/stackrox/rox/central/image/datastore/store/v2/postgres" "github.com/stackrox/rox/central/ranking" @@ -16,5 +17,6 @@ func GetTestPostgresDataStore(t testing.TB, pool postgres.DB) DataStore { riskStore := riskDS.GetTestPostgresDataStore(t, pool) imageRanker := ranking.ImageRanker() imageComponentRanker := ranking.ComponentRanker() - return NewWithPostgres(dbstore, riskStore, imageRanker, imageComponentRanker) + imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(t, pool) + return NewWithPostgres(dbstore, riskStore, imageRanker, imageComponentRanker, imageCVEInfo) } diff --git a/central/image/datastore/singleton.go b/central/image/datastore/singleton.go index 1c4c9553c4828..f8e48e2342370 100644 --- a/central/image/datastore/singleton.go +++ b/central/image/datastore/singleton.go @@ -1,6 +1,7 @@ package datastore import ( + imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" "github.com/stackrox/rox/central/globaldb" "github.com/stackrox/rox/central/image/datastore/keyfence" pgStoreV2 "github.com/stackrox/rox/central/image/datastore/store/v2/postgres" @@ -17,7 +18,7 @@ var ( func initialize() { storage := pgStoreV2.New(globaldb.GetPostgres(), false, keyfence.ImageKeyFenceSingleton()) - ad = NewWithPostgres(storage, riskDS.Singleton(), ranking.ImageRanker(), ranking.ComponentRanker()) + ad = NewWithPostgres(storage, riskDS.Singleton(), ranking.ImageRanker(), ranking.ComponentRanker(), imageCVEInfoDS.Singleton()) } // Singleton provides the interface for non-service external interaction. 
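The integration tests above rely on the lookup table keeping the earliest
observed timestamps when a row is upserted again (the "smart upsert" mentioned
in the comments). The comparison itself is not shown in this excerpt; a minimal
sketch of that merge rule, assuming it simply keeps the earlier of the two
timestamps and using plain timestamppb instead of the repo's protocompat
helpers:

    package cveinfo

    import "google.golang.org/protobuf/types/known/timestamppb"

    // earliest returns the earlier of two timestamps, treating nil as "unset".
    // Illustrative only: the real merge lives in the ImageCVEInfo datastore's UpsertMany.
    func earliest(a, b *timestamppb.Timestamp) *timestamppb.Timestamp {
        if a == nil {
            return b
        }
        if b == nil {
            return a
        }
        if b.AsTime().Before(a.AsTime()) {
            return b
        }
        return a
    }
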
diff --git a/central/image/datastore/store/v2/postgres/store.go b/central/image/datastore/store/v2/postgres/store.go index 32525eed5e387..cee5ba800b1bf 100644 --- a/central/image/datastore/store/v2/postgres/store.go +++ b/central/image/datastore/store/v2/postgres/store.go @@ -330,6 +330,7 @@ func copyFromImageComponentV2Cves(ctx context.Context, tx *postgres.Tx, iTime ti "componentid", "advisory_name", "advisory_link", + "fixavailabletimestamp", "serialized", } @@ -370,6 +371,7 @@ func copyFromImageComponentV2Cves(ctx context.Context, tx *postgres.Tx, iTime ti obj.GetComponentId(), obj.GetAdvisory().GetName(), obj.GetAdvisory().GetLink(), + protocompat.NilOrTime(obj.GetFixAvailableTimestamp()), serialized, }) @@ -995,10 +997,11 @@ func (s *storeImpl) insertIntoImageComponentV2Cves(batch *pgx.Batch, obj *storag obj.GetFixedBy(), obj.GetComponentId(), obj.GetAdvisory().GetName(), + protocompat.NilOrTime(obj.GetFixAvailableTimestamp()), serialized, } - finalStr := "INSERT INTO image_cves_v2 (Id, ImageId, CveBaseInfo_Cve, CveBaseInfo_PublishedOn, CveBaseInfo_CreatedAt, CveBaseInfo_Epss_EpssProbability, Cvss, Severity, ImpactScore, Nvdcvss, FirstImageOccurrence, State, IsFixable, FixedBy, ComponentId, advisory_name, serialized) VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16,$17) ON CONFLICT(Id) DO UPDATE SET Id = EXCLUDED.Id, ImageId = EXCLUDED.ImageId, CveBaseInfo_Cve = EXCLUDED.CveBaseInfo_Cve, CveBaseInfo_PublishedOn = EXCLUDED.CveBaseInfo_PublishedOn, CveBaseInfo_CreatedAt = EXCLUDED.CveBaseInfo_CreatedAt, CveBaseInfo_Epss_EpssProbability = EXCLUDED.CveBaseInfo_Epss_EpssProbability, Cvss = EXCLUDED.Cvss, Severity = EXCLUDED.Severity, ImpactScore = EXCLUDED.ImpactScore, Nvdcvss = EXCLUDED.Nvdcvss, FirstImageOccurrence = EXCLUDED.FirstImageOccurrence, State = EXCLUDED.State, IsFixable = EXCLUDED.IsFixable, FixedBy = EXCLUDED.FixedBy, ComponentId = EXCLUDED.ComponentId, advisory_name = EXCLUDED.advisory_name, serialized = EXCLUDED.serialized" + finalStr := "INSERT INTO image_cves_v2 (Id, ImageId, CveBaseInfo_Cve, CveBaseInfo_PublishedOn, CveBaseInfo_CreatedAt, CveBaseInfo_Epss_EpssProbability, Cvss, Severity, ImpactScore, Nvdcvss, FirstImageOccurrence, State, IsFixable, FixedBy, ComponentId, advisory_name, FixAvailableTimestamp, serialized) VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18) ON CONFLICT(Id) DO UPDATE SET Id = EXCLUDED.Id, ImageId = EXCLUDED.ImageId, CveBaseInfo_Cve = EXCLUDED.CveBaseInfo_Cve, CveBaseInfo_PublishedOn = EXCLUDED.CveBaseInfo_PublishedOn, CveBaseInfo_CreatedAt = EXCLUDED.CveBaseInfo_CreatedAt, CveBaseInfo_Epss_EpssProbability = EXCLUDED.CveBaseInfo_Epss_EpssProbability, Cvss = EXCLUDED.Cvss, Severity = EXCLUDED.Severity, ImpactScore = EXCLUDED.ImpactScore, Nvdcvss = EXCLUDED.Nvdcvss, FirstImageOccurrence = EXCLUDED.FirstImageOccurrence, State = EXCLUDED.State, IsFixable = EXCLUDED.IsFixable, FixedBy = EXCLUDED.FixedBy, ComponentId = EXCLUDED.ComponentId, advisory_name = EXCLUDED.advisory_name, FixAvailableTimestamp = EXCLUDED.FixAvailableTimestamp, serialized = EXCLUDED.serialized" batch.Queue(finalStr, values...) 
return nil diff --git a/central/imagev2/datastore/datastore.go b/central/imagev2/datastore/datastore.go index 822590f67a77d..1a78cb2debad5 100644 --- a/central/imagev2/datastore/datastore.go +++ b/central/imagev2/datastore/datastore.go @@ -3,6 +3,7 @@ package datastore import ( "context" + imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" "github.com/stackrox/rox/central/imagev2/datastore/store" "github.com/stackrox/rox/central/imagev2/views" "github.com/stackrox/rox/central/ranking" @@ -38,8 +39,8 @@ type DataStore interface { } // NewWithPostgres returns a new instance of DataStore using the input store, and searcher. -func NewWithPostgres(storage store.Store, risks riskDS.DataStore, imageRanker *ranking.Ranker, imageComponentRanker *ranking.Ranker) DataStore { - ds := newDatastoreImpl(storage, risks, imageRanker, imageComponentRanker) +func NewWithPostgres(storage store.Store, risks riskDS.DataStore, imageRanker *ranking.Ranker, imageComponentRanker *ranking.Ranker, imageCVEInfo imageCVEInfoDS.DataStore) DataStore { + ds := newDatastoreImpl(storage, risks, imageRanker, imageComponentRanker, imageCVEInfo) go ds.initializeRankers() return ds } diff --git a/central/imagev2/datastore/datastore_bench_postgres_test.go b/central/imagev2/datastore/datastore_bench_postgres_test.go index db7d1248ca418..97fcc86dcb9e7 100644 --- a/central/imagev2/datastore/datastore_bench_postgres_test.go +++ b/central/imagev2/datastore/datastore_bench_postgres_test.go @@ -7,6 +7,7 @@ import ( "fmt" "testing" + imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" "github.com/stackrox/rox/central/image/datastore/keyfence" pgStore "github.com/stackrox/rox/central/imagev2/datastore/store/postgres" "github.com/stackrox/rox/central/ranking" @@ -32,7 +33,8 @@ func BenchmarkImageGetMany(b *testing.B) { db := testDB.DB mockRisk := mockRisks.NewMockDataStore(gomock.NewController(b)) - datastore := NewWithPostgres(pgStore.New(db, false, keyfence.ImageKeyFenceSingleton()), mockRisk, ranking.NewRanker(), ranking.NewRanker()) + imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(b, db) + datastore := NewWithPostgres(pgStore.New(db, false, keyfence.ImageKeyFenceSingleton()), mockRisk, ranking.NewRanker(), ranking.NewRanker(), imageCVEInfo) ids := make([]string, 0, 100) images := make([]*storage.ImageV2, 0, 100) @@ -74,7 +76,8 @@ func BenchmarkImageUpsert(b *testing.B) { db := testDB.DB mockRisk := mockRisks.NewMockDataStore(gomock.NewController(b)) - datastore := NewWithPostgres(pgStore.New(db, false, keyfence.ImageKeyFenceSingleton()), mockRisk, ranking.NewRanker(), ranking.NewRanker()) + imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(b, db) + datastore := NewWithPostgres(pgStore.New(db, false, keyfence.ImageKeyFenceSingleton()), mockRisk, ranking.NewRanker(), ranking.NewRanker(), imageCVEInfo) images := make([]*storage.ImageV2, 0, 100) for i := 0; i < 100; i++ { diff --git a/central/imagev2/datastore/datastore_impl.go b/central/imagev2/datastore/datastore_impl.go index 461fb17c2bb27..6971d114fa920 100644 --- a/central/imagev2/datastore/datastore_impl.go +++ b/central/imagev2/datastore/datastore_impl.go @@ -6,6 +6,7 @@ import ( "time" "github.com/pkg/errors" + imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" "github.com/stackrox/rox/central/globaldb" "github.com/stackrox/rox/central/imagev2/datastore/store" "github.com/stackrox/rox/central/imagev2/views" @@ -15,9 +16,12 @@ import ( v1 "github.com/stackrox/rox/generated/api/v1" 
"github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/concurrency" + "github.com/stackrox/rox/pkg/cve" "github.com/stackrox/rox/pkg/errorhelpers" + "github.com/stackrox/rox/pkg/features" "github.com/stackrox/rox/pkg/images/utils" "github.com/stackrox/rox/pkg/logging" + "github.com/stackrox/rox/pkg/protocompat" "github.com/stackrox/rox/pkg/sac" "github.com/stackrox/rox/pkg/sac/resources" "github.com/stackrox/rox/pkg/scancomponent" @@ -40,10 +44,13 @@ type datastoreImpl struct { imageRanker *ranking.Ranker imageComponentRanker *ranking.Ranker + + imageCVEInfoDS imageCVEInfoDS.DataStore } func newDatastoreImpl(storage store.Store, risks riskDS.DataStore, - imageRanker *ranking.Ranker, imageComponentRanker *ranking.Ranker) *datastoreImpl { + imageRanker *ranking.Ranker, imageComponentRanker *ranking.Ranker, + imageCVEInfo imageCVEInfoDS.DataStore) *datastoreImpl { ds := &datastoreImpl{ storage: storage, @@ -52,6 +59,8 @@ func newDatastoreImpl(storage store.Store, risks riskDS.DataStore, imageRanker: imageRanker, imageComponentRanker: imageComponentRanker, + imageCVEInfoDS: imageCVEInfo, + keyedMutex: concurrency.NewKeyedMutex(globaldb.DefaultDataStorePoolSize), } return ds @@ -271,6 +280,20 @@ func (ds *datastoreImpl) UpsertImage(ctx context.Context, image *storage.ImageV2 ds.keyedMutex.Lock(image.GetId()) defer ds.keyedMutex.Unlock(image.GetId()) + if features.CVEFixTimestampCriteria.Enabled() { + // Populate the ImageCVEInfo lookup table with CVE timing metadata + if err := ds.upsertImageCVEInfos(ctx, image); err != nil { + log.Warnf("Failed to upsert ImageCVEInfo: %v", err) + // Non-fatal, continue with image upsert + } + + // Enrich the CVEs with accurate timestamps from lookup table + if err := ds.enrichCVEsFromImageCVEInfo(ctx, image); err != nil { + log.Warnf("Failed to enrich CVEs from ImageCVEInfo: %v", err) + // Non-fatal, continue with image upsert + } + } + ds.updateComponentRisk(image) utils.FillScanStatsV2(image) @@ -376,6 +399,83 @@ func (ds *datastoreImpl) updateComponentRisk(image *storage.ImageV2) { } } +// upsertImageCVEInfos populates the ImageCVEInfo lookup table with CVE timing metadata. +func (ds *datastoreImpl) upsertImageCVEInfos(ctx context.Context, image *storage.ImageV2) error { + if !features.CVEFixTimestampCriteria.Enabled() { + return nil + } + + infos := make([]*storage.ImageCVEInfo, 0) + now := protocompat.TimestampNow() + + for _, component := range image.GetScan().GetComponents() { + for _, vuln := range component.GetVulns() { + // Determine fix available timestamp: use scanner-provided value if available, + // otherwise fabricate from scan time if the CVE is fixable (has a fix version). + // This handles non-Red Hat data sources that don't provide fix timestamps. + fixAvailableTimestamp := vuln.GetFixAvailableTimestamp() + if fixAvailableTimestamp == nil && vuln.GetFixedBy() != "" { + fixAvailableTimestamp = now + } + + info := &storage.ImageCVEInfo{ + Id: cve.ImageCVEInfoID(vuln.GetCve(), component.GetName(), vuln.GetDatasource()), + FixAvailableTimestamp: fixAvailableTimestamp, + FirstSystemOccurrence: now, // Smart upsert in ImageCVEInfo datastore preserves existing + } + infos = append(infos, info) + } + } + + return ds.imageCVEInfoDS.UpsertMany(ctx, infos) +} + +// enrichCVEsFromImageCVEInfo enriches the image's CVEs with accurate timestamps from the lookup table. 
+func (ds *datastoreImpl) enrichCVEsFromImageCVEInfo(ctx context.Context, image *storage.ImageV2) error { + if !features.CVEFixTimestampCriteria.Enabled() { + return nil + } + + // Collect all IDs + ids := make([]string, 0) + for _, component := range image.GetScan().GetComponents() { + for _, vuln := range component.GetVulns() { + ids = append(ids, cve.ImageCVEInfoID(vuln.GetCve(), component.GetName(), vuln.GetDatasource())) + } + } + + if len(ids) == 0 { + return nil + } + + // Batch fetch + infos, err := ds.imageCVEInfoDS.GetBatch(ctx, ids) + if err != nil { + return err + } + + // Build lookup map + infoMap := make(map[string]*storage.ImageCVEInfo) + for _, info := range infos { + infoMap[info.GetId()] = info + } + + // Enrich CVEs and blank out datasource after using it + for _, component := range image.GetScan().GetComponents() { + for _, vuln := range component.GetVulns() { + id := cve.ImageCVEInfoID(vuln.GetCve(), component.GetName(), vuln.GetDatasource()) + if info, ok := infoMap[id]; ok { + vuln.FixAvailableTimestamp = info.GetFixAvailableTimestamp() + vuln.FirstSystemOccurrence = info.GetFirstSystemOccurrence() + } + // Blank out datasource after using it - this is internal scanner data not meant for end users + vuln.Datasource = "" + } + } + + return nil +} + // ImageSearchResultConverter implements search.SearchResultConverter for image search results. // This enables single-pass query construction for SearchResult protos. type ImageSearchResultConverter struct{} diff --git a/central/imagev2/datastore/datastore_test_constructors.go b/central/imagev2/datastore/datastore_test_constructors.go index 0fa3f8dc5e127..8ad2d8401c852 100644 --- a/central/imagev2/datastore/datastore_test_constructors.go +++ b/central/imagev2/datastore/datastore_test_constructors.go @@ -3,6 +3,7 @@ package datastore import ( "testing" + imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" "github.com/stackrox/rox/central/imagev2/datastore/keyfence" pgStore "github.com/stackrox/rox/central/imagev2/datastore/store/postgres" "github.com/stackrox/rox/central/ranking" @@ -16,5 +17,6 @@ func GetTestPostgresDataStore(t testing.TB, pool postgres.DB) DataStore { riskStore := riskDS.GetTestPostgresDataStore(t, pool) imageRanker := ranking.ImageRanker() imageComponentRanker := ranking.ComponentRanker() - return NewWithPostgres(dbstore, riskStore, imageRanker, imageComponentRanker) + imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(t, pool) + return NewWithPostgres(dbstore, riskStore, imageRanker, imageComponentRanker, imageCVEInfo) } diff --git a/central/imagev2/datastore/singleton.go b/central/imagev2/datastore/singleton.go index fe6f5fb3e6ed7..1bad2af434508 100644 --- a/central/imagev2/datastore/singleton.go +++ b/central/imagev2/datastore/singleton.go @@ -1,6 +1,7 @@ package datastore import ( + imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" "github.com/stackrox/rox/central/globaldb" "github.com/stackrox/rox/central/imagev2/datastore/keyfence" pgStore "github.com/stackrox/rox/central/imagev2/datastore/store/postgres" @@ -18,7 +19,7 @@ var ( func initialize() { storage := pgStore.New(globaldb.GetPostgres(), false, keyfence.ImageKeyFenceSingleton()) - ad = NewWithPostgres(storage, riskDS.Singleton(), ranking.ImageRanker(), ranking.ComponentRanker()) + ad = NewWithPostgres(storage, riskDS.Singleton(), ranking.ImageRanker(), ranking.ComponentRanker(), imageCVEInfoDS.Singleton()) } // Singleton provides the interface for non-service external interaction. 
diff --git a/central/imagev2/datastore/store/postgres/store.go b/central/imagev2/datastore/store/postgres/store.go index 9a43a71a66587..1127f419035a4 100644 --- a/central/imagev2/datastore/store/postgres/store.go +++ b/central/imagev2/datastore/store/postgres/store.go @@ -356,6 +356,7 @@ func copyFromImageComponentV2Cves(ctx context.Context, tx *postgres.Tx, iTime ti "componentid", "advisory_name", "advisory_link", + "fixavailabletimestamp", "serialized", } @@ -396,6 +397,7 @@ func copyFromImageComponentV2Cves(ctx context.Context, tx *postgres.Tx, iTime ti obj.GetComponentId(), obj.GetAdvisory().GetName(), obj.GetAdvisory().GetLink(), + protocompat.NilOrTime(obj.GetFixAvailableTimestamp()), serialized, }) @@ -1046,10 +1048,11 @@ func (s *storeImpl) insertIntoImageComponentV2Cves(batch *pgx.Batch, obj *storag obj.GetFixedBy(), obj.GetComponentId(), obj.GetAdvisory().GetName(), + protocompat.NilOrTime(obj.GetFixAvailableTimestamp()), serialized, } - finalStr := "INSERT INTO image_cves_v2 (Id, ImageIdV2, CveBaseInfo_Cve, CveBaseInfo_PublishedOn, CveBaseInfo_CreatedAt, CveBaseInfo_Epss_EpssProbability, Cvss, Severity, ImpactScore, Nvdcvss, FirstImageOccurrence, State, IsFixable, FixedBy, ComponentId, advisory_name, serialized) VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16,$17) ON CONFLICT(Id) DO UPDATE SET Id = EXCLUDED.Id, ImageIdV2 = EXCLUDED.ImageIdV2, CveBaseInfo_Cve = EXCLUDED.CveBaseInfo_Cve, CveBaseInfo_PublishedOn = EXCLUDED.CveBaseInfo_PublishedOn, CveBaseInfo_CreatedAt = EXCLUDED.CveBaseInfo_CreatedAt, CveBaseInfo_Epss_EpssProbability = EXCLUDED.CveBaseInfo_Epss_EpssProbability, Cvss = EXCLUDED.Cvss, Severity = EXCLUDED.Severity, ImpactScore = EXCLUDED.ImpactScore, Nvdcvss = EXCLUDED.Nvdcvss, FirstImageOccurrence = EXCLUDED.FirstImageOccurrence, State = EXCLUDED.State, IsFixable = EXCLUDED.IsFixable, FixedBy = EXCLUDED.FixedBy, ComponentId = EXCLUDED.ComponentId, advisory_name = EXCLUDED.advisory_name, serialized = EXCLUDED.serialized" + finalStr := "INSERT INTO image_cves_v2 (Id, ImageIdV2, CveBaseInfo_Cve, CveBaseInfo_PublishedOn, CveBaseInfo_CreatedAt, CveBaseInfo_Epss_EpssProbability, Cvss, Severity, ImpactScore, Nvdcvss, FirstImageOccurrence, State, IsFixable, FixedBy, ComponentId, advisory_name, FixAvailableTimestamp, serialized) VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18) ON CONFLICT(Id) DO UPDATE SET Id = EXCLUDED.Id, ImageIdV2 = EXCLUDED.ImageIdV2, CveBaseInfo_Cve = EXCLUDED.CveBaseInfo_Cve, CveBaseInfo_PublishedOn = EXCLUDED.CveBaseInfo_PublishedOn, CveBaseInfo_CreatedAt = EXCLUDED.CveBaseInfo_CreatedAt, CveBaseInfo_Epss_EpssProbability = EXCLUDED.CveBaseInfo_Epss_EpssProbability, Cvss = EXCLUDED.Cvss, Severity = EXCLUDED.Severity, ImpactScore = EXCLUDED.ImpactScore, Nvdcvss = EXCLUDED.Nvdcvss, FirstImageOccurrence = EXCLUDED.FirstImageOccurrence, State = EXCLUDED.State, IsFixable = EXCLUDED.IsFixable, FixedBy = EXCLUDED.FixedBy, ComponentId = EXCLUDED.ComponentId, advisory_name = EXCLUDED.advisory_name, FixAvailableTimestamp = EXCLUDED.FixAvailableTimestamp, serialized = EXCLUDED.serialized" batch.Queue(finalStr, values...) 
return nil diff --git a/central/imagev2/datastoretest/datastore_impl_test.go b/central/imagev2/datastoretest/datastore_impl_test.go index 3ed6c2147ca99..1e92e0eab8a53 100644 --- a/central/imagev2/datastoretest/datastore_impl_test.go +++ b/central/imagev2/datastoretest/datastore_impl_test.go @@ -8,6 +8,7 @@ import ( "sort" "testing" + imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" imageCVEDS "github.com/stackrox/rox/central/cve/image/v2/datastore" imageCVEPostgres "github.com/stackrox/rox/central/cve/image/v2/datastore/store/postgres" imageComponentDS "github.com/stackrox/rox/central/imagecomponent/v2/datastore" @@ -63,7 +64,8 @@ func (s *ImageV2DataStoreTestSuite) SetupSuite() { func (s *ImageV2DataStoreTestSuite) SetupTest() { s.mockRisk = mockRisks.NewMockDataStore(gomock.NewController(s.T())) dbStore := pgStore.New(s.testDB.DB, false, keyfence.ImageKeyFenceSingleton()) - s.datastore = imageDataStoreV2.NewWithPostgres(dbStore, s.mockRisk, ranking.NewRanker(), ranking.NewRanker()) + imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(s.T(), s.testDB.DB) + s.datastore = imageDataStoreV2.NewWithPostgres(dbStore, s.mockRisk, ranking.NewRanker(), ranking.NewRanker(), imageCVEInfo) componentStorage := imageComponentPostgres.New(s.testDB.DB) s.componentDataStore = imageComponentDS.New(componentStorage, s.mockRisk, ranking.NewRanker()) diff --git a/central/pruning/pruning_test.go b/central/pruning/pruning_test.go index dcb8873f09409..0bbd7141fd5aa 100644 --- a/central/pruning/pruning_test.go +++ b/central/pruning/pruning_test.go @@ -17,6 +17,7 @@ import ( configDatastore "github.com/stackrox/rox/central/config/datastore" configDatastoreMocks "github.com/stackrox/rox/central/config/datastore/mocks" clusterCVEDS "github.com/stackrox/rox/central/cve/cluster/datastore/mocks" + imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" nodeCVEDS "github.com/stackrox/rox/central/cve/node/datastore" deploymentDatastore "github.com/stackrox/rox/central/deployment/datastore" imageDatastore "github.com/stackrox/rox/central/image/datastore" @@ -253,12 +254,14 @@ func (s *PruningTestSuite) generateImageDataStructures(ctx context.Context) (ale var images imageDatastore.DataStore var imagesV2 imageV2Datastore.DataStore + imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(s.T(), s.pool) if features.FlattenImageData.Enabled() { imagesV2 = imageV2Datastore.NewWithPostgres( imageV2Postgres.New(s.pool, true, concurrency.NewKeyFence()), mockRiskDatastore, ranking.ImageRanker(), ranking.ComponentRanker(), + imageCVEInfo, ) } else { images = imageDatastore.NewWithPostgres( @@ -266,6 +269,7 @@ func (s *PruningTestSuite) generateImageDataStructures(ctx context.Context) (ale mockRiskDatastore, ranking.ImageRanker(), ranking.ComponentRanker(), + imageCVEInfo, ) } diff --git a/central/reprocessor/reprocessor_test.go b/central/reprocessor/reprocessor_test.go index d95e80c37a9db..abb034535cba2 100644 --- a/central/reprocessor/reprocessor_test.go +++ b/central/reprocessor/reprocessor_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" imageDatastore "github.com/stackrox/rox/central/image/datastore" imagePostgresV2 "github.com/stackrox/rox/central/image/datastore/store/v2/postgres" imageV2Datastore "github.com/stackrox/rox/central/imagev2/datastore" @@ -34,7 +35,8 @@ func TestImagesWithSignaturesQuery(t *testing.T) { pool := testingDB.DB defer pool.Close() - imageDS := 
imageDatastore.NewWithPostgres(imagePostgresV2.New(pool, false, concurrency.NewKeyFence()), nil, ranking.ImageRanker(), ranking.ComponentRanker()) + imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(t, pool) + imageDS := imageDatastore.NewWithPostgres(imagePostgresV2.New(pool, false, concurrency.NewKeyFence()), nil, ranking.ImageRanker(), ranking.ComponentRanker(), imageCVEInfo) imgWithSignature := fixtures.GetImage() imgWithoutSignature := fixtures.GetImageWithUniqueComponents(10) @@ -80,7 +82,8 @@ func TestImagesWithSignaturesQueryV2(t *testing.T) { pool := testingDB.DB defer pool.Close() - imageDS := imageV2Datastore.NewWithPostgres(imageV2PG.New(pool, false, concurrency.NewKeyFence()), nil, ranking.ImageRanker(), ranking.ComponentRanker()) + imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(t, pool) + imageDS := imageV2Datastore.NewWithPostgres(imageV2PG.New(pool, false, concurrency.NewKeyFence()), nil, ranking.ImageRanker(), ranking.ComponentRanker(), imageCVEInfo) imgWithSignature := fixtures.GetImageV2() imgWithoutSignature := fixtures.GetImageV2WithUniqueComponents(10) diff --git a/central/views/deployments/view_test.go b/central/views/deployments/view_test.go index 2a6cdb213d0da..a05c10c46bfcb 100644 --- a/central/views/deployments/view_test.go +++ b/central/views/deployments/view_test.go @@ -7,6 +7,7 @@ import ( "sort" "testing" + imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" deploymentDS "github.com/stackrox/rox/central/deployment/datastore" imageDS "github.com/stackrox/rox/central/image/datastore" imagePostgresV2 "github.com/stackrox/rox/central/image/datastore/store/v2/postgres" @@ -101,11 +102,13 @@ func (s *DeploymentViewTestSuite) SetupSuite() { mockRisk := mockRisks.NewMockDataStore(mockCtrl) // Initialize the datastore. + imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(s.T(), s.testDB.DB) imageStore := imageDS.NewWithPostgres( imagePostgresV2.New(s.testDB.DB, false, concurrency.NewKeyFence()), mockRisk, ranking.ImageRanker(), ranking.ComponentRanker(), + imageCVEInfo, ) deploymentStore, err := deploymentDS.NewTestDataStore( s.T(), diff --git a/central/views/imagecveflat/view_test.go b/central/views/imagecveflat/view_test.go index 9bb239e94698d..01a279b1d5e06 100644 --- a/central/views/imagecveflat/view_test.go +++ b/central/views/imagecveflat/view_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" imageCVEV2DS "github.com/stackrox/rox/central/cve/image/v2/datastore" deploymentDS "github.com/stackrox/rox/central/deployment/datastore" imageDS "github.com/stackrox/rox/central/image/datastore" @@ -118,11 +119,13 @@ func (s *ImageCVEFlatViewTestSuite) SetupSuite() { mockRisk := mockRisks.NewMockDataStore(mockCtrl) // Initialize the datastore. 
+ imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(s.T(), s.testDB.DB) imageStore := imageDS.NewWithPostgres( imagePostgresV2.New(s.testDB.DB, false, concurrency.NewKeyFence()), mockRisk, ranking.ImageRanker(), ranking.ComponentRanker(), + imageCVEInfo, ) deploymentStore, err := deploymentDS.NewTestDataStore( s.T(), diff --git a/central/vulnmgmt/vulnerabilityrequest/manager/querymgr/query_manager_impl_test.go b/central/vulnmgmt/vulnerabilityrequest/manager/querymgr/query_manager_impl_test.go index 0533a8ee1e377..5bbb0ca4d5064 100644 --- a/central/vulnmgmt/vulnerabilityrequest/manager/querymgr/query_manager_impl_test.go +++ b/central/vulnmgmt/vulnerabilityrequest/manager/querymgr/query_manager_impl_test.go @@ -6,6 +6,7 @@ import ( "context" "testing" + imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" imageDS "github.com/stackrox/rox/central/image/datastore" imagePostgresV2 "github.com/stackrox/rox/central/image/datastore/store/v2/postgres" imageV2DS "github.com/stackrox/rox/central/imagev2/datastore" @@ -73,11 +74,13 @@ func (s *VulnReqQueryManagerTestSuite) SetupTest() { } func (s *VulnReqQueryManagerTestSuite) createImageDataStore() { + imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(s.T(), s.testDB.DB) s.imageDataStore = imageDS.NewWithPostgres( imagePostgresV2.New(s.testDB.DB, false, concurrency.NewKeyFence()), mockRisks.NewMockDataStore(s.mockCtrl), ranking.NewRanker(), ranking.NewRanker(), + imageCVEInfo, ) if features.FlattenImageData.Enabled() { s.imageV2DataStore = imageV2DS.NewWithPostgres( @@ -85,6 +88,7 @@ func (s *VulnReqQueryManagerTestSuite) createImageDataStore() { mockRisks.NewMockDataStore(s.mockCtrl), ranking.NewRanker(), ranking.NewRanker(), + imageCVEInfo, ) } } diff --git a/central/vulnmgmt/vulnerabilityrequest/manager/requestmgr/manager_impl_flat_cve_data_test.go b/central/vulnmgmt/vulnerabilityrequest/manager/requestmgr/manager_impl_flat_cve_data_test.go index c0b99b4d81b35..d629494a40b10 100644 --- a/central/vulnmgmt/vulnerabilityrequest/manager/requestmgr/manager_impl_flat_cve_data_test.go +++ b/central/vulnmgmt/vulnerabilityrequest/manager/requestmgr/manager_impl_flat_cve_data_test.go @@ -6,6 +6,7 @@ import ( "context" "testing" + imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" imageDS "github.com/stackrox/rox/central/image/datastore" imagePostgresV2 "github.com/stackrox/rox/central/image/datastore/store/v2/postgres" imageV2DS "github.com/stackrox/rox/central/imagev2/datastore" @@ -55,12 +56,14 @@ func (m *managerImplTestFlatData) SetupSuite() { m.Require().NoError(err) var imageStore imageDS.DataStore var imageV2Store imageV2DS.DataStore + imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(m.T(), m.testDB.DB) if features.FlattenImageData.Enabled() { imageV2Store = imageV2DS.NewWithPostgres( imageV2Postgres.New(m.testDB.DB, false, concurrency.NewKeyFence()), mockRisk, ranking.ImageRanker(), ranking.ComponentRanker(), + imageCVEInfo, ) } else { imageStore = imageDS.NewWithPostgres( @@ -68,6 +71,7 @@ func (m *managerImplTestFlatData) SetupSuite() { mockRisk, ranking.ImageRanker(), ranking.ComponentRanker(), + imageCVEInfo, ) } m.manager = &managerImpl{ diff --git a/generated/storage/cve.pb.go b/generated/storage/cve.pb.go index 759a332d792af..89540f104688a 100644 --- a/generated/storage/cve.pb.go +++ b/generated/storage/cve.pb.go @@ -1584,12 +1584,14 @@ type ImageCVEV2 struct { // Types that are valid to be assigned to HasFixedBy: // // *ImageCVEV2_FixedBy - HasFixedBy isImageCVEV2_HasFixedBy 
`protobuf_oneof:"has_fixed_by"` - ComponentId string `protobuf:"bytes,13,opt,name=component_id,json=componentId,proto3" json:"component_id,omitempty" sql:"fk(ImageComponentV2:id),index=btree"` // @gotags: sql:"fk(ImageComponentV2:id),index=btree" - Advisory *Advisory `protobuf:"bytes,14,opt,name=advisory,proto3" json:"advisory,omitempty"` - ImageIdV2 string `protobuf:"bytes,15,opt,name=image_id_v2,json=imageIdV2,proto3" json:"image_id_v2,omitempty" sql:"fk(ImageV2:id),index=btree,allow-null"` // @gotags: sql:"fk(ImageV2:id),index=btree,allow-null" - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + HasFixedBy isImageCVEV2_HasFixedBy `protobuf_oneof:"has_fixed_by"` + ComponentId string `protobuf:"bytes,13,opt,name=component_id,json=componentId,proto3" json:"component_id,omitempty" sql:"fk(ImageComponentV2:id),index=btree"` // @gotags: sql:"fk(ImageComponentV2:id),index=btree" + Advisory *Advisory `protobuf:"bytes,14,opt,name=advisory,proto3" json:"advisory,omitempty"` + ImageIdV2 string `protobuf:"bytes,15,opt,name=image_id_v2,json=imageIdV2,proto3" json:"image_id_v2,omitempty" sql:"fk(ImageV2:id),index=btree,allow-null"` // @gotags: sql:"fk(ImageV2:id),index=btree,allow-null" + // Timestamp when the fix for this CVE was made available according to the sources. + FixAvailableTimestamp *timestamppb.Timestamp `protobuf:"bytes,16,opt,name=fix_available_timestamp,json=fixAvailableTimestamp,proto3" json:"fix_available_timestamp,omitempty" search:"CVE Fix Available Timestamp,hidden"` // @gotags: search:"CVE Fix Available Timestamp,hidden" + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ImageCVEV2) Reset() { @@ -1737,6 +1739,13 @@ func (x *ImageCVEV2) GetImageIdV2() string { return "" } +func (x *ImageCVEV2) GetFixAvailableTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.FixAvailableTimestamp + } + return nil +} + type isImageCVEV2_HasFixedBy interface { isImageCVEV2_HasFixedBy() } @@ -2680,7 +2689,7 @@ const file_storage_cve_proto_rawDesc = "" + "\anvdcvss\x18\n" + " \x01(\x02R\anvdcvss\x125\n" + "\fcvss_metrics\x18\v \x03(\v2\x12.storage.CVSSScoreR\vcvssMetrics\x12E\n" + - "\x11nvd_score_version\x18\f \x01(\x0e2\x19.storage.CvssScoreVersionR\x0fnvdScoreVersion:\x02\x18\x01\"\x88\x05\n" + + "\x11nvd_score_version\x18\f \x01(\x0e2\x19.storage.CvssScoreVersionR\x0fnvdScoreVersion:\x02\x18\x01\"\xdc\x05\n" + "\n" + "ImageCVEV2\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12\x1d\n" + @@ -2699,7 +2708,8 @@ const file_storage_cve_proto_rawDesc = "" + "\bfixed_by\x18\f \x01(\tH\x00R\afixedBy\x12!\n" + "\fcomponent_id\x18\r \x01(\tR\vcomponentId\x12-\n" + "\badvisory\x18\x0e \x01(\v2\x11.storage.AdvisoryR\badvisory\x12\x1e\n" + - "\vimage_id_v2\x18\x0f \x01(\tR\timageIdV2B\x0e\n" + + "\vimage_id_v2\x18\x0f \x01(\tR\timageIdV2\x12R\n" + + "\x17fix_available_timestamp\x18\x10 \x01(\v2\x1a.google.protobuf.TimestampR\x15fixAvailableTimestampB\x0e\n" + "\fhas_fixed_by\"\xe4\x03\n" + "\aNodeCVE\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x124\n" + @@ -2930,47 +2940,48 @@ var file_storage_cve_proto_depIdxs = []int32{ 35, // 31: storage.ImageCVEV2.first_image_occurrence:type_name -> google.protobuf.Timestamp 0, // 32: storage.ImageCVEV2.state:type_name -> storage.VulnerabilityState 22, // 33: storage.ImageCVEV2.advisory:type_name -> storage.Advisory - 21, // 34: storage.NodeCVE.cve_base_info:type_name -> storage.CVEInfo - 1, // 35: storage.NodeCVE.severity:type_name -> storage.VulnerabilitySeverity - 35, // 36: 
storage.NodeCVE.snooze_start:type_name -> google.protobuf.Timestamp - 35, // 37: storage.NodeCVE.snooze_expiry:type_name -> google.protobuf.Timestamp - 35, // 38: storage.NodeCVE.orphaned_time:type_name -> google.protobuf.Timestamp - 21, // 39: storage.ClusterCVE.cve_base_info:type_name -> storage.CVEInfo - 1, // 40: storage.ClusterCVE.severity:type_name -> storage.VulnerabilitySeverity - 35, // 41: storage.ClusterCVE.snooze_start:type_name -> google.protobuf.Timestamp - 35, // 42: storage.ClusterCVE.snooze_expiry:type_name -> google.protobuf.Timestamp - 4, // 43: storage.ClusterCVE.type:type_name -> storage.CVE.CVEType - 2, // 44: storage.CVSSScore.source:type_name -> storage.Source - 28, // 45: storage.CVSSScore.cvssv2:type_name -> storage.CVSSV2 - 29, // 46: storage.CVSSScore.cvssv3:type_name -> storage.CVSSV3 - 8, // 47: storage.CVSSV2.attack_vector:type_name -> storage.CVSSV2.AttackVector - 9, // 48: storage.CVSSV2.access_complexity:type_name -> storage.CVSSV2.AccessComplexity - 10, // 49: storage.CVSSV2.authentication:type_name -> storage.CVSSV2.Authentication - 7, // 50: storage.CVSSV2.confidentiality:type_name -> storage.CVSSV2.Impact - 7, // 51: storage.CVSSV2.integrity:type_name -> storage.CVSSV2.Impact - 7, // 52: storage.CVSSV2.availability:type_name -> storage.CVSSV2.Impact - 11, // 53: storage.CVSSV2.severity:type_name -> storage.CVSSV2.Severity - 13, // 54: storage.CVSSV3.attack_vector:type_name -> storage.CVSSV3.AttackVector - 14, // 55: storage.CVSSV3.attack_complexity:type_name -> storage.CVSSV3.Complexity - 15, // 56: storage.CVSSV3.privileges_required:type_name -> storage.CVSSV3.Privileges - 16, // 57: storage.CVSSV3.user_interaction:type_name -> storage.CVSSV3.UserInteraction - 17, // 58: storage.CVSSV3.scope:type_name -> storage.CVSSV3.Scope - 12, // 59: storage.CVSSV3.confidentiality:type_name -> storage.CVSSV3.Impact - 12, // 60: storage.CVSSV3.integrity:type_name -> storage.CVSSV3.Impact - 12, // 61: storage.CVSSV3.availability:type_name -> storage.CVSSV3.Impact - 18, // 62: storage.CVSSV3.severity:type_name -> storage.CVSSV3.Severity - 35, // 63: storage.ImageCVEInfo.fix_available_timestamp:type_name -> google.protobuf.Timestamp - 35, // 64: storage.ImageCVEInfo.first_system_occurrence:type_name -> google.protobuf.Timestamp - 1, // 65: storage.CVE.DistroSpecific.severity:type_name -> storage.VulnerabilitySeverity - 5, // 66: storage.CVE.DistroSpecific.score_version:type_name -> storage.CVE.ScoreVersion - 28, // 67: storage.CVE.DistroSpecific.cvss_v2:type_name -> storage.CVSSV2 - 29, // 68: storage.CVE.DistroSpecific.cvss_v3:type_name -> storage.CVSSV3 - 31, // 69: storage.CVE.DistroSpecificsEntry.value:type_name -> storage.CVE.DistroSpecific - 70, // [70:70] is the sub-list for method output_type - 70, // [70:70] is the sub-list for method input_type - 70, // [70:70] is the sub-list for extension type_name - 70, // [70:70] is the sub-list for extension extendee - 0, // [0:70] is the sub-list for field type_name + 35, // 34: storage.ImageCVEV2.fix_available_timestamp:type_name -> google.protobuf.Timestamp + 21, // 35: storage.NodeCVE.cve_base_info:type_name -> storage.CVEInfo + 1, // 36: storage.NodeCVE.severity:type_name -> storage.VulnerabilitySeverity + 35, // 37: storage.NodeCVE.snooze_start:type_name -> google.protobuf.Timestamp + 35, // 38: storage.NodeCVE.snooze_expiry:type_name -> google.protobuf.Timestamp + 35, // 39: storage.NodeCVE.orphaned_time:type_name -> google.protobuf.Timestamp + 21, // 40: storage.ClusterCVE.cve_base_info:type_name -> 
storage.CVEInfo + 1, // 41: storage.ClusterCVE.severity:type_name -> storage.VulnerabilitySeverity + 35, // 42: storage.ClusterCVE.snooze_start:type_name -> google.protobuf.Timestamp + 35, // 43: storage.ClusterCVE.snooze_expiry:type_name -> google.protobuf.Timestamp + 4, // 44: storage.ClusterCVE.type:type_name -> storage.CVE.CVEType + 2, // 45: storage.CVSSScore.source:type_name -> storage.Source + 28, // 46: storage.CVSSScore.cvssv2:type_name -> storage.CVSSV2 + 29, // 47: storage.CVSSScore.cvssv3:type_name -> storage.CVSSV3 + 8, // 48: storage.CVSSV2.attack_vector:type_name -> storage.CVSSV2.AttackVector + 9, // 49: storage.CVSSV2.access_complexity:type_name -> storage.CVSSV2.AccessComplexity + 10, // 50: storage.CVSSV2.authentication:type_name -> storage.CVSSV2.Authentication + 7, // 51: storage.CVSSV2.confidentiality:type_name -> storage.CVSSV2.Impact + 7, // 52: storage.CVSSV2.integrity:type_name -> storage.CVSSV2.Impact + 7, // 53: storage.CVSSV2.availability:type_name -> storage.CVSSV2.Impact + 11, // 54: storage.CVSSV2.severity:type_name -> storage.CVSSV2.Severity + 13, // 55: storage.CVSSV3.attack_vector:type_name -> storage.CVSSV3.AttackVector + 14, // 56: storage.CVSSV3.attack_complexity:type_name -> storage.CVSSV3.Complexity + 15, // 57: storage.CVSSV3.privileges_required:type_name -> storage.CVSSV3.Privileges + 16, // 58: storage.CVSSV3.user_interaction:type_name -> storage.CVSSV3.UserInteraction + 17, // 59: storage.CVSSV3.scope:type_name -> storage.CVSSV3.Scope + 12, // 60: storage.CVSSV3.confidentiality:type_name -> storage.CVSSV3.Impact + 12, // 61: storage.CVSSV3.integrity:type_name -> storage.CVSSV3.Impact + 12, // 62: storage.CVSSV3.availability:type_name -> storage.CVSSV3.Impact + 18, // 63: storage.CVSSV3.severity:type_name -> storage.CVSSV3.Severity + 35, // 64: storage.ImageCVEInfo.fix_available_timestamp:type_name -> google.protobuf.Timestamp + 35, // 65: storage.ImageCVEInfo.first_system_occurrence:type_name -> google.protobuf.Timestamp + 1, // 66: storage.CVE.DistroSpecific.severity:type_name -> storage.VulnerabilitySeverity + 5, // 67: storage.CVE.DistroSpecific.score_version:type_name -> storage.CVE.ScoreVersion + 28, // 68: storage.CVE.DistroSpecific.cvss_v2:type_name -> storage.CVSSV2 + 29, // 69: storage.CVE.DistroSpecific.cvss_v3:type_name -> storage.CVSSV3 + 31, // 70: storage.CVE.DistroSpecificsEntry.value:type_name -> storage.CVE.DistroSpecific + 71, // [71:71] is the sub-list for method output_type + 71, // [71:71] is the sub-list for method input_type + 71, // [71:71] is the sub-list for extension type_name + 71, // [71:71] is the sub-list for extension extendee + 0, // [0:71] is the sub-list for field type_name } func init() { file_storage_cve_proto_init() } diff --git a/generated/storage/cve_vtproto.pb.go b/generated/storage/cve_vtproto.pb.go index 40fba8ceab89f..35eb569be525f 100644 --- a/generated/storage/cve_vtproto.pb.go +++ b/generated/storage/cve_vtproto.pb.go @@ -269,6 +269,7 @@ func (m *ImageCVEV2) CloneVT() *ImageCVEV2 { r.ComponentId = m.ComponentId r.Advisory = m.Advisory.CloneVT() r.ImageIdV2 = m.ImageIdV2 + r.FixAvailableTimestamp = (*timestamppb.Timestamp)((*timestamppb1.Timestamp)(m.FixAvailableTimestamp).CloneVT()) if m.HasFixedBy != nil { r.HasFixedBy = m.HasFixedBy.(interface { CloneVT() isImageCVEV2_HasFixedBy @@ -907,6 +908,9 @@ func (this *ImageCVEV2) EqualVT(that *ImageCVEV2) bool { if this.ImageIdV2 != that.ImageIdV2 { return false } + if 
!(*timestamppb1.Timestamp)(this.FixAvailableTimestamp).EqualVT((*timestamppb1.Timestamp)(that.FixAvailableTimestamp)) { + return false + } return string(this.unknownFields) == string(that.unknownFields) } @@ -2033,6 +2037,18 @@ func (m *ImageCVEV2) MarshalToSizedBufferVT(dAtA []byte) (int, error) { } i -= size } + if m.FixAvailableTimestamp != nil { + size, err := (*timestamppb1.Timestamp)(m.FixAvailableTimestamp).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } if len(m.ImageIdV2) > 0 { i -= len(m.ImageIdV2) copy(dAtA[i:], m.ImageIdV2) @@ -3101,6 +3117,10 @@ func (m *ImageCVEV2) SizeVT() (n int) { if l > 0 { n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } + if m.FixAvailableTimestamp != nil { + l = (*timestamppb1.Timestamp)(m.FixAvailableTimestamp).SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } n += len(m.unknownFields) return n } @@ -5894,6 +5914,42 @@ func (m *ImageCVEV2) UnmarshalVT(dAtA []byte) error { } m.ImageIdV2 = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FixAvailableTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FixAvailableTimestamp == nil { + m.FixAvailableTimestamp = ×tamppb.Timestamp{} + } + if err := (*timestamppb1.Timestamp)(m.FixAvailableTimestamp).UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -10007,6 +10063,42 @@ func (m *ImageCVEV2) UnmarshalVTUnsafe(dAtA []byte) error { } m.ImageIdV2 = stringValue iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FixAvailableTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FixAvailableTimestamp == nil { + m.FixAvailableTimestamp = ×tamppb.Timestamp{} + } + if err := (*timestamppb1.Timestamp)(m.FixAvailableTimestamp).UnmarshalVTUnsafe(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) diff --git a/pkg/booleanpolicy/violationmessages/printer.go b/pkg/booleanpolicy/violationmessages/printer.go index e7e56e6bd2554..a0c05b6782c0e 100644 --- a/pkg/booleanpolicy/violationmessages/printer.go +++ b/pkg/booleanpolicy/violationmessages/printer.go @@ -33,6 +33,7 @@ var ( fieldnames.DaysSincePublished: {{required: set.NewStringSet(search.CVE.String(), search.CVEPublishedOn.String()), printerFuncKey: printer.CveKey}}, 
fieldnames.DaysSinceImageFirstDiscovered: {{required: set.NewStringSet(search.CVE.String(), search.FirstImageOccurrenceTimestamp.String()), printerFuncKey: printer.CveKey}}, fieldnames.DaysSinceSystemFirstDiscovered: {{required: set.NewStringSet(search.CVE.String(), search.FirstSystemOccurrenceTimestamp.String()), printerFuncKey: printer.CveKey}}, + fieldnames.DaysSinceFixAvailable: {{required: set.NewStringSet(search.CVE.String(), search.CVEFixAvailable.String()), printerFuncKey: printer.CveKey}}, fieldnames.DisallowedAnnotation: {{required: set.NewStringSet(search.DeploymentAnnotation.String()), printerFuncKey: printer.DisallowedAnnotationKey}}, fieldnames.DisallowedImageLabel: {{required: set.NewStringSet(search.ImageLabel.String()), printerFuncKey: printer.DisallowedImageLabelKey}}, fieldnames.DockerfileLine: {{required: set.NewStringSet(augmentedobjs.DockerfileLineCustomTag), printerFuncKey: printer.LineKey}}, diff --git a/pkg/cve/cve.go b/pkg/cve/cve.go index 32086230d5ba3..4a790689259fe 100644 --- a/pkg/cve/cve.go +++ b/pkg/cve/cve.go @@ -38,3 +38,11 @@ func IDToParts(id string) (string, string) { } return parts[0], "" } + +// ImageCVEInfoID creates a composite ID for ImageCVEInfo. +// Format: cve#package#datasource (uses standard ID delimiter) +// Example: "CVE-2021-1234#curl#debian-bookworm-updater::debian:12" +// For Red Hat vulns, datasource will be empty string. +func ImageCVEInfoID(cve, packageName, datasource string) string { + return pgSearch.IDFromPks([]string{cve, packageName, datasource}) +} diff --git a/pkg/postgres/schema/image_cves_v2.go b/pkg/postgres/schema/image_cves_v2.go index d0341f62bdd8a..d487e5a5ae038 100644 --- a/pkg/postgres/schema/image_cves_v2.go +++ b/pkg/postgres/schema/image_cves_v2.go @@ -81,6 +81,7 @@ type ImageCvesV2 struct { AdvisoryName string `gorm:"column:advisory_name;type:varchar"` AdvisoryLink string `gorm:"column:advisory_link;type:varchar"` ImageIDV2 string `gorm:"column:imageidv2;type:varchar;index:imagecvesv2_imageidv2,type:btree"` + FixAvailableTimestamp *time.Time `gorm:"column:fixavailabletimestamp;type:timestamp"` Serialized []byte `gorm:"column:serialized;type:bytea"` ImagesRef Images `gorm:"foreignKey:imageid;references:id;belongsTo;constraint:OnDelete:CASCADE"` ImageComponentV2Ref ImageComponentV2 `gorm:"foreignKey:componentid;references:id;belongsTo;constraint:OnDelete:CASCADE"` diff --git a/proto/storage/cve.proto b/proto/storage/cve.proto index da323bbe6c935..5a932d6db6d69 100644 --- a/proto/storage/cve.proto +++ b/proto/storage/cve.proto @@ -196,6 +196,9 @@ message ImageCVEV2 { storage.Advisory advisory = 14; string image_id_v2 = 15; // @gotags: sql:"fk(ImageV2:id),index=btree,allow-null" + + // Timestamp when the fix for this CVE was made available according to the sources. 
+ google.protobuf.Timestamp fix_available_timestamp = 16; // @gotags: search:"CVE Fix Available Timestamp,hidden" } message NodeCVE { diff --git a/proto/storage/proto.lock b/proto/storage/proto.lock index dd081ddf7697a..675c3ca9ab68b 100644 --- a/proto/storage/proto.lock +++ b/proto/storage/proto.lock @@ -6598,6 +6598,11 @@ "id": 15, "name": "image_id_v2", "type": "string" + }, + { + "id": 16, + "name": "fix_available_timestamp", + "type": "google.protobuf.Timestamp" } ] }, From 9a4a04823cfc87dfbaca03da07c5ac9904388863 Mon Sep 17 00:00:00 2001 From: Khushboo Sancheti <42253461+clickboo@users.noreply.github.com> Date: Tue, 27 Jan 2026 01:47:02 +0530 Subject: [PATCH 014/232] chore(be): Enable the CVE Fix timestamp feature flag by default (#18671) --- CHANGELOG.md | 1 + pkg/features/list.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7c51d8dd67efd..c304847b31bd6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ Changes should still be described appropriately in JIRA/doc input pages, for inc - ROX-31738: Added the `spec.customize.deploymentDefaults` field to Central and SecuredCluster CRDs, for configuring global default scheduling constraints for Deployments. This was previously possible on a per-component basis. - ROX-30094, ROX-30610, ROX-30740: Add new namespaces to Layered Products default config regex. - ROX-31960, ROX-32449: include and exclude filters for custom metrics. +- ROX-30641: Added a new policy criteria "Days Since CVE Fix Was Available". ### Removed Features - ROX-31727: `/v1/cve/requests` APIs (deprecated in 4.3.0) for managing vulnerability exceptions have been removed. diff --git a/pkg/features/list.go b/pkg/features/list.go index a83570db3b2b1..61658416e8d06 100644 --- a/pkg/features/list.go +++ b/pkg/features/list.go @@ -111,7 +111,7 @@ var ( SensitiveFileActivity = registerFeature("Enable sensitive file monitoring", "ROX_SENSITIVE_FILE_ACTIVITY", enabled) // CVEFixTimestampCriteria enables the new CVE Fix timestamp criteria - CVEFixTimestampCriteria = registerFeature("Enable grace period criteria based on CVE fix timestamp", "ROX_CVE_FIX_TIMESTAMP") + CVEFixTimestampCriteria = registerFeature("Enable grace period criteria based on CVE fix timestamp", "ROX_CVE_FIX_TIMESTAMP", enabled) // BaseImageDetection enables base image detection and management functionality. 
BaseImageDetection = registerFeature("Enable base image detection and management functionality", "ROX_BASE_IMAGE_DETECTION") From 0aa4ebc44fa48fed1b6c85afe8c1f5dd9cf4f7ef Mon Sep 17 00:00:00 2001 From: Giles Hutton Date: Mon, 26 Jan 2026 20:31:01 +0000 Subject: [PATCH 015/232] chore(fim): tidy up for release (#18672) --- generated/api/v1/alert_service.swagger.json | 1 - .../api/v1/detection_service.swagger.json | 1 - generated/storage/file_access.pb.go | 18 +++++++----------- pkg/booleanpolicy/validate_test.go | 2 +- pkg/booleanpolicy/value_regex.go | 2 +- .../printer/file_access_test.go | 9 --------- proto/storage/file_access.proto | 3 +-- proto/storage/proto.lock | 6 +----- sensor/common/detector/detector.go | 1 + sensor/common/filesystem/pipeline/pipeline.go | 13 ------------- .../Wizard/Step3/policyCriteriaDescriptors.tsx | 3 +-- ui/apps/platform/src/types/fileAccess.proto.ts | 1 - 12 files changed, 13 insertions(+), 47 deletions(-) diff --git a/generated/api/v1/alert_service.swagger.json b/generated/api/v1/alert_service.swagger.json index a7a4f224b1578..9ae883792eb35 100644 --- a/generated/api/v1/alert_service.swagger.json +++ b/generated/api/v1/alert_service.swagger.json @@ -760,7 +760,6 @@ "RENAME", "PERMISSION_CHANGE", "OWNERSHIP_CHANGE", - "WRITE", "OPEN" ], "default": "CREATE" diff --git a/generated/api/v1/detection_service.swagger.json b/generated/api/v1/detection_service.swagger.json index e36aedd56f64c..ba16f7d267757 100644 --- a/generated/api/v1/detection_service.swagger.json +++ b/generated/api/v1/detection_service.swagger.json @@ -293,7 +293,6 @@ "RENAME", "PERMISSION_CHANGE", "OWNERSHIP_CHANGE", - "WRITE", "OPEN" ], "default": "CREATE" diff --git a/generated/storage/file_access.pb.go b/generated/storage/file_access.pb.go index 53425bfc843d1..96c13fb6edb47 100644 --- a/generated/storage/file_access.pb.go +++ b/generated/storage/file_access.pb.go @@ -30,8 +30,7 @@ const ( FileAccess_RENAME FileAccess_Operation = 2 FileAccess_PERMISSION_CHANGE FileAccess_Operation = 3 FileAccess_OWNERSHIP_CHANGE FileAccess_Operation = 4 - FileAccess_WRITE FileAccess_Operation = 5 - FileAccess_OPEN FileAccess_Operation = 6 + FileAccess_OPEN FileAccess_Operation = 5 ) // Enum value maps for FileAccess_Operation. 
@@ -42,8 +41,7 @@ var ( 2: "RENAME", 3: "PERMISSION_CHANGE", 4: "OWNERSHIP_CHANGE", - 5: "WRITE", - 6: "OPEN", + 5: "OPEN", } FileAccess_Operation_value = map[string]int32{ "CREATE": 0, @@ -51,8 +49,7 @@ var ( "RENAME": 2, "PERMISSION_CHANGE": 3, "OWNERSHIP_CHANGE": 4, - "WRITE": 5, - "OPEN": 6, + "OPEN": 5, } ) @@ -327,7 +324,7 @@ var File_storage_file_access_proto protoreflect.FileDescriptor const file_storage_file_access_proto_rawDesc = "" + "\n" + - "\x19storage/file_access.proto\x12\astorage\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1fstorage/process_indicator.proto\"\xa6\x05\n" + + "\x19storage/file_access.proto\x12\astorage\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1fstorage/process_indicator.proto\"\x9b\x05\n" + "\n" + "FileAccess\x12,\n" + "\x04file\x18\x01 \x01(\v2\x18.storage.FileAccess.FileR\x04file\x12;\n" + @@ -346,7 +343,7 @@ const file_storage_file_access_proto_rawDesc = "" + "\x0eeffective_path\x18\x01 \x01(\tR\reffectivePath\x12\x1f\n" + "\vactual_path\x18\x02 \x01(\tR\n" + "actualPath\x124\n" + - "\x04meta\x18\x03 \x01(\v2 .storage.FileAccess.FileMetadataR\x04meta\"q\n" + + "\x04meta\x18\x03 \x01(\v2 .storage.FileAccess.FileMetadataR\x04meta\"f\n" + "\tOperation\x12\n" + "\n" + "\x06CREATE\x10\x00\x12\n" + @@ -355,9 +352,8 @@ const file_storage_file_access_proto_rawDesc = "" + "\n" + "\x06RENAME\x10\x02\x12\x15\n" + "\x11PERMISSION_CHANGE\x10\x03\x12\x14\n" + - "\x10OWNERSHIP_CHANGE\x10\x04\x12\t\n" + - "\x05WRITE\x10\x05\x12\b\n" + - "\x04OPEN\x10\x06B.\n" + + "\x10OWNERSHIP_CHANGE\x10\x04\x12\b\n" + + "\x04OPEN\x10\x05B.\n" + "\x19io.stackrox.proto.storageZ\x11./storage;storageb\x06proto3" var ( diff --git a/pkg/booleanpolicy/validate_test.go b/pkg/booleanpolicy/validate_test.go index 405633ca8cc72..db3dad980742e 100644 --- a/pkg/booleanpolicy/validate_test.go +++ b/pkg/booleanpolicy/validate_test.go @@ -115,7 +115,7 @@ func (s *PolicyValueValidator) TestRegex() { }, { name: "file operation", - valid: []string{"OPEN", "CREATE", "RENAME", "UNLINK", "OWNERSHIP_CHANGE", "PERMISSION_CHANGE", "open", "create", "rename", "unlink", "ownership_change", "permission_change", "Open", "Create"}, + valid: []string{"OPEN", "CREATE", "UNLINK", "OWNERSHIP_CHANGE", "PERMISSION_CHANGE", "open", "create", "unlink", "ownership_change", "permission_change", "Open", "Create"}, invalid: []string{"", " ", "READ", "WRITE", "DELETE", "INVALID_OPERATION", "MODIFY", "ACCESS"}, r: fileOperationRegex, }, diff --git a/pkg/booleanpolicy/value_regex.go b/pkg/booleanpolicy/value_regex.go index 3a2f2b081ce76..ee4422b2ebeae 100644 --- a/pkg/booleanpolicy/value_regex.go +++ b/pkg/booleanpolicy/value_regex.go @@ -48,7 +48,7 @@ var ( ipAddressValueRegex = createRegex(fmt.Sprintf(`(%s)|(%s)`, ipv4Regex, ipv6Regex)) signatureIntegrationIDValueRegex = createRegex(regexp.QuoteMeta(signatures.SignatureIntegrationIDPrefix) + "[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}") allowedFilePathRegex = createRegex(fmt.Sprintf("(?i:%s)", strings.Join(filePathAllowedStrings, "|"))) - fileOperationRegex = createRegex(`(?i:OPEN|CREATE|RENAME|UNLINK|OWNERSHIP_CHANGE|PERMISSION_CHANGE)`) + fileOperationRegex = createRegex(`(?i:OPEN|CREATE|UNLINK|OWNERSHIP_CHANGE|PERMISSION_CHANGE)`) ) func createRegex(s string) *regexp.Regexp { diff --git a/pkg/booleanpolicy/violationmessages/printer/file_access_test.go b/pkg/booleanpolicy/violationmessages/printer/file_access_test.go index 60eb28eae4d4e..a771b8f7c3308 100644 --- a/pkg/booleanpolicy/violationmessages/printer/file_access_test.go +++ 
b/pkg/booleanpolicy/violationmessages/printer/file_access_test.go @@ -94,15 +94,6 @@ func TestUpdateFileAccessMessage(t *testing.T) { }, expected: "'/tmp/chown_file' accessed (OWNERSHIP_CHANGE)", }, - { - desc: "file WRITE operation", - activity: &storage.FileAccess{ - File: &storage.FileAccess_File{ActualPath: "/etc/passwd"}, - Operation: storage.FileAccess_WRITE, - Process: &storage.ProcessIndicator{Signal: &storage.ProcessSignal{Name: "vim"}}, - }, - expected: "'/etc/passwd' accessed (WRITE)", - }, { desc: "nil file path handling", activity: &storage.FileAccess{ diff --git a/proto/storage/file_access.proto b/proto/storage/file_access.proto index c702ad5189a54..713e233167005 100644 --- a/proto/storage/file_access.proto +++ b/proto/storage/file_access.proto @@ -45,8 +45,7 @@ message FileAccess { RENAME = 2; PERMISSION_CHANGE = 3; OWNERSHIP_CHANGE = 4; - WRITE = 5; - OPEN = 6; + OPEN = 5; } File file = 1; diff --git a/proto/storage/proto.lock b/proto/storage/proto.lock index 675c3ca9ab68b..76dfd308e6c5e 100644 --- a/proto/storage/proto.lock +++ b/proto/storage/proto.lock @@ -8408,13 +8408,9 @@ "name": "OWNERSHIP_CHANGE", "integer": 4 }, - { - "name": "WRITE", - "integer": 5 - }, { "name": "OPEN", - "integer": 6 + "integer": 5 } ] } diff --git a/sensor/common/detector/detector.go b/sensor/common/detector/detector.go index 0cd3cae27c3cb..2c140ca7cdcf4 100644 --- a/sensor/common/detector/detector.go +++ b/sensor/common/detector/detector.go @@ -907,6 +907,7 @@ func (d *detectorImpl) processFileAccess() { continue } + log.Debugf("%d violations for '%v' (%s)", len(alerts), item.Access.GetFile().GetEffectivePath(), item.Access.GetOperation()) alertResults := ¢ral.AlertResults{ DeploymentId: item.Access.GetProcess().GetDeploymentId(), Alerts: alerts, diff --git a/sensor/common/filesystem/pipeline/pipeline.go b/sensor/common/filesystem/pipeline/pipeline.go index a9695fadfbae1..05832daf81631 100644 --- a/sensor/common/filesystem/pipeline/pipeline.go +++ b/sensor/common/filesystem/pipeline/pipeline.go @@ -11,7 +11,6 @@ import ( "github.com/stackrox/rox/pkg/uuid" "github.com/stackrox/rox/sensor/common/clusterentities" "github.com/stackrox/rox/sensor/common/detector" - fsUtils "github.com/stackrox/rox/sensor/common/filesystem/utils" ) var ( @@ -95,12 +94,6 @@ func (p *Pipeline) translate(fs *sensorAPI.FileActivity) *storage.FileAccess { }, } access.Operation = storage.FileAccess_OWNERSHIP_CHANGE - case *sensorAPI.FileActivity_Write: - access.File = &storage.FileAccess_File{ - EffectivePath: fs.GetWrite().GetActivity().GetPath(), - ActualPath: fs.GetWrite().GetActivity().GetHostPath(), - } - access.Operation = storage.FileAccess_WRITE case *sensorAPI.FileActivity_Open: access.File = &storage.FileAccess_File{ EffectivePath: fs.GetOpen().GetActivity().GetPath(), @@ -112,11 +105,6 @@ func (p *Pipeline) translate(fs *sensorAPI.FileActivity) *storage.FileAccess { return nil } - if fsUtils.IsNodeFileAccess(access) { - // TODO: remove when full host path resolution is complete - access.File.ActualPath = access.GetFile().GetEffectivePath() - } - return access } @@ -154,7 +142,6 @@ func (p *Pipeline) getIndicator(process *sensorAPI.ProcessSignal) *storage.Proce return pi } - // TODO(ROX-30798): Enrich file system events with deployment details metadata, ok, _ := p.clusterEntities.LookupByContainerID(process.GetContainerId()) if !ok { // unexpected - process should exist before file activity is diff --git a/ui/apps/platform/src/Containers/Policies/Wizard/Step3/policyCriteriaDescriptors.tsx 
b/ui/apps/platform/src/Containers/Policies/Wizard/Step3/policyCriteriaDescriptors.tsx index de70ac15982c8..4d49a3e49c593 100644 --- a/ui/apps/platform/src/Containers/Policies/Wizard/Step3/policyCriteriaDescriptors.tsx +++ b/ui/apps/platform/src/Containers/Policies/Wizard/Step3/policyCriteriaDescriptors.tsx @@ -130,10 +130,9 @@ const APIVerbs: DescriptorOption[] = ['CREATE', 'DELETE', 'GET', 'PATCH', 'UPDAT })); const fileOperationOptions: DescriptorOption[] = [ - ['OPEN', 'Open'], + ['OPEN', 'Open (Writable)'], ['CREATE', 'Create'], ['UNLINK', 'Delete'], - ['RENAME', 'Rename'], ['PERMISSION_CHANGE', 'Permission change'], ['OWNERSHIP_CHANGE', 'Ownership change'], ].map(([value, label]) => ({ value, label })); diff --git a/ui/apps/platform/src/types/fileAccess.proto.ts b/ui/apps/platform/src/types/fileAccess.proto.ts index 8fa08ffba16d1..328bd4ca5ee70 100644 --- a/ui/apps/platform/src/types/fileAccess.proto.ts +++ b/ui/apps/platform/src/types/fileAccess.proto.ts @@ -29,5 +29,4 @@ export type FileOperation = | 'RENAME' | 'PERMISSION_CHANGE' | 'OWNERSHIP_CHANGE' - | 'WRITE' | 'OPEN'; From ea0cfa3062fc32edf9afa8cb67c65105c69cc21b Mon Sep 17 00:00:00 2001 From: David Caravello <119438707+dcaravel@users.noreply.github.com> Date: Mon, 26 Jan 2026 16:49:10 -0600 Subject: [PATCH 016/232] ROX-32422: remove filtering datasource for RH vulns (#18679) --- pkg/scanners/scannerv4/convert.go | 10 ++-------- pkg/scanners/scannerv4/convert_test.go | 7 ------- 2 files changed, 2 insertions(+), 15 deletions(-) diff --git a/pkg/scanners/scannerv4/convert.go b/pkg/scanners/scannerv4/convert.go index 86b2f14136e9c..f3078dba9e9f4 100644 --- a/pkg/scanners/scannerv4/convert.go +++ b/pkg/scanners/scannerv4/convert.go @@ -234,9 +234,8 @@ func vulnerabilities(vulnerabilities map[string]*v4.VulnerabilityReport_Vulnerab // - Equality comparisons // - Storage/retrieval as a database key // -// The datasource will NOT be populated for Red Hat sourced vulnerabilities to discourage -// usage given additional fields are required to represent their uniqueness that are not currently -// exposed to Scanner V4 clients. +// For Red Hat vulns the product (repo, cpe, etc.) is also needed to uniquely represent the +// vuln which is NOT included in the returned datasource. // // Examples: // - OS vulnerabilities: "updater::os" (e.g., "debian-bookworm-updater::debian:12") @@ -248,11 +247,6 @@ func vulnDataSource(ccVuln *v4.VulnerabilityReport_Vulnerability, os string) str return "" } - // Do not populate datasource if vuln comes from Red Hat. 
- if strings.HasPrefix(ccVuln.GetUpdater(), "rhel") { - return "" - } - if os == "" { // ie: "osv/go", "nvd" return ccVuln.GetUpdater() diff --git a/pkg/scanners/scannerv4/convert_test.go b/pkg/scanners/scannerv4/convert_test.go index e330d6da7bdf7..70880c6e0b55b 100644 --- a/pkg/scanners/scannerv4/convert_test.go +++ b/pkg/scanners/scannerv4/convert_test.go @@ -1605,13 +1605,6 @@ func TestVulnDataSource(t *testing.T) { os: "os", ccVuln: &v4.VulnerabilityReport_Vulnerability{}, }, - { - expected: "", - os: "os", - ccVuln: &v4.VulnerabilityReport_Vulnerability{ - Updater: "rhel-vex", - }, - }, { expected: "updater", os: "", From 07dc3578882dea8b7eb55b91f81caf4ad2604212 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Jan 2026 20:17:57 -0500 Subject: [PATCH 017/232] chore(deps): bump lodash from 4.17.21 to 4.17.23 in /ui/apps/platform (#18619) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- ui/apps/platform/package-lock.json | 9 +++++---- ui/apps/platform/package.json | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/ui/apps/platform/package-lock.json b/ui/apps/platform/package-lock.json index dc627cd569594..577436a9fe529 100644 --- a/ui/apps/platform/package-lock.json +++ b/ui/apps/platform/package-lock.json @@ -44,7 +44,7 @@ "js-base64": "^3.7.2", "jspdf": "^4.0.0", "jspdf-autotable": "^5.0.7", - "lodash": "^4.17.21", + "lodash": "^4.17.23", "mobx": "^6.13.7", "mobx-react": "^7.6.0", "object-resolve-path": "^1.1.1", @@ -12154,9 +12154,10 @@ } }, "node_modules/lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha1-Z5WRxWTDv/quhFTPCz3zcMPWkRw= sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + "version": "4.17.23", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz", + "integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==", + "license": "MIT" }, "node_modules/lodash-es": { "version": "4.17.21", diff --git a/ui/apps/platform/package.json b/ui/apps/platform/package.json index ddcc832297af0..d430234ac968b 100644 --- a/ui/apps/platform/package.json +++ b/ui/apps/platform/package.json @@ -47,7 +47,7 @@ "js-base64": "^3.7.2", "jspdf": "^4.0.0", "jspdf-autotable": "^5.0.7", - "lodash": "^4.17.21", + "lodash": "^4.17.23", "mobx": "^6.13.7", "mobx-react": "^7.6.0", "object-resolve-path": "^1.1.1", From c45742f318d56bc9e45d7585f96c2ab229289490 Mon Sep 17 00:00:00 2001 From: Kyle Lape Date: Mon, 26 Jan 2026 20:21:09 -0500 Subject: [PATCH 018/232] ROX-30781: Support additional-ca in config-controller (#18617) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Config-controller fails to connect to Central when Central's certificate is issued by a custom CA added via the `additional-ca` secret. Also, the gRPC client was not using the system cert pool, so imported CAs were being ignored. 
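For illustration only, a minimal sketch of the general approach (not the actual client code, which goes through the internal `clientconn` and `verifier` helpers shown in the diff below): resolve the endpoint from `ROX_CENTRAL_ENDPOINT` with an in-cluster fallback, then build a TLS config from the system cert pool so CAs imported by `import-additional-cas` are trusted, with `ServerName` taken from the endpoint host. The `namespace` parameter and function names here are placeholders, not StackRox APIs.

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net"
	"os"
)

// centralEndpoint prefers an explicit ROX_CENTRAL_ENDPOINT and otherwise
// falls back to the in-cluster service address for the given namespace.
func centralEndpoint(namespace string) string {
	if ep, ok := os.LookupEnv("ROX_CENTRAL_ENDPOINT"); ok && ep != "" {
		return ep
	}
	return fmt.Sprintf("central.%s.svc:443", namespace)
}

// tlsConfigFor trusts the system cert pool (which import-additional-cas
// extends) and pins ServerName to the endpoint's hostname so certificate
// verification matches the name the client dials.
func tlsConfigFor(endpoint string) (*tls.Config, error) {
	pool, err := x509.SystemCertPool()
	if err != nil {
		return nil, fmt.Errorf("loading system cert pool: %w", err)
	}
	host, _, err := net.SplitHostPort(endpoint)
	if err != nil {
		// No port in the endpoint; use it verbatim as the ServerName.
		host = endpoint
	}
	return &tls.Config{RootCAs: pool, ServerName: host}, nil
}

func main() {
	ep := centralEndpoint("stackrox")
	cfg, err := tlsConfigFor(ep)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("endpoint=%s serverName=%s\n", ep, cfg.ServerName)
}
```

The actual change below wires the equivalent settings through `clientconn.Options.TLS` (RootCAs from `verifier.SystemCertPool()`, ServerName from the parsed endpoint) on top of the existing mTLS credentials.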
Changes: * Add entrypoint wrapper symlink (`image/rhel/static-bin/config-controller` → `entrypoint-wrapper.sh`) which runs `import-additional-cas` at startup * Update helm chart to use the entrypoint wrapper (`/stackrox/config-controller` instead of `/stackrox/bin/config-controller`) * Update helm chart to mount `additional-ca` secret and required volumes for the `import-additional-cas` script * Use `verifier.SystemCertPool()` to include additional CAs in the TLS root CA pool * Add support for `ROX_CENTRAL_ENDPOINT` env var to configure the Central endpoint (defaults to `central..svc:443`) * Set TLS `ServerName` based on the endpoint hostname for correct certificate verification * Add E2E test that verifies config-controller can connect through a proxy with a custom CA certificate Co-authored-by: Claude Opus 4.5 Co-authored-by: Marcin Owsiany --- config-controller/pkg/client/client.go | 35 +- image/rhel/static-bin/config-controller | 1 + .../02-config-controller-02-deployment.yaml | 19 +- operator/tests/common/central-cr-assert.yaml | 6 +- operator/tests/common/pods-debug.yaml | 7 +- .../upgrade/021-assert-status-conditions.yaml | 35 ++ tests/common.go | 17 + tests/config-controller-ca/certs.go | 20 + tests/config-controller-ca/generate-certs.sh | 61 +++ tests/config-controller-ca/root-ca.crt | 25 ++ tests/config-controller-ca/server.crt | 27 ++ tests/config-controller-ca/server.key | 28 ++ tests/config_controller_additional_ca_test.go | 349 ++++++++++++++++++ tests/tls_challenge_test.go | 13 +- 14 files changed, 625 insertions(+), 18 deletions(-) create mode 120000 image/rhel/static-bin/config-controller create mode 100644 tests/config-controller-ca/certs.go create mode 100755 tests/config-controller-ca/generate-certs.sh create mode 100644 tests/config-controller-ca/root-ca.crt create mode 100644 tests/config-controller-ca/server.crt create mode 100644 tests/config-controller-ca/server.key create mode 100644 tests/config_controller_additional_ca_test.go diff --git a/config-controller/pkg/client/client.go b/config-controller/pkg/client/client.go index 8c78d8b27409c..62ae989899590 100644 --- a/config-controller/pkg/client/client.go +++ b/config-controller/pkg/client/client.go @@ -15,15 +15,27 @@ import ( "github.com/stackrox/rox/pkg/env" "github.com/stackrox/rox/pkg/logging" "github.com/stackrox/rox/pkg/mtls" + "github.com/stackrox/rox/pkg/mtls/verifier" + "github.com/stackrox/rox/pkg/netutil" "github.com/stackrox/rox/pkg/size" "google.golang.org/grpc" ) var ( - centralHostPort = fmt.Sprintf("central.%s.svc:443", env.Namespace.Setting()) - log = logging.LoggerForModule() + log = logging.LoggerForModule() ) +func centralEndpoint() string { + // Check if ROX_CENTRAL_ENDPOINT is explicitly set (not just the default value). + // This provides backwards compatibility for deployments that don't set ROX_CENTRAL_ENDPOINT. 
+ endpoint, exists := os.LookupEnv("ROX_CENTRAL_ENDPOINT") + if exists && endpoint != "" { + return endpoint + } + // Fall back to namespace-based endpoint for backwards compatibility + return fmt.Sprintf("central.%s.svc:443", env.Namespace.Setting()) +} + type perRPCCreds struct { svc v1.AuthServiceClient metadata map[string]string @@ -98,6 +110,19 @@ type grpcClient struct { func newGrpcClient(ctx context.Context) (CentralClient, error) { clientconn.SetUserAgent(clientconn.ConfigController) + // Use system cert pool to include additional CAs from import-additional-cas + rootCAs, err := verifier.SystemCertPool() + if err != nil { + return nil, errors.Wrap(err, "failed to get system cert pool") + } + + // Parse the endpoint to get the hostname for TLS ServerName verification + endpoint := centralEndpoint() + host, _, _, err := netutil.ParseEndpoint(endpoint) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse central endpoint %q", endpoint) + } + dialOpts := []grpc.DialOption{ grpc.WithNoProxy(), } @@ -108,11 +133,15 @@ func newGrpcClient(ctx context.Context) (CentralClient, error) { InsecureAllowCredsViaPlaintext: false, DialOptions: dialOpts, PerRPCCreds: perRPCCreds, + TLS: clientconn.TLSConfigOptions{ + RootCAs: rootCAs, + ServerName: host, + }, } callOpts := []grpc.CallOption{grpc.MaxCallRecvMsgSize(12 * size.MB)} - conn, err := clientconn.GRPCConnection(ctx, mtls.CentralSubject, centralHostPort, opts, grpc.WithDefaultCallOptions(callOpts...)) + conn, err := clientconn.GRPCConnection(ctx, mtls.CentralSubject, endpoint, opts, grpc.WithDefaultCallOptions(callOpts...)) if err != nil { return nil, errors.Wrap(err, "Failed to create gRPC connection") diff --git a/image/rhel/static-bin/config-controller b/image/rhel/static-bin/config-controller new file mode 120000 index 0000000000000..1f7856e02c68e --- /dev/null +++ b/image/rhel/static-bin/config-controller @@ -0,0 +1 @@ +entrypoint-wrapper.sh \ No newline at end of file diff --git a/image/templates/helm/stackrox-central/templates/02-config-controller-02-deployment.yaml b/image/templates/helm/stackrox-central/templates/02-config-controller-02-deployment.yaml index bdb9aef64f20b..b2910b93aeb2b 100644 --- a/image/templates/helm/stackrox-central/templates/02-config-controller-02-deployment.yaml +++ b/image/templates/helm/stackrox-central/templates/02-config-controller-02-deployment.yaml @@ -54,7 +54,7 @@ spec: - name: manager image: {{ ._rox.central.image.fullRef | quote }} command: - - /stackrox/bin/config-controller + - /stackrox/config-controller args: - --health-probe-bind-address=:8081 resources: @@ -64,6 +64,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: ROX_CENTRAL_ENDPOINT + value: central.{{ .Release.Namespace }}.svc:443 {{- include "srox.envVars" (list . 
"deployment" "config-controller" "manager") | nindent 8 }} livenessProbe: httpGet: @@ -88,6 +90,13 @@ spec: - mountPath: /run/secrets/stackrox.io/certs/ name: central-certs-volume readOnly: true + - name: additional-ca-volume + mountPath: /usr/local/share/ca-certificates/ + readOnly: true + - name: etc-ssl-volume + mountPath: /etc/ssl + - name: etc-pki-volume + mountPath: /etc/pki/ca-trust terminationGracePeriodSeconds: 10 volumes: - name: central-certs-volume @@ -97,4 +106,12 @@ spec: - key: ca.pem path: ca.pem secretName: central-tls + - name: additional-ca-volume + secret: + secretName: additional-ca + optional: true + - name: etc-ssl-volume + emptyDir: {} + - name: etc-pki-volume + emptyDir: {} {{- end }} diff --git a/operator/tests/common/central-cr-assert.yaml b/operator/tests/common/central-cr-assert.yaml index 76a12f314838a..34b1436ea103c 100644 --- a/operator/tests/common/central-cr-assert.yaml +++ b/operator/tests/common/central-cr-assert.yaml @@ -7,6 +7,9 @@ collectors: - type: pod selector: app=central-db tail: -1 +- type: pod + selector: app=config-controller + tail: -1 - type: pod selector: app=scanner tail: -1 @@ -24,13 +27,14 @@ collectors: tail: -1 - command: retry-kubectl.sh describe pod -n $NAMESPACE -l app=central - command: retry-kubectl.sh describe pod -n $NAMESPACE -l app=central-db +- command: retry-kubectl.sh describe pod -n $NAMESPACE -l app=config-controller - command: retry-kubectl.sh describe pod -n $NAMESPACE -l app=scanner - command: retry-kubectl.sh describe pod -n $NAMESPACE -l app=scanner-db - command: retry-kubectl.sh describe pod -n $NAMESPACE -l app=scanner-v4-indexer - command: retry-kubectl.sh describe pod -n $NAMESPACE -l app=scanner-v4-matcher - command: retry-kubectl.sh describe pod -n $NAMESPACE -l app=scanner-v4-db -# Please keep the above lists in sync with pods-debug.yaml - command: retry-kubectl.sh describe central.platform.stackrox.io -n $NAMESPACE stackrox-central-services +# Please keep the above lists in sync with pods-debug.yaml and 021-assert-status-conditions.yaml resourceRefs: - apiVersion: platform.stackrox.io/v1alpha1 kind: Central diff --git a/operator/tests/common/pods-debug.yaml b/operator/tests/common/pods-debug.yaml index 2ea16d66f1c79..546cf505d8d7c 100644 --- a/operator/tests/common/pods-debug.yaml +++ b/operator/tests/common/pods-debug.yaml @@ -7,6 +7,9 @@ collectors: - type: pod selector: app=central-db tail: -1 +- type: pod + selector: app=config-controller + tail: -1 - type: pod selector: app=scanner tail: -1 @@ -24,12 +27,14 @@ collectors: tail: -1 - command: retry-kubectl.sh describe pod -n $NAMESPACE -l app=central - command: retry-kubectl.sh describe pod -n $NAMESPACE -l app=central-db +- command: retry-kubectl.sh describe pod -n $NAMESPACE -l app=config-controller - command: retry-kubectl.sh describe pod -n $NAMESPACE -l app=scanner - command: retry-kubectl.sh describe pod -n $NAMESPACE -l app=scanner-db - command: retry-kubectl.sh describe pod -n $NAMESPACE -l app=scanner-v4-indexer - command: retry-kubectl.sh describe pod -n $NAMESPACE -l app=scanner-v4-matcher - command: retry-kubectl.sh describe pod -n $NAMESPACE -l app=scanner-v4-db -# Please keep the above lists in sync with central-cr-assert.yaml +- command: retry-kubectl.sh describe central.platform.stackrox.io -n $NAMESPACE stackrox-central-services +# Please keep the above lists in sync with central-cr-assert.yaml and 021-assert-status-conditions.yaml - type: pod selector: app=sensor tail: -1 diff --git 
a/operator/tests/upgrade/upgrade/021-assert-status-conditions.yaml b/operator/tests/upgrade/upgrade/021-assert-status-conditions.yaml index f03293c507b1c..f633504be90c5 100644 --- a/operator/tests/upgrade/upgrade/021-assert-status-conditions.yaml +++ b/operator/tests/upgrade/upgrade/021-assert-status-conditions.yaml @@ -1,5 +1,40 @@ apiVersion: kuttl.dev/v1beta1 kind: TestAssert +collectors: +- type: pod + selector: app=central + tail: -1 +- type: pod + selector: app=central-db + tail: -1 +- type: pod + selector: app=config-controller + tail: -1 +- type: pod + selector: app=scanner + tail: -1 +- type: pod + selector: app=scanner-db + tail: -1 +- type: pod + selector: app=scanner-v4-indexer + tail: -1 +- type: pod + selector: app=scanner-v4-matcher + tail: -1 +- type: pod + selector: app=scanner-v4-db + tail: -1 +- command: retry-kubectl.sh describe pod -n $NAMESPACE -l app=central +- command: retry-kubectl.sh describe pod -n $NAMESPACE -l app=central-db +- command: retry-kubectl.sh describe pod -n $NAMESPACE -l app=config-controller +- command: retry-kubectl.sh describe pod -n $NAMESPACE -l app=scanner +- command: retry-kubectl.sh describe pod -n $NAMESPACE -l app=scanner-db +- command: retry-kubectl.sh describe pod -n $NAMESPACE -l app=scanner-v4-indexer +- command: retry-kubectl.sh describe pod -n $NAMESPACE -l app=scanner-v4-matcher +- command: retry-kubectl.sh describe pod -n $NAMESPACE -l app=scanner-v4-db +- command: retry-kubectl.sh describe central.platform.stackrox.io -n $NAMESPACE stackrox-central-services +# Please keep the above lists in sync with pods-debug.yaml and central-cr-assert.yaml resourceRefs: - apiVersion: platform.stackrox.io/v1alpha1 kind: Central diff --git a/tests/common.go b/tests/common.go index 31ea6c7abbadc..cc928543ad48b 100644 --- a/tests/common.go +++ b/tests/common.go @@ -5,6 +5,7 @@ package tests import ( "bytes" "context" + "encoding/json" "fmt" "io" "math" @@ -16,6 +17,7 @@ import ( v1 "github.com/stackrox/rox/generated/api/v1" "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/docker/config" "github.com/stackrox/rox/pkg/pointers" "github.com/stackrox/rox/pkg/retry" "github.com/stackrox/rox/pkg/retryablehttp" @@ -943,6 +945,21 @@ func (ks *KubernetesSuite) ensureSecretExists(ctx context.Context, namespace str } } +// ensureQuayImagePullSecretExists creates an image pull secret for quay.io using credentials from +// REGISTRY_USERNAME and REGISTRY_PASSWORD environment variables. This is a common pattern across e2e tests. +func (ks *KubernetesSuite) ensureQuayImagePullSecretExists(ctx context.Context, namespace string, secretName string) { + configBytes, err := json.Marshal(config.DockerConfigJSON{ + Auths: map[string]config.DockerConfigEntry{ + "https://quay.io": { + Username: mustGetEnv(ks.T(), "REGISTRY_USERNAME"), + Password: mustGetEnv(ks.T(), "REGISTRY_PASSWORD"), + }, + }, + }) + ks.Require().NoError(err, "cannot serialize docker config for image pull secret %q in namespace %q", secretName, namespace) + ks.ensureSecretExists(ctx, namespace, secretName, coreV1.SecretTypeDockerConfigJson, map[string][]byte{coreV1.DockerConfigJsonKey: configBytes}) +} + // ensureConfigMapExists creates a k8s ConfigMap object. If one exists, it makes sure the data matches. 
func (ks *KubernetesSuite) ensureConfigMapExists(ctx context.Context, namespace string, name string, data map[string]string) { cm := &coreV1.ConfigMap{ diff --git a/tests/config-controller-ca/certs.go b/tests/config-controller-ca/certs.go new file mode 100644 index 0000000000000..fe6ebf78da4c4 --- /dev/null +++ b/tests/config-controller-ca/certs.go @@ -0,0 +1,20 @@ +// Package config_controller_ca contains test certificates for config-controller additional CA testing. +package config_controller_ca + +import _ "embed" + +// RootCACert is the root CA certificate used to sign the server certificate. +// This CA should be added to the additional-ca secret for config-controller to trust. +// +//go:embed root-ca.crt +var RootCACert []byte + +// ServerCert is the server certificate with SANs for nginx-proxy.qa-config-controller-ca. +// +//go:embed server.crt +var ServerCert []byte + +// ServerKey is the private key for the server certificate. +// +//go:embed server.key +var ServerKey []byte diff --git a/tests/config-controller-ca/generate-certs.sh b/tests/config-controller-ca/generate-certs.sh new file mode 100755 index 0000000000000..b8f15745106fd --- /dev/null +++ b/tests/config-controller-ca/generate-certs.sh @@ -0,0 +1,61 @@ +#!/bin/bash + +# Generates CA and server certificates for config-controller additional CA tests. +# The server cert has SANs for the qa-config-controller-ca namespace. + +set -euo pipefail + +cd "$(dirname "$0")" + +NAMESPACE="qa-config-controller-ca" +SERVICE_NAME="nginx-proxy" + +echo "=== Generating Root CA ===" +openssl genrsa -out root-ca.key 3072 +openssl req -x509 -nodes -sha256 -new -key root-ca.key -out root-ca.crt -days $((50*365)) \ + -subj "/CN=Config Controller Test CA" \ + -addext "keyUsage = critical, keyCertSign" \ + -addext "basicConstraints = critical, CA:TRUE, pathlen:0" \ + -addext "subjectKeyIdentifier = hash" + +echo "=== Generating Server Certificate ===" +openssl genrsa -out server.key 2048 +openssl req -sha256 -new -key server.key -out server.csr \ + -subj "/CN=${SERVICE_NAME}.${NAMESPACE}/O=StackRox Tests/OU=Config Controller QA" \ + -reqexts SAN -config <(cat < Date: Tue, 27 Jan 2026 07:25:06 +0530 Subject: [PATCH 019/232] chore(be): Fix Image datastore unit tests (#18687) --- .../datastore_impl_flat_postgres_test.go | 25 +++++++++++++------ 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/central/image/datastore/datastore_impl_flat_postgres_test.go b/central/image/datastore/datastore_impl_flat_postgres_test.go index 678fce8472a51..de8570adc0b0a 100644 --- a/central/image/datastore/datastore_impl_flat_postgres_test.go +++ b/central/image/datastore/datastore_impl_flat_postgres_test.go @@ -20,6 +20,7 @@ import ( v1 "github.com/stackrox/rox/generated/api/v1" "github.com/stackrox/rox/generated/storage" pkgCVE "github.com/stackrox/rox/pkg/cve" + "github.com/stackrox/rox/pkg/features" "github.com/stackrox/rox/pkg/fixtures" imageTypes "github.com/stackrox/rox/pkg/images/types" "github.com/stackrox/rox/pkg/postgres" @@ -352,9 +353,14 @@ func (s *ImageFlatPostgresDataStoreTestSuite) TestImageDeletes() { storedImage, found, err := s.datastore.GetImage(ctx, testImage.GetId()) s.NoError(err) s.True(found) - for _, component := range testImage.GetScan().GetComponents() { - for _, cve := range component.GetVulns() { - cve.FirstSystemOccurrence = storedImage.GetLastUpdated() + for compI, component := range testImage.GetScan().GetComponents() { + for cveI, cve := range component.GetVulns() { + if features.CVEFixTimestampCriteria.Enabled() { + 
cve.FirstSystemOccurrence = storedImage.GetScan().GetComponents()[compI].GetVulns()[cveI].GetFirstSystemOccurrence() + cve.FixAvailableTimestamp = storedImage.GetScan().GetComponents()[compI].GetVulns()[cveI].GetFixAvailableTimestamp() + } else { + cve.FirstSystemOccurrence = storedImage.GetLastUpdated() + } cve.FirstImageOccurrence = storedImage.GetLastUpdated() cve.VulnerabilityTypes = []storage.EmbeddedVulnerability_VulnerabilityType{storage.EmbeddedVulnerability_IMAGE_VULNERABILITY} } @@ -406,7 +412,7 @@ func (s *ImageFlatPostgresDataStoreTestSuite) TestImageDeletes() { s.True(found) for _, component := range testImage2.GetScan().GetComponents() { for _, cve := range component.GetVulns() { - // System Occurrence remains unchanged. + // System Occurrence and fix available times remain unchanged. cve.FirstImageOccurrence = storedImage.GetLastUpdated() cve.VulnerabilityTypes = []storage.EmbeddedVulnerability_VulnerabilityType{storage.EmbeddedVulnerability_IMAGE_VULNERABILITY} } @@ -440,10 +446,15 @@ func (s *ImageFlatPostgresDataStoreTestSuite) TestImageDeletes() { storedImage, found, err = s.datastore.GetImage(ctx, testImage2.GetId()) s.NoError(err) s.True(found) - for _, component := range testImage2.GetScan().GetComponents() { + for compI, component := range testImage2.GetScan().GetComponents() { // Components and Vulns are deduped, therefore, update testImage structure. - for _, cve := range component.GetVulns() { - cve.FirstSystemOccurrence = storedImage.GetLastUpdated() + for cveI, cve := range component.GetVulns() { + if features.CVEFixTimestampCriteria.Enabled() { + cve.FirstSystemOccurrence = storedImage.GetScan().GetComponents()[compI].GetVulns()[cveI].GetFirstSystemOccurrence() + cve.FixAvailableTimestamp = storedImage.GetScan().GetComponents()[compI].GetVulns()[cveI].GetFixAvailableTimestamp() + } else { + cve.FirstSystemOccurrence = storedImage.GetLastUpdated() + } cve.FirstImageOccurrence = storedImage.GetLastUpdated() } } From 8198e67eb8296027acd86ba130ca7b848813ec5e Mon Sep 17 00:00:00 2001 From: Marcin Owsiany Date: Tue, 27 Jan 2026 06:24:47 +0100 Subject: [PATCH 020/232] chore: use a branding-neutral `image:` in source files (#18592) --- operator/Makefile | 5 +---- .../manifests/rhacs-operator.clusterserviceversion.yaml | 4 ++-- operator/config/manager/kustomization.yaml | 6 ------ .../bases/rhacs-operator.clusterserviceversion.yaml | 2 +- .../bases/rhacs-operator.clusterserviceversion.yaml | 2 +- 5 files changed, 5 insertions(+), 14 deletions(-) diff --git a/operator/Makefile b/operator/Makefile index 98e4085fa5c57..244c495977e06 100644 --- a/operator/Makefile +++ b/operator/Makefile @@ -375,7 +375,7 @@ deploy: check-ci-setup manifests kustomize ## Deploy operator image to the K8s c mkdir config/local-deploy-versioned && \ cd config/local-deploy-versioned && \ $(KUSTOMIZE) create --resources ../default && \ - $(KUSTOMIZE) edit set image quay.io/stackrox-io/stackrox-operator=$(IMG) + $(KUSTOMIZE) edit set image controller=$(IMG) $(KUSTOMIZE) build config/local-deploy-versioned | $(PROJECT_DIR)/hack/retry-kubectl.sh apply -f - .PHONY: undeploy @@ -423,9 +423,6 @@ bundle: yq manifests kustomize operator-sdk ## Generate bundle manifests and met # The operator-sdk command both reads and writes config/manifests by default. 
# We re-generate it from scratch each time based on a minimal set of inputs, to be able to catch cases when operator-sdk stops working (ROX-25872) rm -rf config/manifests/bases && $(OPERATOR_SDK) generate kustomize manifests --input-dir=config/ui-metadata -# We hardcode the image reference to quay.io/stackrox-io/stackrox-operator. If this is overridden via -# the IMG_REPO environment variable, the final reference will be injected in the bundle-post-process step. - cd config/manager && $(KUSTOMIZE) edit set image controller=quay.io/stackrox-io/stackrox-operator:0.0.1 cd config/scorecard-versioned && $(KUSTOMIZE) edit set image quay.io/operator-framework/scorecard-test=quay.io/operator-framework/scorecard-test:$(OPERATOR_SDK_VERSION) $(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle $(BUNDLE_GEN_FLAGS) # Fix the createdAt annotation diff --git a/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml b/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml index 315dab9e0326d..6905464ea7829 100644 --- a/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml +++ b/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml @@ -13,7 +13,7 @@ metadata: : \"my-cluster\"\n }\n }\n]" capabilities: Seamless Upgrades categories: Security - containerImage: quay.io/stackrox-io/stackrox-operator:0.0.1 + containerImage: controller:latest createdAt: '' description: Red Hat Advanced Cluster Security (RHACS) operator provisions the services necessary to secure each of your OpenShift and Kubernetes clusters. @@ -1992,7 +1992,7 @@ spec: containerName: manager divisor: '0' resource: limits.memory - image: quay.io/stackrox-io/stackrox-operator:0.0.1 + image: controller:latest livenessProbe: httpGet: path: /healthz diff --git a/operator/config/manager/kustomization.yaml b/operator/config/manager/kustomization.yaml index 5fdf3254cf926..5c5f0b84cba45 100644 --- a/operator/config/manager/kustomization.yaml +++ b/operator/config/manager/kustomization.yaml @@ -1,8 +1,2 @@ resources: - manager.yaml -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -images: -- name: controller - newName: quay.io/stackrox-io/stackrox-operator - newTag: 0.0.1 diff --git a/operator/config/manifests/bases/rhacs-operator.clusterserviceversion.yaml b/operator/config/manifests/bases/rhacs-operator.clusterserviceversion.yaml index a77b4fbfdb9f4..7d38e14bcdf6c 100644 --- a/operator/config/manifests/bases/rhacs-operator.clusterserviceversion.yaml +++ b/operator/config/manifests/bases/rhacs-operator.clusterserviceversion.yaml @@ -4,7 +4,7 @@ metadata: annotations: capabilities: Seamless Upgrades categories: Security - containerImage: quay.io/stackrox-io/stackrox-operator:0.0.1 + containerImage: controller:latest createdAt: "1999-12-31T23:59:59Z" description: Red Hat Advanced Cluster Security (RHACS) operator provisions the services necessary to secure each of your OpenShift and Kubernetes clusters. 
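The deploy target above now relies on kustomize's standard image transformer instead of a hardcoded reference: `$(KUSTOMIZE) edit set image controller=$(IMG)` records an override for the neutral image name `controller`, and `kustomize build` then rewrites matching container image references in the rendered manifests to the concrete `$(IMG)`. As a minimal sketch of what that edit leaves behind in config/local-deploy-versioned/kustomization.yaml (the tag 4.9.0 is an illustrative placeholder, not a value from this change):

apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../default
images:
- name: controller                                # matches the branding-neutral name used in source files
  newName: quay.io/stackrox-io/stackrox-operator  # illustrative target repository
  newTag: 4.9.0                                   # illustrative tag; in practice this comes from $(IMG)

This is the same shape as the images override that was previously checked in under config/manager/kustomization.yaml; generating it on the fly keeps the committed sources branding-neutral, and the concrete reference is supplied only when deploying.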
diff --git a/operator/config/ui-metadata/bases/rhacs-operator.clusterserviceversion.yaml b/operator/config/ui-metadata/bases/rhacs-operator.clusterserviceversion.yaml index ac5a3077dd422..e6317c7cefaa7 100644 --- a/operator/config/ui-metadata/bases/rhacs-operator.clusterserviceversion.yaml +++ b/operator/config/ui-metadata/bases/rhacs-operator.clusterserviceversion.yaml @@ -4,7 +4,7 @@ metadata: annotations: capabilities: Seamless Upgrades categories: Security - containerImage: quay.io/stackrox-io/stackrox-operator:0.0.1 + containerImage: controller:latest createdAt: "1999-12-31T23:59:59Z" description: Red Hat Advanced Cluster Security (RHACS) operator provisions the services necessary to secure each of your OpenShift and Kubernetes clusters. From c984907809547fc49fe0e432732c2adb178e77ad Mon Sep 17 00:00:00 2001 From: Marcin Owsiany Date: Tue, 27 Jan 2026 08:37:20 +0100 Subject: [PATCH 021/232] tests: improve defaults cross-check (#17966) Co-authored-by: Vlad Bologa --- operator/api/v1alpha1/central_types.go | 4 +-- operator/api/v1alpha1/securedcluster_types.go | 18 +++++++------ .../platform.stackrox.io_centrals.yaml | 4 +-- .../platform.stackrox.io_securedclusters.yaml | 14 +++++++---- .../rhacs-operator.clusterserviceversion.yaml | 25 +++++++++++++------ .../bases/platform.stackrox.io_centrals.yaml | 4 +-- .../platform.stackrox.io_securedclusters.yaml | 14 +++++++---- .../rhacs-operator.clusterserviceversion.yaml | 18 +++++++------ .../internal/central/defaults/scanner_v4.go | 5 +++- operator/internal/central/defaults/static.go | 5 ++++ .../internal/central/defaults/static_test.go | 9 ------- .../extensions/defaulting_cross_check_test.go | 21 ++++++++++++++++ .../common/defaulting_test_helpers/helpers.go | 20 +++++++++++---- .../securedcluster/defaults/scanner_v4.go | 5 +++- .../securedcluster/defaults/static.go | 5 ++++ .../securedcluster/defaults/static_test.go | 10 -------- .../extensions/defaulting_cross_check_test.go | 22 ++++++++++++++++ .../extensions/reconcile_defaulting.go | 9 ++----- 18 files changed, 141 insertions(+), 71 deletions(-) create mode 100644 operator/internal/central/extensions/defaulting_cross_check_test.go create mode 100644 operator/internal/securedcluster/extensions/defaulting_cross_check_test.go diff --git a/operator/api/v1alpha1/central_types.go b/operator/api/v1alpha1/central_types.go index cfd358f1edf39..03b1b6b102ac9 100644 --- a/operator/api/v1alpha1/central_types.go +++ b/operator/api/v1alpha1/central_types.go @@ -548,8 +548,8 @@ type ScannerComponentSpec struct { type ScannerV4Spec struct { // Can be specified as "Enabled" or "Disabled". // If this field is not specified, the following defaulting takes place: - // * for new installations, Scanner V4 is enabled starting with ACS 4.8; - // * for upgrades to 4.8 from previous releases, Scanner V4 is disabled. + // * for upgrades to 4.8 from previous releases, the default is: Disabled; + // * for new installations starting with ACS 4.8, the default is: Enabled. 
//+operator-sdk:csv:customresourcedefinitions:type=spec,order=1,displayName="Scanner V4 component" ScannerComponent *ScannerV4ComponentPolicy `json:"scannerComponent,omitempty"` diff --git a/operator/api/v1alpha1/securedcluster_types.go b/operator/api/v1alpha1/securedcluster_types.go index 3692c7144a3d0..c878994795ea1 100644 --- a/operator/api/v1alpha1/securedcluster_types.go +++ b/operator/api/v1alpha1/securedcluster_types.go @@ -158,8 +158,8 @@ type AdmissionControlComponentSpec struct { ListenOnEvents *bool `json:"listenOnEvents,omitempty"` // Set to Disabled to disable policy enforcement for the admission controller. This is not recommended. - // On new deployments starting with version 4.9, defaults to Enabled. - // On old deployments, defaults to Enabled if at least one of listenOnCreates or listenOnUpdates is true. + // On upgrades to 4.9 from previous releases, defaults to Enabled only if at least one of listenOnCreates or listenOnUpdates is true. + // On new deployments starting with version 4.9, the default is: Enabled. //+operator-sdk:csv:customresourcedefinitions:type=spec,order=1 Enforcement *PolicyEnforcement `json:"enforcement,omitempty"` @@ -437,10 +437,14 @@ type LocalScannerComponentSpec struct { // LocalScannerV4ComponentSpec defines settings for the "Scanner V4" component in SecuredClusters type LocalScannerV4ComponentSpec struct { - // If you want to enable the Scanner V4 component set this to "AutoSense" + // If you want to enable the Scanner V4 component set this to "AutoSense". + // A value of "AutoSense" means that Scanner V4 should be installed, + // unless there is a Central resource in the same namespace. + // In that case typically a central Scanner V4 will be deployed as a component of Central. + // A value of "Disabled" means that Scanner V4 should not be installed. // If this field is not specified or set to "Default", the following defaulting takes place: - // * for new installations, Scanner V4 is enabled starting with ACS 4.8; - // * for upgrades to 4.8 from previous releases, Scanner V4 is disabled. + // * for upgrades to 4.8 from previous releases, the default is: Disabled; + // * for new installations starting with ACS 4.8, the default is: AutoSense. //+operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Scanner V4 component",order=1 ScannerComponent *LocalScannerV4ComponentPolicy `json:"scannerComponent,omitempty"` @@ -483,7 +487,7 @@ type LocalScannerV4ComponentPolicy string const ( // LocalScannerV4ComponentDefault means that local Scanner V4 will use the default semantics - // to determine whether scannerV4 components should be used. + // to determine whether Scanner V4 components should be used. // Currently this defaults to "Disabled" semantics. // TODO: change default to "AutoSense" semantics with version 4.5 LocalScannerV4ComponentDefault LocalScannerV4ComponentPolicy = "Default" @@ -491,7 +495,7 @@ const ( // unless there is a Central resource in the same namespace. // In that case typically a central Scanner V4 will be deployed as a component of Central LocalScannerV4ComponentAutoSense LocalScannerV4ComponentPolicy = "AutoSense" - // LocalScannerV4ComponentDisabled means that scanner should not be installed. + // LocalScannerV4ComponentDisabled means that Scanner V4 should not be installed. 
LocalScannerV4ComponentDisabled LocalScannerV4ComponentPolicy = "Disabled" ) diff --git a/operator/bundle/manifests/platform.stackrox.io_centrals.yaml b/operator/bundle/manifests/platform.stackrox.io_centrals.yaml index 1e773b7766a38..ecc7446c169b8 100644 --- a/operator/bundle/manifests/platform.stackrox.io_centrals.yaml +++ b/operator/bundle/manifests/platform.stackrox.io_centrals.yaml @@ -2045,8 +2045,8 @@ spec: description: |- Can be specified as "Enabled" or "Disabled". If this field is not specified, the following defaulting takes place: - * for new installations, Scanner V4 is enabled starting with ACS 4.8; - * for upgrades to 4.8 from previous releases, Scanner V4 is disabled. + * for upgrades to 4.8 from previous releases, the default is: Disabled; + * for new installations starting with ACS 4.8, the default is: Enabled. enum: - Default - Enabled diff --git a/operator/bundle/manifests/platform.stackrox.io_securedclusters.yaml b/operator/bundle/manifests/platform.stackrox.io_securedclusters.yaml index c635753c8960f..51e189b4c716d 100644 --- a/operator/bundle/manifests/platform.stackrox.io_securedclusters.yaml +++ b/operator/bundle/manifests/platform.stackrox.io_securedclusters.yaml @@ -83,8 +83,8 @@ spec: enforcement: description: |- Set to Disabled to disable policy enforcement for the admission controller. This is not recommended. - On new deployments starting with version 4.9, defaults to Enabled. - On old deployments, defaults to Enabled if at least one of listenOnCreates or listenOnUpdates is true. + On upgrades to 4.9 from previous releases, defaults to Enabled only if at least one of listenOnCreates or listenOnUpdates is true. + On new deployments starting with version 4.9, the default is: Enabled. enum: - Enabled - Disabled @@ -1689,10 +1689,14 @@ spec: type: object scannerComponent: description: |- - If you want to enable the Scanner V4 component set this to "AutoSense" + If you want to enable the Scanner V4 component set this to "AutoSense". + A value of "AutoSense" means that Scanner V4 should be installed, + unless there is a Central resource in the same namespace. + In that case typically a central Scanner V4 will be deployed as a component of Central. + A value of "Disabled" means that Scanner V4 should not be installed. If this field is not specified or set to "Default", the following defaulting takes place: - * for new installations, Scanner V4 is enabled starting with ACS 4.8; - * for upgrades to 4.8 from previous releases, Scanner V4 is disabled. + * for upgrades to 4.8 from previous releases, the default is: Disabled; + * for new installations starting with ACS 4.8, the default is: AutoSense. enum: - Default - AutoSense diff --git a/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml b/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml index 6905464ea7829..b81b775108383 100644 --- a/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml +++ b/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml @@ -808,9 +808,9 @@ spec: If this field is not specified, the following defaulting takes place: - * for new installations, Scanner V4 is enabled starting with ACS 4.8; + * for upgrades to 4.8 from previous releases, the default is: Disabled; - * for upgrades to 4.8 from previous releases, Scanner V4 is disabled.' + * for new installations starting with ACS 4.8, the default is: Enabled.' displayName: Scanner V4 component path: scannerV4.scannerComponent - description: Settings pertaining to the indexer deployment. 
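The version-dependent defaulting described in this descriptor applies only when `scannerV4.scannerComponent` is left unset; a Central resource can always pin the behavior explicitly. A minimal sketch (the resource name is taken from the operator test fixtures earlier in this series; everything else is illustrative):

apiVersion: platform.stackrox.io/v1alpha1
kind: Central
metadata:
  name: stackrox-central-services
spec:
  scannerV4:
    scannerComponent: Enabled   # or Disabled; omit the field to accept the version-dependent default

Setting the field explicitly also keeps the behavior stable across upgrades, since the defaulting only takes place when the field is not specified.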
@@ -1177,10 +1177,10 @@ spec: - description: 'Set to Disabled to disable policy enforcement for the admission controller. This is not recommended. - On new deployments starting with version 4.9, defaults to Enabled. + On upgrades to 4.9 from previous releases, defaults to Enabled only if at + least one of listenOnCreates or listenOnUpdates is true. - On old deployments, defaults to Enabled if at least one of listenOnCreates - or listenOnUpdates is true.' + On new deployments starting with version 4.9, the default is: Enabled.' displayName: Enforcement path: admissionControl.enforcement - description: 'Enables teams to bypass admission control in a monitored manner @@ -1595,14 +1595,23 @@ spec: in the pod's hosts file. displayName: Host Aliases path: scanner.db.hostAliases - - description: 'If you want to enable the Scanner V4 component set this to "AutoSense" + - description: 'If you want to enable the Scanner V4 component set this to "AutoSense". + + A value of "AutoSense" means that Scanner V4 should be installed, + + unless there is a Central resource in the same namespace. + + In that case typically a central Scanner V4 will be deployed as a component + of Central. + + A value of "Disabled" means that Scanner V4 should not be installed. If this field is not specified or set to "Default", the following defaulting takes place: - * for new installations, Scanner V4 is enabled starting with ACS 4.8; + * for upgrades to 4.8 from previous releases, the default is: Disabled; - * for upgrades to 4.8 from previous releases, Scanner V4 is disabled.' + * for new installations starting with ACS 4.8, the default is: AutoSense.' displayName: Scanner V4 component path: scannerV4.scannerComponent - description: Settings pertaining to the indexer deployment. diff --git a/operator/config/crd/bases/platform.stackrox.io_centrals.yaml b/operator/config/crd/bases/platform.stackrox.io_centrals.yaml index 05c81fb9256ac..67801353a860d 100644 --- a/operator/config/crd/bases/platform.stackrox.io_centrals.yaml +++ b/operator/config/crd/bases/platform.stackrox.io_centrals.yaml @@ -2043,8 +2043,8 @@ spec: description: |- Can be specified as "Enabled" or "Disabled". If this field is not specified, the following defaulting takes place: - * for new installations, Scanner V4 is enabled starting with ACS 4.8; - * for upgrades to 4.8 from previous releases, Scanner V4 is disabled. + * for upgrades to 4.8 from previous releases, the default is: Disabled; + * for new installations starting with ACS 4.8, the default is: Enabled. enum: - Default - Enabled diff --git a/operator/config/crd/bases/platform.stackrox.io_securedclusters.yaml b/operator/config/crd/bases/platform.stackrox.io_securedclusters.yaml index a6f0bcb2b16cc..168d2f655024d 100644 --- a/operator/config/crd/bases/platform.stackrox.io_securedclusters.yaml +++ b/operator/config/crd/bases/platform.stackrox.io_securedclusters.yaml @@ -81,8 +81,8 @@ spec: enforcement: description: |- Set to Disabled to disable policy enforcement for the admission controller. This is not recommended. - On new deployments starting with version 4.9, defaults to Enabled. - On old deployments, defaults to Enabled if at least one of listenOnCreates or listenOnUpdates is true. + On upgrades to 4.9 from previous releases, defaults to Enabled only if at least one of listenOnCreates or listenOnUpdates is true. + On new deployments starting with version 4.9, the default is: Enabled. 
enum: - Enabled - Disabled @@ -1687,10 +1687,14 @@ spec: type: object scannerComponent: description: |- - If you want to enable the Scanner V4 component set this to "AutoSense" + If you want to enable the Scanner V4 component set this to "AutoSense". + A value of "AutoSense" means that Scanner V4 should be installed, + unless there is a Central resource in the same namespace. + In that case typically a central Scanner V4 will be deployed as a component of Central. + A value of "Disabled" means that Scanner V4 should not be installed. If this field is not specified or set to "Default", the following defaulting takes place: - * for new installations, Scanner V4 is enabled starting with ACS 4.8; - * for upgrades to 4.8 from previous releases, Scanner V4 is disabled. + * for upgrades to 4.8 from previous releases, the default is: Disabled; + * for new installations starting with ACS 4.8, the default is: AutoSense. enum: - Default - AutoSense diff --git a/operator/config/manifests/bases/rhacs-operator.clusterserviceversion.yaml b/operator/config/manifests/bases/rhacs-operator.clusterserviceversion.yaml index 7d38e14bcdf6c..1bc1d81c05820 100644 --- a/operator/config/manifests/bases/rhacs-operator.clusterserviceversion.yaml +++ b/operator/config/manifests/bases/rhacs-operator.clusterserviceversion.yaml @@ -239,8 +239,8 @@ spec: - description: |- Can be specified as "Enabled" or "Disabled". If this field is not specified, the following defaulting takes place: - * for new installations, Scanner V4 is enabled starting with ACS 4.8; - * for upgrades to 4.8 from previous releases, Scanner V4 is disabled. + * for upgrades to 4.8 from previous releases, the default is: Disabled; + * for new installations starting with ACS 4.8, the default is: Enabled. displayName: Scanner V4 component path: scannerV4.scannerComponent - description: |- @@ -930,8 +930,8 @@ spec: specDescriptors: - description: |- Set to Disabled to disable policy enforcement for the admission controller. This is not recommended. - On new deployments starting with version 4.9, defaults to Enabled. - On old deployments, defaults to Enabled if at least one of listenOnCreates or listenOnUpdates is true. + On upgrades to 4.9 from previous releases, defaults to Enabled only if at least one of listenOnCreates or listenOnUpdates is true. + On new deployments starting with version 4.9, the default is: Enabled. displayName: Enforcement path: admissionControl.enforcement - description: |- @@ -1066,10 +1066,14 @@ spec: displayName: Expose Endpoint path: scannerV4.monitoring.exposeEndpoint - description: |- - If you want to enable the Scanner V4 component set this to "AutoSense" + If you want to enable the Scanner V4 component set this to "AutoSense". + A value of "AutoSense" means that Scanner V4 should be installed, + unless there is a Central resource in the same namespace. + In that case typically a central Scanner V4 will be deployed as a component of Central. + A value of "Disabled" means that Scanner V4 should not be installed. If this field is not specified or set to "Default", the following defaulting takes place: - * for new installations, Scanner V4 is enabled starting with ACS 4.8; - * for upgrades to 4.8 from previous releases, Scanner V4 is disabled. + * for upgrades to 4.8 from previous releases, the default is: Disabled; + * for new installations starting with ACS 4.8, the default is: AutoSense. 
displayName: Scanner V4 component path: scannerV4.scannerComponent - description: |- diff --git a/operator/internal/central/defaults/scanner_v4.go b/operator/internal/central/defaults/scanner_v4.go index 7728cf49069a4..eb1dfc449200c 100644 --- a/operator/internal/central/defaults/scanner_v4.go +++ b/operator/internal/central/defaults/scanner_v4.go @@ -91,7 +91,10 @@ func centralScannerV4Defaulting(logger logr.Logger, status *platform.CentralStat annotations[common.FeatureDefaultKeyScannerV4] = string(componentPolicy) } - defaults.ScannerV4 = &platform.ScannerV4Spec{ScannerComponent: &componentPolicy} + if defaults.ScannerV4 == nil { + defaults.ScannerV4 = &platform.ScannerV4Spec{} + } + defaults.ScannerV4.ScannerComponent = &componentPolicy return nil } diff --git a/operator/internal/central/defaults/static.go b/operator/internal/central/defaults/static.go index bdcad53f34203..59de137213684 100644 --- a/operator/internal/central/defaults/static.go +++ b/operator/internal/central/defaults/static.go @@ -91,6 +91,11 @@ var staticDefaults = platform.CentralSpec{ ConfigAsCode: &platform.ConfigAsCodeSpec{ ComponentPolicy: ptr.To(platform.ConfigAsCodeComponentEnabled), }, + Customize: &platform.CustomizeSpec{ + DeploymentDefaults: &platform.DeploymentDefaultsSpec{ + PinToNodes: ptr.To(platform.PinToNodesNone), + }, + }, } var CentralStaticDefaults = CentralDefaultingFlow{ diff --git a/operator/internal/central/defaults/static_test.go b/operator/internal/central/defaults/static_test.go index 7dca9218ecb0f..cd8c800bc71b3 100644 --- a/operator/internal/central/defaults/static_test.go +++ b/operator/internal/central/defaults/static_test.go @@ -5,7 +5,6 @@ import ( "github.com/go-logr/logr" platform "github.com/stackrox/rox/operator/api/v1alpha1" - "github.com/stackrox/rox/operator/internal/common/defaulting_test_helpers" "github.com/stretchr/testify/require" ) @@ -31,11 +30,3 @@ func TestCentralStaticDefaults(t *testing.T) { }) } } - -func TestCentralStaticDefaultsMatchesCRD(t *testing.T) { - centralSpecSchema := defaulting_test_helpers.LoadSpecSchema(t, "centrals") - - t.Run("Defaults", func(t *testing.T) { - defaulting_test_helpers.CheckStruct(t, staticDefaults, centralSpecSchema) - }) -} diff --git a/operator/internal/central/extensions/defaulting_cross_check_test.go b/operator/internal/central/extensions/defaulting_cross_check_test.go new file mode 100644 index 0000000000000..aef240700932b --- /dev/null +++ b/operator/internal/central/extensions/defaulting_cross_check_test.go @@ -0,0 +1,21 @@ +package extensions + +import ( + "testing" + + "github.com/go-logr/logr/testr" + "github.com/stackrox/rox/operator/api/v1alpha1" + "github.com/stackrox/rox/operator/internal/common/defaulting_test_helpers" + "github.com/stretchr/testify/require" +) + +func TestCentralDefaultsMatchCRD(t *testing.T) { + centralSpecSchema := defaulting_test_helpers.LoadSpecSchema(t, "centrals") + c := &v1alpha1.Central{} + for _, flow := range defaultingFlows { + require.NoError(t, flow.DefaultingFunc(testr.New(t), &v1alpha1.CentralStatus{}, map[string]string{}, &c.Spec, &c.Defaults)) + } + t.Run("Defaults", func(t *testing.T) { + defaulting_test_helpers.CheckStruct(t, c.Defaults, centralSpecSchema) + }) +} diff --git a/operator/internal/common/defaulting_test_helpers/helpers.go b/operator/internal/common/defaulting_test_helpers/helpers.go index baab5346741e9..c7570603f8163 100644 --- a/operator/internal/common/defaulting_test_helpers/helpers.go +++ b/operator/internal/common/defaulting_test_helpers/helpers.go @@ -125,9 +125,7 
@@ func checkObjectNoDefaults(t *testing.T, schema chartutil.Values) { func checkNoDefaultsInSchema(t *testing.T, schema chartutil.Values) { requireNoDefaultProperty(t, schema) - desc, err := schema.PathValue("description") - require.NoError(t, err) - require.False(t, strings.HasPrefix(desc.(string), defaultPrefix)) + requireNoDefaultInDescription(t, schema) } func getJSONName(t *testing.T, structField reflect.StructField) (field string, embedded bool) { @@ -143,7 +141,7 @@ func checkPtrLeafField(t *testing.T, field reflect.Value, schema chartutil.Value if field.IsNil() { // Operator code specifies no default for this field. // Make sure the schema does not mention one either. - require.Falsef(t, strings.HasPrefix(lastDescriptionLine(t, schema), defaultPrefix), "unexpected default in schema %v", schema) + requireNoDefaultInDescription(t, schema) return } checkLeafField(t, field.Elem(), schema) @@ -164,7 +162,19 @@ func checkLeafField(t *testing.T, inCodeDefault reflect.Value, schema chartutil. } inCodeDefaultDescription := fmt.Sprintf(defaultFormat, inCodeDefaultString) inCRDDefaultDescription := lastDescriptionLine(t, schema) - require.Equal(t, inCodeDefaultDescription, inCRDDefaultDescription) + require.True(t, endsWithCaseInsensitive(inCRDDefaultDescription, inCodeDefaultDescription), "%q should end (modulo case) with %q", inCRDDefaultDescription, inCodeDefaultDescription) +} + +func endsWithCaseInsensitive(s, suffix string) bool { + return strings.HasSuffix(strings.ToLower(s), strings.ToLower(suffix)) +} + +func requireNoDefaultInDescription(t *testing.T, schema chartutil.Values) { + require.Falsef(t, containsCaseInsensitive(lastDescriptionLine(t, schema), defaultPrefix), "unexpected default in schema description %v", schema) +} + +func containsCaseInsensitive(s, substr string) bool { + return strings.Contains(strings.ToLower(s), strings.ToLower(substr)) } func lastDescriptionLine(t *testing.T, schema chartutil.Values) string { diff --git a/operator/internal/securedcluster/defaults/scanner_v4.go b/operator/internal/securedcluster/defaults/scanner_v4.go index 6cb4119220939..bdef23f31fded 100644 --- a/operator/internal/securedcluster/defaults/scanner_v4.go +++ b/operator/internal/securedcluster/defaults/scanner_v4.go @@ -82,7 +82,10 @@ func securedClusterScannerV4Defaulting(logger logr.Logger, status *platform.Secu annotations[common.FeatureDefaultKeyScannerV4] = string(componentPolicy) } - defaults.ScannerV4 = &platform.LocalScannerV4ComponentSpec{ScannerComponent: &componentPolicy} + if defaults.ScannerV4 == nil { + defaults.ScannerV4 = &platform.LocalScannerV4ComponentSpec{} + } + defaults.ScannerV4.ScannerComponent = &componentPolicy return nil } diff --git a/operator/internal/securedcluster/defaults/static.go b/operator/internal/securedcluster/defaults/static.go index 4ac8f4cf274b4..32f1e123a1a90 100644 --- a/operator/internal/securedcluster/defaults/static.go +++ b/operator/internal/securedcluster/defaults/static.go @@ -68,6 +68,11 @@ var staticDefaults = platform.SecuredClusterSpec{ Network: &platform.GlobalNetworkSpec{ Policies: ptr.To(platform.NetworkPoliciesEnabled), }, + Customize: &platform.CustomizeSpec{ + DeploymentDefaults: &platform.DeploymentDefaultsSpec{ + PinToNodes: ptr.To(platform.PinToNodesNone), + }, + }, } var SecuredClusterStaticDefaults = SecuredClusterDefaultingFlow{ diff --git a/operator/internal/securedcluster/defaults/static_test.go b/operator/internal/securedcluster/defaults/static_test.go index abc9981c436b0..a04d03f857722 100644 --- 
a/operator/internal/securedcluster/defaults/static_test.go +++ b/operator/internal/securedcluster/defaults/static_test.go @@ -5,7 +5,6 @@ import ( "github.com/go-logr/logr" platform "github.com/stackrox/rox/operator/api/v1alpha1" - "github.com/stackrox/rox/operator/internal/common/defaulting_test_helpers" "github.com/stretchr/testify/require" ) @@ -31,12 +30,3 @@ func TestSecuredClusterStaticDefaults(t *testing.T) { }) } } - -func TestSecuredClusterStaticDefaultsMatchesCRD(t *testing.T) { - t.Setenv("ROX_ADMISSION_CONTROLLER_CONFIG", "true") - SecuredClusterSpecSchema := defaulting_test_helpers.LoadSpecSchema(t, "securedclusters") - - t.Run("Defaults", func(t *testing.T) { - defaulting_test_helpers.CheckStruct(t, staticDefaults, SecuredClusterSpecSchema) - }) -} diff --git a/operator/internal/securedcluster/extensions/defaulting_cross_check_test.go b/operator/internal/securedcluster/extensions/defaulting_cross_check_test.go new file mode 100644 index 0000000000000..f7b6c24ba12c7 --- /dev/null +++ b/operator/internal/securedcluster/extensions/defaulting_cross_check_test.go @@ -0,0 +1,22 @@ +package extensions + +import ( + "testing" + + "github.com/go-logr/logr/testr" + "github.com/stackrox/rox/operator/api/v1alpha1" + "github.com/stackrox/rox/operator/internal/common/defaulting_test_helpers" + "github.com/stretchr/testify/require" +) + +func TestSecuredClusterDefaultsMatchCRD(t *testing.T) { + SecuredClusterSpecSchema := defaulting_test_helpers.LoadSpecSchema(t, "securedclusters") + sc := &v1alpha1.SecuredCluster{} + for _, flow := range defaultingFlows { + require.NoError(t, flow.DefaultingFunc(testr.New(t), &v1alpha1.SecuredClusterStatus{}, map[string]string{}, &sc.Spec, &sc.Defaults)) + } + + t.Run("Defaults", func(t *testing.T) { + defaulting_test_helpers.CheckStruct(t, sc.Defaults, SecuredClusterSpecSchema) + }) +} diff --git a/operator/internal/securedcluster/extensions/reconcile_defaulting.go b/operator/internal/securedcluster/extensions/reconcile_defaulting.go index 37ca6ea95b0c5..725a2283f5118 100644 --- a/operator/internal/securedcluster/extensions/reconcile_defaulting.go +++ b/operator/internal/securedcluster/extensions/reconcile_defaulting.go @@ -10,7 +10,6 @@ import ( platform "github.com/stackrox/rox/operator/api/v1alpha1" "github.com/stackrox/rox/operator/internal/common" "github.com/stackrox/rox/operator/internal/securedcluster/defaults" - "github.com/stackrox/rox/pkg/features" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" ctrlClient "sigs.k8s.io/controller-runtime/pkg/client" @@ -19,6 +18,7 @@ import ( var defaultingFlows = []defaults.SecuredClusterDefaultingFlow{ defaults.SecuredClusterStaticDefaults, // Must go first defaults.SecuredClusterScannerV4DefaultingFlow, + defaults.SecuredClusterAdmissionControllerDefaultingFlow, } // FeatureDefaultingExtension executes "defaulting flows". A Secured Cluster defaulting flow is of type @@ -69,15 +69,10 @@ func reconcileFeatureDefaults(ctx context.Context, client ctrlClient.Client, u * // If an update is necessary, it patches the object on the cluster and returns an error to indicate that reconciliation should be retried. // In this case the provided unstructured u will also be updated as part of the patching. 
func setDefaultsAndPersist(ctx context.Context, logger logr.Logger, u *unstructured.Unstructured, securedCluster *platform.SecuredCluster, client ctrlClient.Client) error { - effectiveDefaultingFlows := defaultingFlows - if features.AdmissionControllerConfig.Enabled() { - effectiveDefaultingFlows = append(effectiveDefaultingFlows, defaults.SecuredClusterAdmissionControllerDefaultingFlow) - } - uBase := u.DeepCopy() patch := ctrlClient.MergeFrom(uBase) - for _, flow := range effectiveDefaultingFlows { + for _, flow := range defaultingFlows { if err := executeSingleDefaultingFlow(logger, u, securedCluster, flow); err != nil { return err } From 6fc06f680ac615d2f5d979d0b8b72bb40a12cd8e Mon Sep 17 00:00:00 2001 From: Yann Brillouet <91869377+rhybrillou@users.noreply.github.com> Date: Tue, 27 Jan 2026 08:55:47 +0100 Subject: [PATCH 022/232] ROX-32351: Add test to detect resource creation (#18233) --- .../builtin_scoped_authorizer_test.go | 75 +++++++++++++++++++ pkg/sac/resources/README.md | 22 ++++++ 2 files changed, 97 insertions(+) create mode 100644 pkg/sac/resources/README.md diff --git a/central/sac/authorizer/builtin_scoped_authorizer_test.go b/central/sac/authorizer/builtin_scoped_authorizer_test.go index c5e82792eb36f..2f4e6a2b102f2 100644 --- a/central/sac/authorizer/builtin_scoped_authorizer_test.go +++ b/central/sac/authorizer/builtin_scoped_authorizer_test.go @@ -834,3 +834,78 @@ func countAllowedResults(xs []bool) int { } return result } + +func TestNoAddedResources(t *testing.T) { + knownResources := []permissions.ResourceMetadata{ + resources.Access, + resources.Administration, + resources.Alert, + resources.CVE, + resources.Cluster, + resources.Compliance, + resources.Deployment, + resources.DeploymentExtension, + resources.Detection, + resources.Image, + resources.ImageAdministration, + resources.Integration, + resources.K8sRole, + resources.K8sRoleBinding, + resources.K8sSubject, + resources.Namespace, + resources.NetworkGraph, + resources.NetworkPolicy, + resources.Node, + resources.Secret, + resources.ServiceAccount, + resources.VirtualMachine, + resources.VulnerabilityManagementApprovals, + resources.VulnerabilityManagementRequests, + resources.WatchedImage, + resources.WorkflowAdministration, + } + knownDisabledResources := ([]permissions.ResourceMetadata)(nil) + knownInternalResources := []permissions.ResourceMetadata{ + resources.ComplianceOperator, + resources.Hash, + resources.InitBundleMeta, + resources.InstallationInfo, + resources.NetworkEntity, + resources.Notifications, + resources.Version, + resources.VulnerabilityRequest, + } + + const failureMessage = "" + + "Please contact the Sensor & Ecosystems team to validate " + + "your addition of resource. For more information about " + + "the resource creation process, check " + + "https://github.com/stackrox/stackrox/tree/master/pkg/sac/resources/README.md ." 
+ + for _, metadata := range resources.ListAllMetadata() { + assert.Contains( + t, + knownResources, + metadata, + failureMessage+" "+metadata.String(), + ) + } + for _, metadata := range resources.ListAllDisabledMetadata() { + assert.Contains( + t, + knownDisabledResources, + metadata, + failureMessage, + metadata.String(), + ) + } + for _, metadata := range resources.ListAllInternalMetadata() { + assert.Contains( + t, + knownInternalResources, + metadata, + failureMessage, + metadata.String(), + ) + } +} diff --git a/pkg/sac/resources/README.md b/pkg/sac/resources/README.md new file mode 100644 index 0000000000000..5d4b140eb8f76 --- /dev/null +++ b/pkg/sac/resources/README.md @@ -0,0 +1,22 @@ +# Access control resources + +The `list.go` file declares the resources used in the product to control +access to services and data. + +## Creating a new resource + +In general, we strive to avoid adding new resources, especially user-facing ones. +Sometimes it is necessary, but most of the time an existing resource is a better fit. +We avoid adding more resources, especially global ones, and strive to re-use existing +authorization patterns as well as existing resources, e.g., by editing +[the object type to resource mapping](https://github.com/stackrox/stackrox/blob/master/tools/generate-helpers/pg-table-bindings/list.go). + +Creating a new resource involves a few steps. + +1. Review the existing resources to find a fit for the object type being added. +2. Justify the need for a new resource (explain why the existing resources are not a good fit). +3. Get in touch with the sensors and ecosystems team to discuss the new resource. +4. Create the new resource in `pkg/sac/resources/list.go`. +5. Declare the new resource for the UI in `ui/apps/platform/src/types/roleResources.ts`. +6. Describe the resource as well as the meaning of read and write operations in `ui/apps/platform/src/Containers/AccessControl/PermissionSets/ResourceDescription.tsx`. +7. Request a review from the `stackrox/sensor-ecosystem` team on the PR introducing the new resource. 
\ No newline at end of file From 0361ceac7f5a3afd44df2c38a99740546853b403 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Jan 2026 08:26:20 +0000 Subject: [PATCH 023/232] chore(deps): bump sigs.k8s.io/controller-runtime from 0.23.0 to 0.23.1 (#18689) --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 271f946182140..32077fba84acb 100644 --- a/go.mod +++ b/go.mod @@ -173,7 +173,7 @@ require ( k8s.io/kubelet v0.32.11 k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 kubevirt.io/api v1.7.0 - sigs.k8s.io/controller-runtime v0.23.0 + sigs.k8s.io/controller-runtime v0.23.1 sigs.k8s.io/controller-tools v0.20.0 sigs.k8s.io/e2e-framework v0.6.0 sigs.k8s.io/yaml v1.6.0 @@ -531,7 +531,7 @@ require ( sigs.k8s.io/network-policy-api v0.1.5 // indirect sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/release-utils v0.12.3 // indirect - sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 // indirect ) // HOW TO BUMP diff --git a/go.sum b/go.sum index 6b2a28769652c..7858ca10ba5b1 100644 --- a/go.sum +++ b/go.sum @@ -2429,8 +2429,8 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= -sigs.k8s.io/controller-runtime v0.23.0 h1:Ubi7klJWiwEWqDY+odSVZiFA0aDSevOCXpa38yCSYu8= -sigs.k8s.io/controller-runtime v0.23.0/go.mod h1:DBOIr9NsprUqCZ1ZhsuJ0wAnQSIxY/C6VjZbmLgw0j0= +sigs.k8s.io/controller-runtime v0.23.1 h1:TjJSM80Nf43Mg21+RCy3J70aj/W6KyvDtOlpKf+PupE= +sigs.k8s.io/controller-runtime v0.23.1/go.mod h1:B6COOxKptp+YaUT5q4l6LqUJTRpizbgf9KSRNdQGns0= sigs.k8s.io/controller-tools v0.20.0 h1:VWZF71pwSQ2lZZCt7hFGJsOfDc5dVG28/IysjjMWXL8= sigs.k8s.io/controller-tools v0.20.0/go.mod h1:b4qPmjGU3iZwqn34alUU5tILhNa9+VXK+J3QV0fT/uU= sigs.k8s.io/e2e-framework v0.6.0 h1:p7hFzHnLKO7eNsWGI2AbC1Mo2IYxidg49BiT4njxkrM= @@ -2450,8 +2450,8 @@ sigs.k8s.io/release-utils v0.12.3 h1:iNVJY81QfmMCmXxMg8IvvkkeQNk6ZWlLj+iPKSlKyVQ sigs.k8s.io/release-utils v0.12.3/go.mod h1:BvbNmm1BmM3cnEpBmNHWL3wOSziOdGlsYR8vCFq/Q0o= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 h1:2WOzJpHUBVrrkDjU4KBT8n5LDcj824eX0I5UKcgeRUs= +sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= From 1cecc4b0a30a89f446dfc4ea9ff058a9dd0960fc Mon Sep 17 00:00:00 2001 From: Stephan Hesselmann Date: Tue, 27 Jan 2026 10:56:51 +0100 Subject: [PATCH 024/232] ROX-32453: invalidate token cache on 401/403 and retry once 
(#18668) --- sensor/common/centralproxy/transport.go | 49 +++ sensor/common/centralproxy/transport_test.go | 391 +++++++++++++++++++ 2 files changed, 440 insertions(+) diff --git a/sensor/common/centralproxy/transport.go b/sensor/common/centralproxy/transport.go index c33d005f1861b..aafb7594966b0 100644 --- a/sensor/common/centralproxy/transport.go +++ b/sensor/common/centralproxy/transport.go @@ -1,9 +1,11 @@ package centralproxy import ( + "bytes" "context" "crypto/x509" "fmt" + "io" "net/http" "net/url" "sync/atomic" @@ -16,6 +18,7 @@ import ( pkghttputil "github.com/stackrox/rox/pkg/httputil" "github.com/stackrox/rox/pkg/mtls" "github.com/stackrox/rox/pkg/mtls/verifier" + "github.com/stackrox/rox/pkg/utils" "google.golang.org/grpc" "google.golang.org/protobuf/types/known/durationpb" ) @@ -104,9 +107,50 @@ func (t *scopedTokenTransport) SetClient(conn grpc.ClientConnInterface) { // RoundTrip implements http.RoundTripper. // It reads the namespace scope from the request, obtains an appropriate token, // and injects it into the Authorization header before forwarding the request. +// If Central returns 401 or 403, the cached token is invalidated and the request +// is retried once with a fresh token. func (t *scopedTokenTransport) RoundTrip(req *http.Request) (*http.Response, error) { scope := req.Header.Get(stackroxNamespaceHeader) + // Buffer the request body upfront so we can replay it on retry. + var bodyBytes []byte + if req.Body != nil && req.Body != http.NoBody { + var err error + bodyBytes, err = io.ReadAll(req.Body) + if err != nil { + return nil, errors.Wrap(err, "reading request body") + } + req.Body = io.NopCloser(bytes.NewBuffer(bodyBytes)) + } + + resp, err := t.doRoundTrip(req, scope) + if err != nil { + return nil, err + } + + // If Central returns Unauthorized or Forbidden, invalidate the cached token + // and retry once with a fresh token. + if resp.StatusCode == http.StatusUnauthorized || resp.StatusCode == http.StatusForbidden { + // Consume and close the first response body to reuse the connection. + if resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + utils.IgnoreError(resp.Body.Close) + } + + // Restore the request body for retry. + if bodyBytes != nil { + req.Body = io.NopCloser(bytes.NewBuffer(bodyBytes)) + } + + t.tokenProvider.invalidateToken(scope) + return t.doRoundTrip(req, scope) + } + + return resp, nil +} + +// doRoundTrip performs a single round trip with token injection. +func (t *scopedTokenTransport) doRoundTrip(req *http.Request, scope string) (*http.Response, error) { token, err := t.tokenProvider.getTokenForScope(req.Context(), scope) if err != nil { return nil, errors.Wrap(err, "obtaining authorization token") @@ -182,6 +226,11 @@ func (p *tokenProvider) getTokenForScope(ctx context.Context, namespaceScope str return token, nil } +// invalidateToken removes the cached token for the given scope. +func (p *tokenProvider) invalidateToken(scope string) { + p.tokenCache.Remove(scope) +} + // buildTokenRequest creates the token request based on the namespace scope. // Returns an error if the cluster ID is not available yet. 
func (p *tokenProvider) buildTokenRequest(namespaceScope string) (*centralv1.GenerateTokenForPermissionsAndScopeRequest, error) { diff --git a/sensor/common/centralproxy/transport_test.go b/sensor/common/centralproxy/transport_test.go index b24e425d6af79..a5b24a6a9c340 100644 --- a/sensor/common/centralproxy/transport_test.go +++ b/sensor/common/centralproxy/transport_test.go @@ -3,6 +3,7 @@ package centralproxy import ( "context" "errors" + "fmt" "io" "net/http" "net/http/httptest" @@ -406,6 +407,396 @@ func (d *dynamicFakeTokenServiceClient) GenerateTokenForPermissionsAndScope( }, nil } +func TestScopedTokenTransport_InvalidateOnUnauthorized(t *testing.T) { + t.Run("401 response triggers retry with fresh token", func(t *testing.T) { + tokenCallCount := 0 + fakeClient := &dynamicFakeTokenServiceClient{ + getToken: func() string { + tokenCallCount++ + return fmt.Sprintf("token-%d", tokenCallCount) + }, + } + + requestCount := 0 + mockBase := roundTripperFunc(func(req *http.Request) (*http.Response, error) { + requestCount++ + // First request returns 401, retry returns 200 + if req.Header.Get("Authorization") == "Bearer token-1" { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: io.NopCloser(strings.NewReader(`{"error":"unauthorized"}`)), + Header: http.Header{"Content-Type": []string{"application/json"}}, + }, nil + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(strings.NewReader(`{"ok":true}`)), + Header: http.Header{"Content-Type": []string{"application/json"}}, + }, nil + }) + + transport := &scopedTokenTransport{ + base: mockBase, + tokenProvider: newTestTokenProvider(fakeClient, "test-cluster-id"), + } + + req := httptest.NewRequest(http.MethodGet, "/v1/alerts", nil) + req.Header.Set(stackroxNamespaceHeader, "test-namespace") + + // Single RoundTrip should retry internally and return success + resp, err := transport.RoundTrip(req) + require.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode, "should return success after retry") + assert.Equal(t, 2, tokenCallCount, "should have requested two tokens (original + retry)") + assert.Equal(t, 2, requestCount, "should have made two requests (original + retry)") + }) + + t.Run("403 response triggers retry with fresh token", func(t *testing.T) { + tokenCallCount := 0 + fakeClient := &dynamicFakeTokenServiceClient{ + getToken: func() string { + tokenCallCount++ + return fmt.Sprintf("token-%d", tokenCallCount) + }, + } + + requestCount := 0 + mockBase := roundTripperFunc(func(req *http.Request) (*http.Response, error) { + requestCount++ + // First request returns 403, retry returns 200 + if req.Header.Get("Authorization") == "Bearer token-1" { + return &http.Response{ + StatusCode: http.StatusForbidden, + Body: io.NopCloser(strings.NewReader(`{"error":"forbidden"}`)), + Header: http.Header{"Content-Type": []string{"application/json"}}, + }, nil + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(strings.NewReader(`{"ok":true}`)), + Header: http.Header{"Content-Type": []string{"application/json"}}, + }, nil + }) + + transport := &scopedTokenTransport{ + base: mockBase, + tokenProvider: newTestTokenProvider(fakeClient, "test-cluster-id"), + } + + req := httptest.NewRequest(http.MethodGet, "/v1/alerts", nil) + req.Header.Set(stackroxNamespaceHeader, "test-namespace") + + // Single RoundTrip should retry internally and return success + resp, err := transport.RoundTrip(req) + require.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode, "should 
return success after retry") + assert.Equal(t, 2, tokenCallCount, "should have requested two tokens (original + retry)") + assert.Equal(t, 2, requestCount, "should have made two requests (original + retry)") + }) + + t.Run("retry only happens once", func(t *testing.T) { + tokenCallCount := 0 + fakeClient := &dynamicFakeTokenServiceClient{ + getToken: func() string { + tokenCallCount++ + return fmt.Sprintf("token-%d", tokenCallCount) + }, + } + + requestCount := 0 + mockBase := roundTripperFunc(func(req *http.Request) (*http.Response, error) { + requestCount++ + // Always return 401 + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: io.NopCloser(strings.NewReader(`{"error":"unauthorized"}`)), + Header: http.Header{"Content-Type": []string{"application/json"}}, + }, nil + }) + + transport := &scopedTokenTransport{ + base: mockBase, + tokenProvider: newTestTokenProvider(fakeClient, "test-cluster-id"), + } + + req := httptest.NewRequest(http.MethodGet, "/v1/alerts", nil) + req.Header.Set(stackroxNamespaceHeader, "test-namespace") + + // Should retry once and then return the 401 + resp, err := transport.RoundTrip(req) + require.NoError(t, err) + assert.Equal(t, http.StatusUnauthorized, resp.StatusCode, "should return 401 after retry fails") + assert.Equal(t, 2, tokenCallCount, "should have requested exactly two tokens") + assert.Equal(t, 2, requestCount, "should have made exactly two requests") + }) + + t.Run("other error responses do not invalidate cache", func(t *testing.T) { + statusCodes := []int{ + http.StatusOK, + http.StatusNotFound, + http.StatusInternalServerError, + } + + for _, statusCode := range statusCodes { + t.Run(fmt.Sprintf("status %d", statusCode), func(t *testing.T) { + callCount := 0 + fakeClient := &dynamicFakeTokenServiceClient{ + getToken: func() string { + callCount++ + return "cached-token" + }, + } + + mockBase := roundTripperFunc(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: statusCode, + Body: io.NopCloser(strings.NewReader(`{}`)), + Header: http.Header{"Content-Type": []string{"application/json"}}, + }, nil + }) + + transport := &scopedTokenTransport{ + base: mockBase, + tokenProvider: newTestTokenProvider(fakeClient, "test-cluster-id"), + } + + req := httptest.NewRequest(http.MethodGet, "/v1/alerts", nil) + req.Header.Set(stackroxNamespaceHeader, "test-namespace") + + // First request + _, err := transport.RoundTrip(req) + require.NoError(t, err) + assert.Equal(t, 1, callCount) + + // Second request - should use cached token + _, err = transport.RoundTrip(req) + require.NoError(t, err) + assert.Equal(t, 1, callCount, "token should still be cached for status %d", statusCode) + }) + } + }) + + t.Run("transport error does not invalidate cache", func(t *testing.T) { + callCount := 0 + fakeClient := &dynamicFakeTokenServiceClient{ + getToken: func() string { + callCount++ + return "cached-token" + }, + } + + transportErr := errors.New("connection refused") + mockBase := roundTripperFunc(func(req *http.Request) (*http.Response, error) { + return nil, transportErr + }) + + transport := &scopedTokenTransport{ + base: mockBase, + tokenProvider: newTestTokenProvider(fakeClient, "test-cluster-id"), + } + + req := httptest.NewRequest(http.MethodGet, "/v1/alerts", nil) + req.Header.Set(stackroxNamespaceHeader, "test-namespace") + + // First request - transport error + _, err := transport.RoundTrip(req) + require.Error(t, err) + assert.Equal(t, 1, callCount) + + // Second request - should use cached token (error 
didn't invalidate) + _, err = transport.RoundTrip(req) + require.Error(t, err) + assert.Equal(t, 1, callCount, "token should still be cached after transport error") + }) +} + +func TestTokenProvider_InvalidateToken(t *testing.T) { + t.Run("invalidateToken removes token from cache", func(t *testing.T) { + callCount := 0 + fakeClient := &dynamicFakeTokenServiceClient{ + getToken: func() string { + callCount++ + return fmt.Sprintf("token-%d", callCount) + }, + } + + provider := newTestTokenProvider(fakeClient, "test-cluster-id") + + // Get token (causes cache) + token1, err := provider.getTokenForScope(context.Background(), "my-scope") + require.NoError(t, err) + assert.Equal(t, "token-1", token1) + assert.Equal(t, 1, callCount) + + // Get again - should be cached + token2, err := provider.getTokenForScope(context.Background(), "my-scope") + require.NoError(t, err) + assert.Equal(t, "token-1", token2) + assert.Equal(t, 1, callCount) + + // Invalidate + provider.invalidateToken("my-scope") + + // Get again - should fetch new token + token3, err := provider.getTokenForScope(context.Background(), "my-scope") + require.NoError(t, err) + assert.Equal(t, "token-2", token3) + assert.Equal(t, 2, callCount) + }) + + t.Run("invalidateToken only affects specified scope", func(t *testing.T) { + callCount := 0 + fakeClient := &dynamicFakeTokenServiceClient{ + getToken: func() string { + callCount++ + return fmt.Sprintf("token-%d", callCount) + }, + } + + provider := newTestTokenProvider(fakeClient, "test-cluster-id") + + // Cache tokens for two scopes + _, err := provider.getTokenForScope(context.Background(), "scope-a") + require.NoError(t, err) + _, err = provider.getTokenForScope(context.Background(), "scope-b") + require.NoError(t, err) + assert.Equal(t, 2, callCount) + + // Invalidate only scope-a + provider.invalidateToken("scope-a") + + // scope-a should get new token + tokenA, err := provider.getTokenForScope(context.Background(), "scope-a") + require.NoError(t, err) + assert.Equal(t, "token-3", tokenA) + assert.Equal(t, 3, callCount) + + // scope-b should still be cached + tokenB, err := provider.getTokenForScope(context.Background(), "scope-b") + require.NoError(t, err) + assert.Equal(t, "token-2", tokenB) + assert.Equal(t, 3, callCount) + }) +} + +func TestScopedTokenTransport_RetryWithRequestBody(t *testing.T) { + t.Run("POST request with body retries successfully", func(t *testing.T) { + tokenCallCount := 0 + fakeClient := &dynamicFakeTokenServiceClient{ + getToken: func() string { + tokenCallCount++ + return fmt.Sprintf("token-%d", tokenCallCount) + }, + } + + bodiesReceived := []string{} + requestCount := 0 + mockBase := roundTripperFunc(func(req *http.Request) (*http.Response, error) { + requestCount++ + // Read the body to verify it's available + body, _ := io.ReadAll(req.Body) + bodiesReceived = append(bodiesReceived, string(body)) + + if req.Header.Get("Authorization") == "Bearer token-1" { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: io.NopCloser(strings.NewReader(`{"error":"unauthorized"}`)), + Header: http.Header{"Content-Type": []string{"application/json"}}, + }, nil + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(strings.NewReader(`{"ok":true}`)), + Header: http.Header{"Content-Type": []string{"application/json"}}, + }, nil + }) + + transport := &scopedTokenTransport{ + base: mockBase, + tokenProvider: newTestTokenProvider(fakeClient, "test-cluster-id"), + } + + // Create request with body (body is buffered upfront, so retry 
works). + bodyContent := `{"data":"test"}` + req := httptest.NewRequest(http.MethodPost, "/v1/alerts", strings.NewReader(bodyContent)) + req.Header.Set(stackroxNamespaceHeader, "test-namespace") + + resp, err := transport.RoundTrip(req) + require.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode, "should return success after retry") + assert.Equal(t, 2, tokenCallCount, "should have requested two tokens") + assert.Equal(t, 2, requestCount, "should have made two requests") + // Both requests should have received the body + assert.Equal(t, []string{bodyContent, bodyContent}, bodiesReceived, "both requests should receive the body") + }) + + t.Run("first response body is drained and closed before retry", func(t *testing.T) { + tokenCallCount := 0 + fakeClient := &dynamicFakeTokenServiceClient{ + getToken: func() string { + tokenCallCount++ + return fmt.Sprintf("token-%d", tokenCallCount) + }, + } + + firstBodyDrained := false + firstBodyClosed := false + mockBase := roundTripperFunc(func(req *http.Request) (*http.Response, error) { + if req.Header.Get("Authorization") == "Bearer token-1" { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: &trackingReadCloser{ + ReadCloser: io.NopCloser(strings.NewReader(`{"error":"unauthorized"}`)), + onRead: func() { firstBodyDrained = true }, + onClose: func() { firstBodyClosed = true }, + }, + Header: http.Header{"Content-Type": []string{"application/json"}}, + }, nil + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(strings.NewReader(`{"ok":true}`)), + Header: http.Header{"Content-Type": []string{"application/json"}}, + }, nil + }) + + transport := &scopedTokenTransport{ + base: mockBase, + tokenProvider: newTestTokenProvider(fakeClient, "test-cluster-id"), + } + + req := httptest.NewRequest(http.MethodGet, "/v1/alerts", nil) + req.Header.Set(stackroxNamespaceHeader, "test-namespace") + + resp, err := transport.RoundTrip(req) + require.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.True(t, firstBodyDrained, "first response body should be drained before retry") + assert.True(t, firstBodyClosed, "first response body should be closed before retry") + }) +} + +// trackingReadCloser wraps an io.ReadCloser and tracks read/close operations. +type trackingReadCloser struct { + io.ReadCloser + onRead func() + onClose func() +} + +func (t *trackingReadCloser) Read(p []byte) (n int, err error) { + if t.onRead != nil { + t.onRead() + } + return t.ReadCloser.Read(p) +} + +func (t *trackingReadCloser) Close() error { + if t.onClose != nil { + t.onClose() + } + return t.ReadCloser.Close() +} + // roundTripperFunc is a helper to create RoundTripper from a function. 
type roundTripperFunc func(*http.Request) (*http.Response, error) From 48993382796454a5ebb109741cddb9b57fc1de2e Mon Sep 17 00:00:00 2001 From: rhacs-bot <148914812+rhacs-bot@users.noreply.github.com> Date: Tue, 27 Jan 2026 12:10:50 +0100 Subject: [PATCH 025/232] chore(collector): Update COLLECTOR_VERSION (#18646) --- COLLECTOR_VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/COLLECTOR_VERSION b/COLLECTOR_VERSION index cbd15a0dc8b42..b94c548573151 100644 --- a/COLLECTOR_VERSION +++ b/COLLECTOR_VERSION @@ -1 +1 @@ -3.23.x-110-gae27601472 +3.23.x-117-g24f41bdc67 From f256d93bfd398531dfa735f31cbd3ccf97b5cc80 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Jan 2026 11:29:25 +0000 Subject: [PATCH 026/232] chore(deps): bump wheel from 0.45.1 to 0.46.2 in /operator/bundle_helpers (#18634) --- operator/bundle_helpers/requirements-build.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/operator/bundle_helpers/requirements-build.txt b/operator/bundle_helpers/requirements-build.txt index 5f7bb0f4c1dc5..f9aa46337a0dc 100644 --- a/operator/bundle_helpers/requirements-build.txt +++ b/operator/bundle_helpers/requirements-build.txt @@ -74,9 +74,9 @@ flit-core==3.10.1 \ --hash=sha256:66e5b87874a0d6e39691f0e22f09306736b633548670ad3c09ec9db03c5662f7 \ --hash=sha256:cb31a76e8b31ad3351bb89e531f64ef2b05d1e65bd939183250bf81ddf4922a8 # via -r requirements-build.in -wheel==0.45.1 \ - --hash=sha256:661e1abd9198507b1409a20c02106d9670b2576e916d58f520316666abca6729 \ - --hash=sha256:708e7481cc80179af0e556bbf0cc00b8444c7321e2700b8d8580231d13017248 +wheel==0.46.2 \ + --hash=sha256:33ae60725d69eaa249bc1982e739943c23b34b58d51f1cb6253453773aca6e65 \ + --hash=sha256:3d79e48fde9847618a5a181f3cc35764c349c752e2fe911e65fa17faab9809b0 # via -r requirements-build.in # The following packages are considered to be unsafe in a requirements file: From 8bcdd2e37d3a652150501bb6474bb8fa16d03255 Mon Sep 17 00:00:00 2001 From: David Vail Date: Tue, 27 Jan 2026 08:29:26 -0500 Subject: [PATCH 027/232] ROX-32773: Add tests to check table columns in ns view (#18653) --- .../projects/projectSecurityTab.test.ts | 77 +++++++++++++++++-- .../cypress/integration-ocp/routes.ts | 2 +- .../workloads/securityTab.test.ts | 4 +- 3 files changed, 72 insertions(+), 11 deletions(-) diff --git a/ui/apps/platform/cypress/integration-ocp/projects/projectSecurityTab.test.ts b/ui/apps/platform/cypress/integration-ocp/projects/projectSecurityTab.test.ts index 0d03a1df9cb7f..a79692873b333 100644 --- a/ui/apps/platform/cypress/integration-ocp/projects/projectSecurityTab.test.ts +++ b/ui/apps/platform/cypress/integration-ocp/projects/projectSecurityTab.test.ts @@ -1,7 +1,10 @@ import { visitFromConsoleLeftNavExpandable } from '../../helpers/nav'; import { withOcpAuth } from '../../helpers/ocpAuth'; +import { hasFeatureFlag } from '../../helpers/features'; +import { assertVisibleTableColumns } from '../../helpers/tableHelpers'; import { interceptAndWatchRequests } from '../../helpers/request'; import pf6 from '../../selectors/pf6'; +import { selectors } from '../../integration/vulnerabilities/vulnerabilities.selectors'; import { acsAuthNamespaceHeader, getImageCVEListRoute, @@ -9,6 +12,17 @@ import { routeMatcherMapForBasePlugin, } from '../routes'; +function visitStackroxProjectSecurityTab(visitFunction: () => void, resourceIconTitle: string) { + withOcpAuth(); + visitFunction(); + + cy.get(pf6.menuToggle).contains('Requester').click(); + 
cy.get(pf6.menuItem).contains('Name').click(); + cy.get('input[aria-label="Name filter"]').type('stackrox'); + cy.get(`[title="${resourceIconTitle}"] + a`).contains('stackrox').click(); + cy.get(pf6.tabButton).contains('Security').click(); +} + function visitProjectSecurityTabAndCheckAuthHeaders( visitFunction: () => void, resourceIconTitle: string @@ -17,14 +31,7 @@ function visitProjectSecurityTabAndCheckAuthHeaders( ...routeMatcherMapForBasePlugin, [getImageCVEListRoute]: getImageCVEListRouteMatcher, }).then(({ waitForRequests }) => { - withOcpAuth(); - visitFunction(); - - cy.get(pf6.menuToggle).contains('Requester').click(); - cy.get(pf6.menuItem).contains('Name').click(); - cy.get('input[aria-label="Name filter"]').type('stackrox'); - cy.get(`[title="${resourceIconTitle}"] + a`).contains('stackrox').click(); - cy.get(pf6.tabButton).contains('Security').click(); + visitStackroxProjectSecurityTab(visitFunction, resourceIconTitle); waitForRequests([]).then( ([ @@ -50,6 +57,48 @@ function visitProjectSecurityTabAndCheckAuthHeaders( }); } +function visitProjectSecurityTabAndCheckColumns( + visitFunction: () => void, + resourceIconTitle: string +) { + visitStackroxProjectSecurityTab(visitFunction, resourceIconTitle); + + // Check CVE table columns + const expectedCveTableColumns = [ + 'Row expansion', + 'CVE', + 'Images by severity', + 'Top CVSS', + hasFeatureFlag('ROX_SCANNER_V4') ? 'Top NVD CVSS' : null, + hasFeatureFlag('ROX_SCANNER_V4') ? 'EPSS probability' : null, + 'First discovered', + 'Published', + ].filter((column) => column !== null); + assertVisibleTableColumns('table', expectedCveTableColumns); + + // Check Image table columns + const expectedImageTableColumns = [ + 'Image', + 'CVEs by severity', + 'Operating system', + 'Deployments', + 'Age', + 'Scan time', + ]; + cy.get(selectors.entityTypeToggleItem('Image')).click(); + assertVisibleTableColumns('table', expectedImageTableColumns); + + // Check Deployment table columns - Namespace column should be hidden in project-scoped view + const expectedDeploymentTableColumns = [ + 'Deployment', + 'CVEs by severity', + 'Images', + 'First discovered', + ]; + cy.get(selectors.entityTypeToggleItem('Deployment')).click(); + assertVisibleTableColumns('table', expectedDeploymentTableColumns); +} + describe('Project Security Tabs', () => { describe('Project Security Tab via Home -> Projects', () => { it('should send the correct auth headers for namespace scoped requests on project security tab', () => { @@ -57,6 +106,12 @@ describe('Project Security Tabs', () => { visitFromConsoleLeftNavExpandable('Home', 'Projects'); }, 'Project'); }); + + it('should display only the expected table columns for each entity type', () => { + visitProjectSecurityTabAndCheckColumns(() => { + visitFromConsoleLeftNavExpandable('Home', 'Projects'); + }, 'Project'); + }); }); describe('Project Security Tab via Administration -> Namespaces', () => { @@ -65,5 +120,11 @@ describe('Project Security Tabs', () => { visitFromConsoleLeftNavExpandable('Administration', 'Namespaces'); }, 'Namespace'); }); + + it('should display only the expected table columns for each entity type', () => { + visitProjectSecurityTabAndCheckColumns(() => { + visitFromConsoleLeftNavExpandable('Administration', 'Namespaces'); + }, 'Namespace'); + }); }); }); diff --git a/ui/apps/platform/cypress/integration-ocp/routes.ts b/ui/apps/platform/cypress/integration-ocp/routes.ts index 4403ce170fc2f..7febe8523103f 100644 --- a/ui/apps/platform/cypress/integration-ocp/routes.ts +++ 
b/ui/apps/platform/cypress/integration-ocp/routes.ts @@ -5,7 +5,7 @@ export const featureFlagsRoute = 'featureFlags'; export const publicConfigRoute = 'publicConfig'; export const getImageCVEListRoute = 'getImageCVEList'; -export const deploymentListRoute = 'deploymentList'; +export const deploymentsRoute = 'deployments'; export const getCVEsForDeploymentRoute = 'getCVEsForDeployment'; export const metadataRouteMatcher = { method: 'GET', url: '**/api-service/**/v1/metadata' }; diff --git a/ui/apps/platform/cypress/integration-ocp/workloads/securityTab.test.ts b/ui/apps/platform/cypress/integration-ocp/workloads/securityTab.test.ts index bda16a4e72382..c566ea556c581 100644 --- a/ui/apps/platform/cypress/integration-ocp/workloads/securityTab.test.ts +++ b/ui/apps/platform/cypress/integration-ocp/workloads/securityTab.test.ts @@ -5,8 +5,8 @@ import { interceptAndWatchRequests } from '../../helpers/request'; import pf6 from '../../selectors/pf6'; import { acsAuthNamespaceHeader, - deploymentListRoute, deploymentListRouteMatcher, + deploymentsRoute, getCVEsForDeploymentRoute, getCVEsForDeploymentRouteMatcher, routeMatcherMapForBasePlugin, @@ -16,7 +16,7 @@ describe('Workloads - Security tab', () => { it('should send the correct auth headers for namespace scoped requests on workload security tab', () => { interceptAndWatchRequests({ ...routeMatcherMapForBasePlugin, - [deploymentListRoute]: deploymentListRouteMatcher, + [deploymentsRoute]: deploymentListRouteMatcher, [getCVEsForDeploymentRoute]: getCVEsForDeploymentRouteMatcher, }).then(({ waitForRequests }) => { withOcpAuth(); From e669f8fc8b783054225a234c5cd2baa3cc2b752b Mon Sep 17 00:00:00 2001 From: Mark Pedrotti Date: Tue, 27 Jan 2026 08:54:15 -0500 Subject: [PATCH 028/232] ROX-32824: Rewrite PrometheusMetricsCard with React elements (#18680) --- .../components/PrometheusMetricsCard.tsx | 72 ++++++++++++------- 1 file changed, 48 insertions(+), 24 deletions(-) diff --git a/ui/apps/platform/src/Containers/SystemConfig/Details/components/PrometheusMetricsCard.tsx b/ui/apps/platform/src/Containers/SystemConfig/Details/components/PrometheusMetricsCard.tsx index c1c5fdc1ab074..f0d08901311e4 100644 --- a/ui/apps/platform/src/Containers/SystemConfig/Details/components/PrometheusMetricsCard.tsx +++ b/ui/apps/platform/src/Containers/SystemConfig/Details/components/PrometheusMetricsCard.tsx @@ -89,7 +89,11 @@ const predefinedMetrics: Record< }, }; -function labelGroup(labels: PrometheusMetricsLabels): ReactElement { +export type PrometheusMetricsLabelGroupProps = { + labels: PrometheusMetricsLabels; +}; + +function PrometheusMetricsLabelGroup({ labels }: PrometheusMetricsLabelGroupProps): ReactElement { return ( {labels.labels.map((label) => { @@ -103,8 +107,11 @@ function labelGroup(labels: PrometheusMetricsLabels): ReactElement { ); } -// TODO: refactor it in order to make it a proper react component. -function filterGroup(labels: PrometheusMetricsLabels): ReactElement { +export type PrometheusMetricsFilterGroupProps = { + labels: PrometheusMetricsLabels; +}; + +function PrometheusMetricsFilterGroup({ labels }: PrometheusMetricsFilterGroupProps): ReactElement { const includeEntries = Object.entries(labels.includeFilters ?? 
{}).sort(([a], [b]) => a.localeCompare(b) ); @@ -131,16 +138,25 @@ function filterGroup(labels: PrometheusMetricsLabels): ReactElement { ); } -function predefinedMetricTableRow( - rowIndex: number, - enabled: boolean, - category: PrometheusMetricsCategory, - metric: string, +export type PrometheusMetricsPredefinedMetricTableRowProps = { + category: PrometheusMetricsCategory; + enabled: boolean; + metric: string; onCustomChange: | ((value: unknown, id: string) => Promise | Promise>) - | undefined, - showFilters: boolean -): ReactElement { + | undefined; + rowIndex: number; + showFilters: boolean; +}; + +function PrometheusMetricsPredefinedMetricTableRow({ + rowIndex, + enabled, + category, + metric, + onCustomChange, + showFilters, +}: PrometheusMetricsPredefinedMetricTableRowProps): ReactElement { return ( {onCustomChange ? ( @@ -169,11 +185,11 @@ function predefinedMetricTableRow( Predefined - {labelGroup(predefinedMetrics[category][metric])} + {showFilters && ( - {filterGroup(predefinedMetrics[category][metric])} + )} @@ -283,13 +299,15 @@ function PrometheusMetricsTable({ const enabled = descriptors !== undefined && predefinedMetric in descriptors; if (isEnabledOriginal || (onCustomChange && !enabled)) { - return predefinedMetricTableRow( - rowIndex, - isEnabledOriginal, - category, - predefinedMetric, - onCustomChange, - showFilters + return ( + ); } return null; @@ -316,8 +334,14 @@ function PrometheusMetricsTable({ {metric} Custom - {labelGroup(labels)} - {showFilters && {filterGroup(labels)}} + + + + {showFilters && ( + + + + )} ); })} @@ -338,7 +362,7 @@ export function PrometheusMetricsCard({ period, descriptors, title, -}: PrometheusMetricsCardProps) { +}: PrometheusMetricsCardProps): ReactElement { const hasMetrics = descriptors && Object.keys(descriptors).length > 0; return ( @@ -449,7 +473,7 @@ export function PrometheusMetricsForm({ title, onChange, onCustomChange, -}: PrometheusMetricsFormProps) { +}: PrometheusMetricsFormProps): ReactElement { return ( From f764f1089d29bb8f6e1e35f1a757732ec1f8010e Mon Sep 17 00:00:00 2001 From: Guzman Date: Tue, 27 Jan 2026 15:18:32 +0100 Subject: [PATCH 029/232] ROX-32069: Add random delay to roxagent initial report send (#18594) Co-authored-by: Piotr Rygielski <114479+vikin91@users.noreply.github.com> --- compliance/virtualmachines/roxagent/README.md | 10 +++++-- .../virtualmachines/roxagent/cmd/cmd.go | 3 ++ .../virtualmachines/roxagent/common/config.go | 15 +++++----- .../virtualmachines/roxagent/index/index.go | 28 +++++++++++++++++-- 4 files changed, 43 insertions(+), 13 deletions(-) diff --git a/compliance/virtualmachines/roxagent/README.md b/compliance/virtualmachines/roxagent/README.md index b7fca2fdbc523..40923f8b2807f 100644 --- a/compliance/virtualmachines/roxagent/README.md +++ b/compliance/virtualmachines/roxagent/README.md @@ -26,6 +26,7 @@ sudo ./roxagent --daemon --index-interval 10m --host-path /custom/path --port 20 - `--daemon` - Run continuously (default: false). - `--index-interval` - Time between scans in daemon mode (default: 4h). - `--host-path` - Where to look for package databases (default: /). +- `--max-initial-report-delay` - Max delay before starting to send in daemon mode (default: 20m). - `--port` - VSock port (default: 818). - `--repo-cpe-url` - URL for the repository to CPE mapping. - `--timeout` - VSock client timeout when sending index reports. @@ -51,16 +52,19 @@ GOOS=linux GOARCH=amd64 go build -o roxagent-linux . 
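The flags listed above compose; for example, a daemon deployment that wants the first report to go out within five minutes could be started as follows (the values shown are illustrative, not the defaults):

```bash
# Run as a daemon: rescan every 4 hours, and send the first index report
# after a random delay of at most 5 minutes instead of the 20-minute default.
sudo ./roxagent --daemon --index-interval 4h --max-initial-report-delay 5m
```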
## Troubleshooting -**Can't connect to host** +### Can't connect to host + - Check if vsock is enabled in the VM. - Verify the port isn't in use. - Make sure vsock kernel modules are loaded. -**No packages found** +### No packages found + - Check `--host-path` points to the right place. - Verify `rpm`/`dnf` databases exist and are readable. - Use `--verbose` to examine the index report and compare with the content from `rpm`/`dnf` databases. -**Scan failures** +### Scan failures + - Check internet access for repo-to-CPE downloads. - Look at logs for specific errors. diff --git a/compliance/virtualmachines/roxagent/cmd/cmd.go b/compliance/virtualmachines/roxagent/cmd/cmd.go index 6e4cd207927b5..b8580e5fe84fc 100644 --- a/compliance/virtualmachines/roxagent/cmd/cmd.go +++ b/compliance/virtualmachines/roxagent/cmd/cmd.go @@ -32,6 +32,9 @@ func RootCmd(ctx context.Context) *cobra.Command { cmd.Flags().StringVar(&cfg.IndexHostPath, "host-path", "/", "Path where the indexer starts searching for the RPM and DNF databases.", ) + cmd.Flags().DurationVar(&cfg.MaxInitialReportDelay, "max-initial-report-delay", 20*time.Minute, + "Max delay before starting to send in daemon mode.", + ) cmd.Flags().StringVar(&cfg.RepoToCPEMappingURL, "repo-cpe-url", repoToCPEMappingURL, "URL for the repository to CPE mapping.", ) diff --git a/compliance/virtualmachines/roxagent/common/config.go b/compliance/virtualmachines/roxagent/common/config.go index 969a328897adf..9946eeb3ad386 100644 --- a/compliance/virtualmachines/roxagent/common/config.go +++ b/compliance/virtualmachines/roxagent/common/config.go @@ -3,11 +3,12 @@ package common import "time" type Config struct { - DaemonMode bool - IndexHostPath string - IndexInterval time.Duration - RepoToCPEMappingURL string - Timeout time.Duration - Verbose bool - VsockPort uint32 + DaemonMode bool + IndexHostPath string + IndexInterval time.Duration + MaxInitialReportDelay time.Duration + RepoToCPEMappingURL string + Timeout time.Duration + Verbose bool + VsockPort uint32 } diff --git a/compliance/virtualmachines/roxagent/index/index.go b/compliance/virtualmachines/roxagent/index/index.go index 87c67d66dc9b6..03c7d0b2fd0ce 100644 --- a/compliance/virtualmachines/roxagent/index/index.go +++ b/compliance/virtualmachines/roxagent/index/index.go @@ -3,6 +3,7 @@ package index import ( "context" "fmt" + "math/rand" "net/http" "time" @@ -21,9 +22,12 @@ const ( ) func RunDaemon(ctx context.Context, cfg *common.Config, client *vsock.Client) error { - // Create the initial index report immediately. 
+ if err := applyRandomDelay(ctx, cfg.MaxInitialReportDelay); err != nil { + return fmt.Errorf("delaying initial index: %w", err) + } + if err := RunSingle(ctx, cfg, client); err != nil { - log.Errorf("Failed to run initial index: %v", err) + return fmt.Errorf("handling initial index: %w", err) } ticker := time.NewTicker(cfg.IndexInterval) @@ -35,7 +39,7 @@ func RunDaemon(ctx context.Context, cfg *common.Config, client *vsock.Client) er return ctx.Err() case <-ticker.C: if err := RunSingle(ctx, cfg, client); err != nil { - log.Errorf("Failed to run index: %v", err) + log.Errorf("Failed to handle index: %v", err) } } } @@ -75,3 +79,21 @@ func runIndexer(ctx context.Context, cfg *common.Config) (*v4.IndexReport, error } return report, nil } + +func applyRandomDelay(ctx context.Context, maxDelay time.Duration) error { + if maxDelay <= 0 { + return nil + } + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + delay := time.Duration(r.Int63n(maxDelay.Nanoseconds() + 1)) + + log.Infof("Delaying initial index report by %s (use --max-initial-report-delay to control this).", delay) + + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(delay): + return nil + } +} From 3d1f9195ec16eb915075e0bed6dddb5097e6b0f7 Mon Sep 17 00:00:00 2001 From: David Shrewsberry <99685630+dashrews78@users.noreply.github.com> Date: Tue, 27 Jan 2026 09:24:09 -0500 Subject: [PATCH 030/232] ROX-32820: gate Postgres retries (#18670) --- pkg/env/postgres_retry.go | 14 +++ pkg/postgres/pgutils/retry.go | 20 ++-- pkg/postgres/pgutils/retry_test.go | 176 +++++++++++++++++++++++++++++ 3 files changed, 201 insertions(+), 9 deletions(-) create mode 100644 pkg/env/postgres_retry.go create mode 100644 pkg/postgres/pgutils/retry_test.go diff --git a/pkg/env/postgres_retry.go b/pkg/env/postgres_retry.go new file mode 100644 index 0000000000000..20738aafc047a --- /dev/null +++ b/pkg/env/postgres_retry.go @@ -0,0 +1,14 @@ +package env + +import "time" + +var ( + // PostgresQueryRetryInterval is the interval between retry attempts for transient PostgreSQL errors + PostgresQueryRetryInterval = registerDurationSetting("ROX_POSTGRES_QUERY_RETRY_INTERVAL", 5*time.Second) + + // PostgresQueryRetryTimeout is the maximum duration to retry transient PostgreSQL errors + PostgresQueryRetryTimeout = registerDurationSetting("ROX_POSTGRES_QUERY_RETRY_TIMEOUT", 5*time.Minute) + + // PostgresDisableQueryRetries disables retry logic for transient PostgreSQL errors (fail fast after single attempt) + PostgresDisableQueryRetries = RegisterBooleanSetting("ROX_POSTGRES_DISABLE_QUERY_RETRIES", false) +) diff --git a/pkg/postgres/pgutils/retry.go b/pkg/postgres/pgutils/retry.go index 98cf7311e53fb..0f6d001f935e9 100644 --- a/pkg/postgres/pgutils/retry.go +++ b/pkg/postgres/pgutils/retry.go @@ -6,11 +6,7 @@ import ( "time" "github.com/jackc/pgx/v5" -) - -const ( - interval = 5 * time.Second - timeout = 5 * time.Minute + "github.com/stackrox/rox/pkg/env" ) // Retry is used to specify how long to retry to successfully run a query with 1 return value @@ -52,7 +48,8 @@ func Retry3[T any, U any](ctx context.Context, fn func() (T, U, error)) (T, U, e } // Run query immediately - if val1, val2, err := fn(); err == nil || !IsTransientError(err) { + val1, val2, err := fn() + if err == nil || !IsTransientError(err) { if err != nil && err != pgx.ErrNoRows { log.Debugf("UNEXPECTED: found non-retryable error: %+v", err) return ret1, ret2, fmt.Errorf("found non-retryable error: %w", err) @@ -61,13 +58,18 @@ func Retry3[T any, U any](ctx context.Context, 
fn func() (T, U, error)) (T, U, e return val1, val2, err } - expirationTimer := time.NewTimer(timeout) + // If retries are disabled, fail fast after single attempt + if env.PostgresDisableQueryRetries.BooleanSetting() { + log.Debugf("retry disabled: found error: %+v", err) + return ret1, ret2, err + } + + expirationTimer := time.NewTimer(env.PostgresQueryRetryTimeout.DurationSetting()) defer expirationTimer.Stop() - intervalTicker := time.NewTicker(interval) + intervalTicker := time.NewTicker(env.PostgresQueryRetryInterval.DurationSetting()) defer intervalTicker.Stop() - var err error for { select { case <-ctx.Done(): diff --git a/pkg/postgres/pgutils/retry_test.go b/pkg/postgres/pgutils/retry_test.go new file mode 100644 index 0000000000000..325dd7b4b2595 --- /dev/null +++ b/pkg/postgres/pgutils/retry_test.go @@ -0,0 +1,176 @@ +package pgutils + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/jackc/pgx/v5/pgconn" + "github.com/stretchr/testify/assert" +) + +// transientErr creates a transient PostgreSQL error that will trigger retries +func transientErr() error { + return &pgconn.PgError{ + Code: "08006", // connection_failure - a transient error code + } +} + +// nonTransientErr creates a non-transient error that should not trigger retries +func nonTransientErr() error { + return errors.New("non-transient error") +} + +func TestRetry_SuccessOnFirstAttempt(t *testing.T) { + ctx := context.Background() + attempts := 0 + + err := Retry(ctx, func() error { + attempts++ + return nil + }) + + assert.NoError(t, err) + assert.Equal(t, 1, attempts, "should succeed on first attempt") +} + +func TestRetry_NonTransientErrorFailsFast(t *testing.T) { + ctx := context.Background() + attempts := 0 + + err := Retry(ctx, func() error { + attempts++ + return nonTransientErr() + }) + + assert.Error(t, err) + assert.Contains(t, err.Error(), "non-retryable error") + assert.Equal(t, 1, attempts, "should not retry non-transient errors") +} + +func TestRetry_TransientErrorRetries(t *testing.T) { + // Set shorter intervals for faster testing + t.Setenv("ROX_POSTGRES_QUERY_RETRY_INTERVAL", "100ms") + t.Setenv("ROX_POSTGRES_QUERY_RETRY_TIMEOUT", "1s") + + ctx := context.Background() + attempts := 0 + maxAttempts := 3 + + err := Retry(ctx, func() error { + attempts++ + if attempts < maxAttempts { + return transientErr() + } + return nil + }) + + assert.NoError(t, err) + assert.Equal(t, maxAttempts, attempts, "should retry until success") +} + +func TestRetry_TransientErrorTimeout(t *testing.T) { + // Set very short timeout for faster testing + t.Setenv("ROX_POSTGRES_QUERY_RETRY_INTERVAL", "50ms") + t.Setenv("ROX_POSTGRES_QUERY_RETRY_TIMEOUT", "200ms") + + ctx := context.Background() + attempts := 0 + + start := time.Now() + err := Retry(ctx, func() error { + attempts++ + return transientErr() + }) + elapsed := time.Since(start) + + assert.Error(t, err) + assert.Contains(t, err.Error(), "retry timer is expired") + assert.Greater(t, attempts, 1, "should retry at least once before timeout") + assert.GreaterOrEqual(t, elapsed, 200*time.Millisecond, "should respect timeout") +} + +func TestRetry_DisabledRetriesFailFast(t *testing.T) { + // Enable disable flag + t.Setenv("ROX_POSTGRES_DISABLE_QUERY_RETRIES", "true") + + ctx := context.Background() + attempts := 0 + + start := time.Now() + err := Retry(ctx, func() error { + attempts++ + return transientErr() + }) + elapsed := time.Since(start) + + assert.Error(t, err) + assert.Equal(t, 1, attempts, "should only attempt once when retries disabled") + 
assert.Less(t, elapsed, 100*time.Millisecond, "should fail fast without waiting") +} + +func TestRetry_ContextCancellation(t *testing.T) { + // Set shorter intervals for faster testing + t.Setenv("ROX_POSTGRES_QUERY_RETRY_INTERVAL", "100ms") + t.Setenv("ROX_POSTGRES_QUERY_RETRY_TIMEOUT", "5s") + + ctx, cancel := context.WithCancel(context.Background()) + attempts := 0 + + // Cancel context after 2 attempts + go func() { + time.Sleep(150 * time.Millisecond) + cancel() + }() + + err := Retry(ctx, func() error { + attempts++ + return transientErr() + }) + + assert.Error(t, err) + assert.Contains(t, err.Error(), "retry context is done") +} + +func TestRetryIfPostgres_DelegatesToRetry(t *testing.T) { + ctx := context.Background() + attempts := 0 + + err := RetryIfPostgres(ctx, func() error { + attempts++ + return nil + }) + + assert.NoError(t, err) + assert.Equal(t, 1, attempts) +} + +func TestRetry_CustomIntervalIsRespected(t *testing.T) { + // Set custom interval + t.Setenv("ROX_POSTGRES_QUERY_RETRY_INTERVAL", "200ms") + + ctx := context.Background() + attempts := 0 + var attemptTimes []time.Time + + err := Retry(ctx, func() error { + attempts++ + attemptTimes = append(attemptTimes, time.Now()) + if attempts < 3 { + return transientErr() + } + return nil + }) + + assert.NoError(t, err) + assert.Equal(t, 3, attempts) + + // Check that intervals between attempts are roughly 200ms + if len(attemptTimes) >= 2 { + interval1 := attemptTimes[1].Sub(attemptTimes[0]) + // Allow some tolerance for timing (150-250ms) + assert.Greater(t, interval1, 150*time.Millisecond) + assert.Less(t, interval1, 300*time.Millisecond) + } +} From 2fb5cedd98cd47a24837a6748e12f48d5c7f9dc0 Mon Sep 17 00:00:00 2001 From: Yann Brillouet <91869377+rhybrillou@users.noreply.github.com> Date: Tue, 27 Jan 2026 15:32:50 +0100 Subject: [PATCH 031/232] ROX-32452: Introduce dynamic origin for access control objects (#18607) --- .../internaltokens/service/role_manager.go | 2 +- central/role/service/service_impl.go | 34 +- .../service_impl_accessscope_postgres_test.go | 110 +++ .../service_impl_other_postgres_test.go | 487 ++++++++++++ ...ervice_impl_permissionset_postgres_test.go | 109 +++ .../service_impl_role_postgres_test.go | 226 ++++++ central/role/service/service_impl_test.go | 728 ------------------ central/role/service/test_helpers_test.go | 240 ++++++ generated/api/v1/auth_service.swagger.json | 5 +- .../api/v1/authprovider_service.swagger.json | 5 +- generated/api/v1/group_service.swagger.json | 8 +- .../api/v1/notifier_service.swagger.json | 5 +- generated/api/v1/role_service.swagger.json | 5 +- ...signature_integration_service.swagger.json | 5 +- generated/storage/traits.pb.go | 17 +- pkg/declarativeconfig/context.go | 4 +- pkg/declarativeconfig/context_test.go | 13 +- pkg/declarativeconfig/origin.go | 12 +- pkg/declarativeconfig/origin_test.go | 12 + proto/storage/proto.lock | 4 + proto/storage/traits.proto | 8 +- ui/apps/platform/src/types/traits.proto.ts | 7 +- ui/apps/platform/src/utils/traits.utils.ts | 1 + 23 files changed, 1288 insertions(+), 759 deletions(-) create mode 100644 central/role/service/service_impl_accessscope_postgres_test.go create mode 100644 central/role/service/service_impl_other_postgres_test.go create mode 100644 central/role/service/service_impl_permissionset_postgres_test.go create mode 100644 central/role/service/service_impl_role_postgres_test.go create mode 100644 central/role/service/test_helpers_test.go diff --git a/central/auth/internaltokens/service/role_manager.go 
b/central/auth/internaltokens/service/role_manager.go index 069a6a240b35c..b024a78bfc944 100644 --- a/central/auth/internaltokens/service/role_manager.go +++ b/central/auth/internaltokens/service/role_manager.go @@ -35,7 +35,7 @@ type roleManager struct { } var ( - generatedObjectTraits = &storage.Traits{Origin: storage.Traits_IMPERATIVE} + generatedObjectTraits = &storage.Traits{Origin: storage.Traits_DYNAMIC} ) // createPermissionSet creates a dynamic permission set, granting the requested permissions. diff --git a/central/role/service/service_impl.go b/central/role/service/service_impl.go index 2e01f43ecfb70..e9166135c1fde 100644 --- a/central/role/service/service_impl.go +++ b/central/role/service/service_impl.go @@ -82,7 +82,10 @@ func (*serviceImpl) AuthFuncOverride(ctx context.Context, fullMethodName string) } func (s *serviceImpl) GetRoles(ctx context.Context, _ *v1.Empty) (*v1.GetRolesResponse, error) { - roles, err := s.roleDataStore.GetAllRoles(ctx) + roles, err := s.roleDataStore.GetRolesFiltered(ctx, func(role *storage.Role) bool { + // filter out dynamic roles created to back Rox tokens issued for internal purposes. + return role.GetTraits().GetOrigin() != storage.Traits_DYNAMIC + }) if err != nil { return nil, errors.Wrap(err, "failed to retrieve roles") } @@ -117,6 +120,9 @@ func (s *serviceImpl) CreateRole(ctx context.Context, roleRequest *v1.CreateRole if role.GetName() != "" && role.GetName() != roleRequest.GetName() { return nil, errox.InvalidArgs.CausedBy("different role names in path and body") } + if role.GetTraits().GetOrigin() == storage.Traits_DYNAMIC { + return nil, errox.InvalidArgs.CausedBy("dynamic roles can only be created by internal services") + } role.Name = roleRequest.GetName() err := s.roleDataStore.AddRole(ctx, role) @@ -127,6 +133,9 @@ func (s *serviceImpl) CreateRole(ctx context.Context, roleRequest *v1.CreateRole } func (s *serviceImpl) UpdateRole(ctx context.Context, role *storage.Role) (*v1.Empty, error) { + if role.GetTraits().GetOrigin() == storage.Traits_DYNAMIC { + return nil, errox.InvalidArgs.CausedBy("dynamic roles cannot be modified by API") + } err := s.roleDataStore.UpdateRole(ctx, role) if err != nil { return nil, err @@ -183,7 +192,10 @@ func (s *serviceImpl) GetPermissionSet(ctx context.Context, id *v1.ResourceByID) } func (s *serviceImpl) ListPermissionSets(ctx context.Context, _ *v1.Empty) (*v1.ListPermissionSetsResponse, error) { - permissionSets, err := s.roleDataStore.GetAllPermissionSets(ctx) + permissionSets, err := s.roleDataStore.GetPermissionSetsFiltered(ctx, func(permissionSet *storage.PermissionSet) bool { + // filter out dynamic permission sets created to back Rox tokens issued for internal purposes. + return permissionSet.GetTraits().GetOrigin() != storage.Traits_DYNAMIC + }) if err != nil { return nil, errors.Wrap(err, "failed to retrieve permission sets") } @@ -202,6 +214,10 @@ func (s *serviceImpl) PostPermissionSet(ctx context.Context, permissionSet *stor } permissionSet.Id = rolePkg.GeneratePermissionSetID() + if permissionSet.GetTraits().GetOrigin() == storage.Traits_DYNAMIC { + return nil, errox.InvalidArgs.CausedBy("dynamic permission sets can only be created by internal services") + } + // Store the augmented permission set; report back on error. Note the // permission set is referenced by its name because that's what the caller // knows. 
@@ -215,6 +231,9 @@ func (s *serviceImpl) PostPermissionSet(ctx context.Context, permissionSet *stor } func (s *serviceImpl) PutPermissionSet(ctx context.Context, permissionSet *storage.PermissionSet) (*v1.Empty, error) { + if permissionSet.GetTraits().GetOrigin() == storage.Traits_DYNAMIC { + return nil, errox.InvalidArgs.CausedBy("dynamic permission sets cannot be modified by API") + } err := s.roleDataStore.UpdatePermissionSet(ctx, permissionSet) if err != nil { return nil, errors.Wrapf(err, "failed to update permission set %s", permissionSet.GetId()) @@ -249,7 +268,10 @@ func (s *serviceImpl) GetSimpleAccessScope(ctx context.Context, id *v1.ResourceB } func (s *serviceImpl) ListSimpleAccessScopes(ctx context.Context, _ *v1.Empty) (*v1.ListSimpleAccessScopesResponse, error) { - scopes, err := s.roleDataStore.GetAllAccessScopes(ctx) + scopes, err := s.roleDataStore.GetAccessScopesFiltered(ctx, func(scope *storage.SimpleAccessScope) bool { + // filter out dynamic access scopes created to back Rox tokens issued for internal purposes. + return scope.GetTraits().GetOrigin() != storage.Traits_DYNAMIC + }) if err != nil { return nil, errors.Wrap(err, "failed to retrieve access scopes") } @@ -267,6 +289,9 @@ func (s *serviceImpl) PostSimpleAccessScope(ctx context.Context, scope *storage. return nil, errox.InvalidArgs.CausedBy("setting id field is not allowed") } scope.Id = rolePkg.GenerateAccessScopeID() + if scope.GetTraits().GetOrigin() == storage.Traits_DYNAMIC { + return nil, errox.InvalidArgs.CausedBy("dynamic access scopes can only be created by internal services") + } // Store the augmented access scope; report back on error. Note the access // scope is referenced by its name because that's what the caller knows. @@ -280,6 +305,9 @@ func (s *serviceImpl) PostSimpleAccessScope(ctx context.Context, scope *storage. 
} func (s *serviceImpl) PutSimpleAccessScope(ctx context.Context, scope *storage.SimpleAccessScope) (*v1.Empty, error) { + if scope.GetTraits().GetOrigin() == storage.Traits_DYNAMIC { + return nil, errox.InvalidArgs.CausedBy("dynamic access scopes cannot be modified by API") + } err := s.roleDataStore.UpdateAccessScope(ctx, scope) if err != nil { return nil, errors.Wrapf(err, "failed to update access scope %s", scope.GetId()) diff --git a/central/role/service/service_impl_accessscope_postgres_test.go b/central/role/service/service_impl_accessscope_postgres_test.go new file mode 100644 index 0000000000000..0fff6ff7b8903 --- /dev/null +++ b/central/role/service/service_impl_accessscope_postgres_test.go @@ -0,0 +1,110 @@ +//go:build sql_integration + +package service + +import ( + "testing" + + v1 "github.com/stackrox/rox/generated/api/v1" + "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/errox" + "github.com/stackrox/rox/pkg/protoassert" + "github.com/stackrox/rox/pkg/sac" + "github.com/stretchr/testify/suite" +) + +func TestServiceImplWithDB_AccessScopes(t *testing.T) { + suite.Run(t, new(serviceImplAccessScopeTestSuite)) +} + +type serviceImplAccessScopeTestSuite struct { + suite.Suite + + tester *serviceImplTester +} + +func (s *serviceImplAccessScopeTestSuite) SetupSuite() { + s.tester = &serviceImplTester{} + s.tester.Setup(s.T()) +} + +func (s *serviceImplAccessScopeTestSuite) SetupTest() { + s.Require().NotNil(s.tester) + s.tester.SetupTest(s.T()) +} + +func (s *serviceImplAccessScopeTestSuite) TearDownTest() { + s.Require().NotNil(s.tester) + s.tester.TearDownTest(s.T()) +} + +func (s *serviceImplAccessScopeTestSuite) TestListAccessScopes() { + t := s.T() + ctx := sac.WithAllAccess(t.Context()) + + accessScopeName1 := "TestListAccessScopes_noTraits" + accessScopeName2 := "TestListAccessScopes_imperativeOriginTraits" + accessScopeName3 := "TestListAccessScopes_declarativeOriginTraits" + accessScopeName4 := "TestListAccessScopes_orphanedDeclarativeOriginTraits" + accessScopeName5 := "TestListAccessScopes_dynamicOriginTraits" + scope1 := s.tester.createAccessScope(t, accessScopeName1, nilTraits) + scope2 := s.tester.createAccessScope(t, accessScopeName2, imperativeOriginTraits) + scope3 := s.tester.createAccessScope(t, accessScopeName3, declarativeOriginTraits) + scope4 := s.tester.createAccessScope(t, accessScopeName4, orphanedDeclarativeOriginTraits) + scope5 := s.tester.createAccessScope(t, accessScopeName5, dynamicOriginTraits) + + scopes, err := s.tester.service.ListSimpleAccessScopes(ctx, &v1.Empty{}) + s.NoError(err) + s.Len(scopes.GetAccessScopes(), 4) + + protoassert.SliceContains(s.T(), scopes.GetAccessScopes(), scope1) + protoassert.SliceContains(s.T(), scopes.GetAccessScopes(), scope2) + protoassert.SliceContains(s.T(), scopes.GetAccessScopes(), scope3) + protoassert.SliceContains(s.T(), scopes.GetAccessScopes(), scope4) + // Roles with dynamic origin are filtered out. 
+ protoassert.SliceNotContains(s.T(), scopes.GetAccessScopes(), scope5) +} + +func (s *serviceImplAccessScopeTestSuite) TestPostAccessScope() { + s.Run("Scope without specified origin can be created by API", func() { + inputScope := &storage.SimpleAccessScope{ + Name: "Test basic access scope", + Rules: &storage.SimpleAccessScope_Rules{}, + } + ctx := sac.WithAllAccess(s.T().Context()) + scope, err := s.tester.service.PostSimpleAccessScope(ctx, inputScope) + s.NoError(err) + inputScope.Id = scope.GetId() + protoassert.Equal(s.T(), inputScope, scope) + }) + s.Run("Dynamic scopes cannot be created by API", func() { + inputScope := &storage.SimpleAccessScope{ + Traits: dynamicOriginTraits, + } + ctx := sac.WithAllAccess(s.T().Context()) + scope, err := s.tester.service.PostSimpleAccessScope(ctx, inputScope) + s.ErrorIs(err, errox.InvalidArgs) + s.Nil(scope) + }) +} + +func (s *serviceImplAccessScopeTestSuite) TestPutAccessScope() { + s.Run("Scope without specified origin can be updated by API", func() { + scopeName := "Access scope without origin" + inputScope := s.tester.createAccessScope(s.T(), scopeName, nilTraits) + updatedScope := inputScope.CloneVT() + updatedScope.Description = "Updated description" + ctx := sac.WithAllAccess(s.T().Context()) + _, err := s.tester.service.PutSimpleAccessScope(ctx, updatedScope) + s.NoError(err) + }) + s.Run("Dynamic scopes cannot be created by API", func() { + scopeName := "Dynamic access scope" + inputScope := s.tester.createAccessScope(s.T(), scopeName, dynamicOriginTraits) + updatedScope := inputScope.CloneVT() + updatedScope.Description = "Updated description" + ctx := sac.WithAllAccess(s.T().Context()) + _, err := s.tester.service.PutSimpleAccessScope(ctx, updatedScope) + s.ErrorIs(err, errox.InvalidArgs) + }) +} diff --git a/central/role/service/service_impl_other_postgres_test.go b/central/role/service/service_impl_other_postgres_test.go new file mode 100644 index 0000000000000..d159cf587a64d --- /dev/null +++ b/central/role/service/service_impl_other_postgres_test.go @@ -0,0 +1,487 @@ +//go:build sql_integration + +package service + +import ( + "context" + "testing" + + v1 "github.com/stackrox/rox/generated/api/v1" + "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/auth/permissions" + "github.com/stackrox/rox/pkg/protoassert" + "github.com/stackrox/rox/pkg/sac" + "github.com/stackrox/rox/pkg/sac/resources" + "github.com/stretchr/testify/suite" +) + +const ( + clusterPermission = "Cluster" + compliancePermission = "Compliance" + deploymentPermission = "Deployment" + deploymentExtensionPermission = "DeploymentExtension" + integrationPermission = "Integration" + namespacePermission = "Namespace" + networkGraphPermission = "NetworkGraph" + nodePermission = "Node" + rolePermission = "Role" +) + +func TestServiceImplWithDB_Other(t *testing.T) { + suite.Run(t, new(serviceImplOtherTestSuite)) +} + +type serviceImplOtherTestSuite struct { + suite.Suite + + tester *serviceImplTester +} + +func (s *serviceImplOtherTestSuite) SetupSuite() { + s.tester = &serviceImplTester{} + s.tester.Setup(s.T()) +} + +func (s *serviceImplOtherTestSuite) SetupTest() { + s.Require().NotNil(s.tester) + s.tester.SetupTest(s.T()) +} + +func (s *serviceImplOtherTestSuite) TearDownTest() { + s.Require().NotNil(s.tester) + s.tester.TearDownTest(s.T()) +} + +func (s *serviceImplOtherTestSuite) TestGetClustersForPermissions() { + deepPurpleClusterID := s.tester.clusterNameToIDMap[clusterDeepPurple.GetName()] + queenClusterID := 
s.tester.clusterNameToIDMap[clusterQueen.GetName()] + testResourceScope1 := getTestResourceScopeSingleNamespace( + queenClusterID, + namespaceInnuendoInClusterQueen.GetName()) + pinkFloydClusterID := s.tester.clusterNameToIDMap[clusterPinkFloyd.GetName()] + testResourceScope2 := getTestResourceScopeSingleNamespace( + pinkFloydClusterID, + namespaceTheWallInClusterPinkFloyd.GetName()) + + testScopeMap := sac.TestScopeMap{ + storage.Access_READ_ACCESS: map[permissions.Resource]*sac.TestResourceScope{ + resources.Integration.GetResource(): { + Included: true, + }, + resources.Node.GetResource(): testResourceScope1, + resources.Deployment.GetResource(): testResourceScope1, + resources.NetworkGraph.GetResource(): testResourceScope2, + }, + } + + extendedAccessTestScopeMap := sac.TestScopeMap{ + storage.Access_READ_ACCESS: map[permissions.Resource]*sac.TestResourceScope{ + resources.Compliance.GetResource(): { + Included: true, + }, + resources.Integration.GetResource(): { + Included: true, + }, + resources.Node.GetResource(): testResourceScope1, + resources.Deployment.GetResource(): testResourceScope1, + resources.NetworkGraph.GetResource(): testResourceScope2, + }, + } + + deepPurpleClusterResponse := &v1.ScopeObject{ + Id: deepPurpleClusterID, + Name: clusterDeepPurple.GetName(), + } + + queenClusterResponse := &v1.ScopeObject{ + Id: queenClusterID, + Name: clusterQueen.GetName(), + } + + pinkFloydClusterResponse := &v1.ScopeObject{ + Id: pinkFloydClusterID, + Name: clusterPinkFloyd.GetName(), + } + + testCtx := sac.WithGlobalAccessScopeChecker(context.Background(), + sac.TestScopeCheckerCoreFromFullScopeMap(s.T(), testScopeMap)) + + extendedAccessTestCtx := sac.WithGlobalAccessScopeChecker(context.Background(), + sac.TestScopeCheckerCoreFromFullScopeMap(s.T(), extendedAccessTestScopeMap)) + + testCases := []struct { + name string + context context.Context + testedPermissions []string + expectedClusters []*v1.ScopeObject + }{ + { + name: "Global permission (Not Granted) gets no cluster data.", + context: testCtx, + testedPermissions: []string{rolePermission}, + expectedClusters: []*v1.ScopeObject{}, + }, + { + name: "Global permission (Granted) gets no cluster data.", + context: testCtx, + testedPermissions: []string{integrationPermission}, + expectedClusters: []*v1.ScopeObject{}, + }, + { + name: "Not granted Cluster scoped permission gets no cluster data.", + context: testCtx, + testedPermissions: []string{clusterPermission}, + expectedClusters: []*v1.ScopeObject{}, + }, + { + name: "Granted Cluster scoped permission gets only cluster data for clusters in permission scope.", + context: testCtx, + testedPermissions: []string{nodePermission}, + expectedClusters: []*v1.ScopeObject{queenClusterResponse}, + }, + { + name: "Not granted Namespace scoped permission gets no cluster data.", + context: testCtx, + testedPermissions: []string{namespacePermission}, + expectedClusters: []*v1.ScopeObject{}, + }, + { + name: "Granted Namespace scoped permission gets only cluster data for clusters in permission scope.", + context: testCtx, + testedPermissions: []string{deploymentPermission}, + expectedClusters: []*v1.ScopeObject{queenClusterResponse}, + }, + { + name: "Multiple not granted Namespace scoped permissions get no cluster data.", + context: testCtx, + testedPermissions: []string{namespacePermission, deploymentExtensionPermission}, + expectedClusters: []*v1.ScopeObject{}, + }, + { + name: "Multiple Namespace scoped permissions get only cluster data for clusters in granted permission scopes.", + 
context: testCtx, + testedPermissions: []string{namespacePermission, deploymentPermission}, + expectedClusters: []*v1.ScopeObject{queenClusterResponse}, + }, + { + name: "empty permission list get cluster data for all cluster data in scope of granted cluster and namespace permissions.", + context: testCtx, + testedPermissions: []string{}, + expectedClusters: []*v1.ScopeObject{queenClusterResponse, pinkFloydClusterResponse}, + }, + { + name: "Extended Access - Global permission (Not Granted) gets no cluster data.", + context: extendedAccessTestCtx, + testedPermissions: []string{rolePermission}, + expectedClusters: []*v1.ScopeObject{}, + }, + { + name: "Extended Access - Global permission (Granted) gets no cluster data.", + context: extendedAccessTestCtx, + testedPermissions: []string{integrationPermission}, + expectedClusters: []*v1.ScopeObject{}, + }, + { + name: "Extended Access - Not granted Cluster scoped permission gets no cluster data.", + context: extendedAccessTestCtx, + testedPermissions: []string{clusterPermission}, + expectedClusters: []*v1.ScopeObject{}, + }, + { + name: "Extended Access - Granted Cluster scoped permission at resource level gets all cluster data", + context: extendedAccessTestCtx, + testedPermissions: []string{compliancePermission}, + expectedClusters: []*v1.ScopeObject{ + deepPurpleClusterResponse, + queenClusterResponse, + pinkFloydClusterResponse, + }, + }, + { + name: "Extended Access - Granted Cluster scoped permission gets only cluster data for clusters in permission scope.", + context: extendedAccessTestCtx, + testedPermissions: []string{nodePermission}, + expectedClusters: []*v1.ScopeObject{queenClusterResponse}, + }, + { + name: "Extended Access - Not granted Namespace scoped permission gets no cluster data.", + context: extendedAccessTestCtx, + testedPermissions: []string{namespacePermission}, + expectedClusters: []*v1.ScopeObject{}, + }, + { + name: "Extended Access - Granted Namespace scoped permission gets only cluster data for clusters in permission scope.", + context: extendedAccessTestCtx, + testedPermissions: []string{deploymentPermission}, + expectedClusters: []*v1.ScopeObject{queenClusterResponse}, + }, + { + name: "Extended Access - Multiple not granted Namespace scoped permissions get no cluster data.", + context: extendedAccessTestCtx, + testedPermissions: []string{namespacePermission, deploymentExtensionPermission}, + expectedClusters: []*v1.ScopeObject{}, + }, + { + name: "Extended Access - Multiple Namespace scoped permissions get only cluster data for clusters in granted permission scopes.", + context: extendedAccessTestCtx, + testedPermissions: []string{namespacePermission, deploymentPermission}, + expectedClusters: []*v1.ScopeObject{queenClusterResponse}, + }, + { + name: "Extended Access - empty permission list get cluster data for all cluster data in scope of granted cluster and namespace permissions.", + context: extendedAccessTestCtx, + testedPermissions: []string{}, + expectedClusters: []*v1.ScopeObject{ + deepPurpleClusterResponse, + queenClusterResponse, + pinkFloydClusterResponse, + }, + }, + } + + for _, c := range testCases { + s.Run(c.name, func() { + clusterResponse, err := s.tester.service.GetClustersForPermissions(c.context, &v1.GetClustersForPermissionsRequest{ + Pagination: nil, + Permissions: c.testedPermissions, + }) + s.NoError(err) + protoassert.ElementsMatch(s.T(), clusterResponse.GetClusters(), c.expectedClusters) + }) + } +} + +func (s *serviceImplOtherTestSuite) TestGetClustersForPermissionsPagination() { + 
queenClusterID := s.tester.clusterNameToIDMap[clusterQueen.GetName()] + testResourceScope1 := getTestResourceScopeSingleNamespace( + queenClusterID, + namespaceInnuendoInClusterQueen.GetName()) + pinkFloydClusterID := s.tester.clusterNameToIDMap[clusterPinkFloyd.GetName()] + testResourceScope2 := getTestResourceScopeSingleNamespace( + pinkFloydClusterID, + namespaceTheWallInClusterPinkFloyd.GetName()) + testScopeMap := sac.TestScopeMap{ + storage.Access_READ_ACCESS: map[permissions.Resource]*sac.TestResourceScope{ + resources.Integration.GetResource(): { + Included: true, + }, + resources.Node.GetResource(): testResourceScope1, + resources.Deployment.GetResource(): testResourceScope1, + resources.NetworkGraph.GetResource(): testResourceScope2, + }, + } + + queenClusterResponse := &v1.ScopeObject{ + Id: queenClusterID, + Name: clusterQueen.GetName(), + } + + pinkFloydClusterResponse := &v1.ScopeObject{ + Id: pinkFloydClusterID, + Name: clusterPinkFloyd.GetName(), + } + + testCtx := sac.WithGlobalAccessScopeChecker(context.Background(), + sac.TestScopeCheckerCoreFromFullScopeMap(s.T(), testScopeMap)) + + testCases := []struct { + name string + pagination *v1.Pagination + expectedClusters []*v1.ScopeObject + }{ + { + name: "No offset and a limit restricts to a list of appropriate size", + pagination: &v1.Pagination{ + Limit: 1, + Offset: 0, + SortOption: &v1.SortOption{ + Field: "Cluster", + Reversed: false, + }, + }, + expectedClusters: []*v1.ScopeObject{pinkFloydClusterResponse}, + }, + { + name: "Offset and no limit restricts to a list of appropriate size starting with expected value", + pagination: &v1.Pagination{ + Limit: 0, + Offset: 1, + SortOption: &v1.SortOption{ + Field: "Cluster", + Reversed: false, + }, + }, + expectedClusters: []*v1.ScopeObject{queenClusterResponse}, + }, + { + name: "Sort options without offset nor limit return the expected results", + pagination: &v1.Pagination{ + Limit: 0, + Offset: 0, + SortOption: &v1.SortOption{ + Field: "Cluster", + Reversed: false, + }, + }, + expectedClusters: []*v1.ScopeObject{pinkFloydClusterResponse, queenClusterResponse}, + }, + { + name: "Reversed sort without offset nor limit return the expected results", + pagination: &v1.Pagination{ + Limit: 0, + Offset: 0, + SortOption: &v1.SortOption{ + Field: "Cluster", + Reversed: true, + }, + }, + expectedClusters: []*v1.ScopeObject{queenClusterResponse, pinkFloydClusterResponse}, + }, + } + + for _, c := range testCases { + s.Run(c.name, func() { + clusterResponse, err := s.tester.service.GetClustersForPermissions(testCtx, &v1.GetClustersForPermissionsRequest{ + Pagination: c.pagination, + Permissions: []string{}, + }) + s.NoError(err) + protoassert.SlicesEqual(s.T(), clusterResponse.GetClusters(), c.expectedClusters) + }) + } +} + +func (s *serviceImplOtherTestSuite) TestGetNamespacesForClusterAndPermissions() { + queenClusterID := s.tester.clusterNameToIDMap[clusterQueen.GetName()] + testResourceScope1 := getTestResourceScopeSingleNamespace( + queenClusterID, + namespaceInnuendoInClusterQueen.GetName()) + pinkFloydClusterID := s.tester.clusterNameToIDMap[clusterPinkFloyd.GetName()] + testResourceScope2 := getTestResourceScopeSingleNamespace( + pinkFloydClusterID, + namespaceTheWallInClusterPinkFloyd.GetName()) + testResourceScope3 := getTestResourceScopeSingleNamespace( + queenClusterID, + namespaceQueenInClusterQueen.GetName()) + testScopeMap := sac.TestScopeMap{ + storage.Access_READ_ACCESS: map[permissions.Resource]*sac.TestResourceScope{ + resources.Integration.GetResource(): { + 
Included: true, + }, + resources.Node.GetResource(): testResourceScope1, + resources.Deployment.GetResource(): testResourceScope1, + resources.NetworkGraph.GetResource(): testResourceScope2, + resources.Image.GetResource(): testResourceScope3, + }, + } + + queenQueenNamespaceResponse := &v1.ScopeObject{ + Id: getNamespaceID(namespaceQueenInClusterQueen.GetName()), + Name: namespaceQueenInClusterQueen.GetName(), + } + + queenInnuendoNamespaceResponse := &v1.ScopeObject{ + Id: getNamespaceID(namespaceInnuendoInClusterQueen.GetName()), + Name: namespaceInnuendoInClusterQueen.GetName(), + } + + pinkFloydTheWallNamespaceResponse := &v1.ScopeObject{ + Id: getNamespaceID(namespaceTheWallInClusterPinkFloyd.GetName()), + Name: namespaceTheWallInClusterPinkFloyd.GetName(), + } + + testCases := []struct { + name string + testedClusterID string + testedPermissions []string + expectedNamespaces []*v1.ScopeObject + }{ + { + name: "Global permission (Not Granted) gets no namespace data.", + testedClusterID: s.tester.clusterNameToIDMap[clusterQueen.GetName()], + testedPermissions: []string{rolePermission}, + expectedNamespaces: []*v1.ScopeObject{}, + }, + { + name: "Global permission (Granted) gets no namespace data.", + testedClusterID: s.tester.clusterNameToIDMap[clusterQueen.GetName()], + testedPermissions: []string{integrationPermission}, + expectedNamespaces: []*v1.ScopeObject{}, + }, + { + name: "Not granted Cluster scoped permission gets no namespace data.", + testedClusterID: s.tester.clusterNameToIDMap[clusterQueen.GetName()], + testedPermissions: []string{clusterPermission}, + expectedNamespaces: []*v1.ScopeObject{}, + }, + { + name: "Granted Cluster scoped permission gets no namespace data.", + testedClusterID: s.tester.clusterNameToIDMap[clusterQueen.GetName()], + testedPermissions: []string{nodePermission}, + expectedNamespaces: []*v1.ScopeObject{}, + }, + { + name: "Not granted Namespace scoped permission gets no namespace data.", + testedClusterID: s.tester.clusterNameToIDMap[clusterQueen.GetName()], + testedPermissions: []string{namespacePermission}, + expectedNamespaces: []*v1.ScopeObject{}, + }, + { + name: "Granted Namespace scoped permission gets only namespace data for namespaces in cluster and permission scope.", + testedClusterID: s.tester.clusterNameToIDMap[clusterQueen.GetName()], + testedPermissions: []string{deploymentPermission}, + expectedNamespaces: []*v1.ScopeObject{queenInnuendoNamespaceResponse}, + }, + { + name: "Granted Namespace scoped permission gets only namespace data for namespaces in cluster and permission scope (other permission).", + testedClusterID: s.tester.clusterNameToIDMap[clusterPinkFloyd.GetName()], + testedPermissions: []string{networkGraphPermission}, + expectedNamespaces: []*v1.ScopeObject{pinkFloydTheWallNamespaceResponse}, + }, + { + name: "Multiple not granted Namespace scoped permissions get no namespace data.", + testedClusterID: s.tester.clusterNameToIDMap[clusterQueen.GetName()], + testedPermissions: []string{namespacePermission, deploymentExtensionPermission}, + expectedNamespaces: []*v1.ScopeObject{}, + }, + { + name: "Multiple Namespace scoped permissions get only namespace data for namespaces in granted permission scopes.", + testedClusterID: s.tester.clusterNameToIDMap[clusterQueen.GetName()], + testedPermissions: []string{namespacePermission, deploymentPermission}, + expectedNamespaces: []*v1.ScopeObject{queenInnuendoNamespaceResponse}, + }, + { + name: "empty permission list get namespace data for all namespaces in scope of target cluster 
and granted namespace permissions.", + testedClusterID: s.tester.clusterNameToIDMap[clusterQueen.GetName()], + testedPermissions: []string{}, + expectedNamespaces: []*v1.ScopeObject{queenQueenNamespaceResponse, queenInnuendoNamespaceResponse}, + }, + } + + testCtx := sac.WithGlobalAccessScopeChecker(context.Background(), + sac.TestScopeCheckerCoreFromFullScopeMap(s.T(), testScopeMap)) + + for _, c := range testCases { + s.Run(c.name, func() { + request := &v1.GetNamespaceForClusterAndPermissionsRequest{ + ClusterId: c.testedClusterID, + Permissions: c.testedPermissions, + } + namespaceResponse, err := s.tester.service.GetNamespacesForClusterAndPermissions(testCtx, request) + s.NoError(err) + protoassert.ElementsMatch(s.T(), namespaceResponse.GetNamespaces(), c.expectedNamespaces) + }) + } +} + +func getTestResourceScopeSingleNamespace(clusterID string, namespace string) *sac.TestResourceScope { + return &sac.TestResourceScope{ + Clusters: map[string]*sac.TestClusterScope{ + clusterID: { + Namespaces: []string{namespace}, + Included: false, + }, + }, + Included: false, + } +} diff --git a/central/role/service/service_impl_permissionset_postgres_test.go b/central/role/service/service_impl_permissionset_postgres_test.go new file mode 100644 index 0000000000000..321accf10d56e --- /dev/null +++ b/central/role/service/service_impl_permissionset_postgres_test.go @@ -0,0 +1,109 @@ +//go:build sql_integration + +package service + +import ( + "testing" + + v1 "github.com/stackrox/rox/generated/api/v1" + "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/errox" + "github.com/stackrox/rox/pkg/protoassert" + "github.com/stackrox/rox/pkg/sac" + "github.com/stretchr/testify/suite" +) + +func TestServiceImplWithDB_PermissionSet(t *testing.T) { + suite.Run(t, new(serviceImplPermissionSetTestSuite)) +} + +type serviceImplPermissionSetTestSuite struct { + suite.Suite + + tester *serviceImplTester +} + +func (s *serviceImplPermissionSetTestSuite) SetupSuite() { + s.tester = &serviceImplTester{} + s.tester.Setup(s.T()) +} + +func (s *serviceImplPermissionSetTestSuite) SetupTest() { + s.Require().NotNil(s.tester) + s.tester.SetupTest(s.T()) +} + +func (s *serviceImplPermissionSetTestSuite) TearDownTest() { + s.Require().NotNil(s.tester) + s.tester.TearDownTest(s.T()) +} + +func (s *serviceImplPermissionSetTestSuite) TestListPermissionSets() { + t := s.T() + ctx := sac.WithAllAccess(t.Context()) + + permissionSetName1 := "TestListPermissionSets_noTraits" + permissionSetName2 := "TestListPermissionSets_imperativeOriginTraits" + permissionSetName3 := "TestListPermissionSets_declarativeOriginTraits" + permissionSetName4 := "TestListPermissionSets_orphanedDeclarativeOriginTraits" + permissionSetName5 := "TestListPermissionSets_dynamicOriginTraits" + permissionSet1 := s.tester.createPermissionSet(t, permissionSetName1, nilTraits) + permissionSet2 := s.tester.createPermissionSet(t, permissionSetName2, imperativeOriginTraits) + permissionSet3 := s.tester.createPermissionSet(t, permissionSetName3, declarativeOriginTraits) + permissionSet4 := s.tester.createPermissionSet(t, permissionSetName4, orphanedDeclarativeOriginTraits) + permissionSet5 := s.tester.createPermissionSet(t, permissionSetName5, dynamicOriginTraits) + + permissionSets, err := s.tester.service.ListPermissionSets(ctx, &v1.Empty{}) + s.NoError(err) + s.Len(permissionSets.GetPermissionSets(), 4) + + protoassert.SliceContains(s.T(), permissionSets.GetPermissionSets(), permissionSet1) + protoassert.SliceContains(s.T(), 
permissionSets.GetPermissionSets(), permissionSet2) + protoassert.SliceContains(s.T(), permissionSets.GetPermissionSets(), permissionSet3) + protoassert.SliceContains(s.T(), permissionSets.GetPermissionSets(), permissionSet4) + // Roles with dynamic origin are filtered out. + protoassert.SliceNotContains(s.T(), permissionSets.GetPermissionSets(), permissionSet5) +} + +func (s *serviceImplPermissionSetTestSuite) TestPostPermissionSet() { + s.Run("Permission set without specified origin can be created by API", func() { + inputPermissionSet := &storage.PermissionSet{ + Name: "Test basic permission set", + } + ctx := sac.WithAllAccess(s.T().Context()) + permissionSet, err := s.tester.service.PostPermissionSet(ctx, inputPermissionSet) + s.NoError(err) + inputPermissionSet.Id = permissionSet.GetId() + protoassert.Equal(s.T(), inputPermissionSet, permissionSet) + }) + s.Run("Dynamic scopes cannot be created by API", func() { + inputScope := &storage.SimpleAccessScope{ + Traits: dynamicOriginTraits, + } + ctx := sac.WithAllAccess(s.T().Context()) + scope, err := s.tester.service.PostSimpleAccessScope(ctx, inputScope) + s.ErrorIs(err, errox.InvalidArgs) + s.Nil(scope) + }) +} + +func (s *serviceImplPermissionSetTestSuite) TestPutPermissionSet() { + s.Run("Permission set without specified origin can be updated by API", func() { + permissionSetName := "Permission set without origin" + inputPermissionSet := s.tester.createPermissionSet(s.T(), permissionSetName, nilTraits) + updatedPermissionSet := inputPermissionSet.CloneVT() + updatedPermissionSet.Description = "Updated description" + ctx := sac.WithAllAccess(s.T().Context()) + _, err := s.tester.service.PutPermissionSet(ctx, updatedPermissionSet) + s.NoError(err) + }) + s.Run("Dynamic scopes cannot be created by API", func() { + permissionSetName := "Dynamic permission set" + inputPermissionSet := s.tester.createPermissionSet(s.T(), permissionSetName, dynamicOriginTraits) + updatedPermissionSet := inputPermissionSet.CloneVT() + updatedPermissionSet.Description = "Updated description" + ctx := sac.WithAllAccess(s.T().Context()) + _, err := s.tester.service.PutPermissionSet(ctx, updatedPermissionSet) + s.ErrorIs(err, errox.InvalidArgs) + }) +} diff --git a/central/role/service/service_impl_role_postgres_test.go b/central/role/service/service_impl_role_postgres_test.go new file mode 100644 index 0000000000000..d8c39a8d511de --- /dev/null +++ b/central/role/service/service_impl_role_postgres_test.go @@ -0,0 +1,226 @@ +//go:build sql_integration + +package service + +import ( + "fmt" + "testing" + + v1 "github.com/stackrox/rox/generated/api/v1" + "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/defaults/accesscontrol" + "github.com/stackrox/rox/pkg/errox" + "github.com/stackrox/rox/pkg/protoassert" + "github.com/stackrox/rox/pkg/sac" + "github.com/stretchr/testify/suite" +) + +func TestServiceImplWithDB_Roles(t *testing.T) { + suite.Run(t, new(serviceImplRoleTestSuite)) +} + +type serviceImplRoleTestSuite struct { + suite.Suite + + tester *serviceImplTester +} + +func (s *serviceImplRoleTestSuite) SetupSuite() { + s.tester = &serviceImplTester{} + s.tester.Setup(s.T()) +} + +func (s *serviceImplRoleTestSuite) SetupTest() { + s.Require().NotNil(s.tester) + s.tester.SetupTest(s.T()) +} + +func (s *serviceImplRoleTestSuite) TearDownTest() { + s.Require().NotNil(s.tester) + s.tester.TearDownTest(s.T()) +} + +func (s *serviceImplRoleTestSuite) TestCreateRoleValidAccessScopeID() { + t := s.T() + ctx := sac.WithAllAccess(t.Context()) + 
roleName := "TestCreateRoleValidAccessScopeID" + + ps := s.tester.createPermissionSet(t, roleName, nilTraits) + scope := s.tester.createAccessScope(t, roleName, nilTraits) + + role := getValidRole(roleName) + role.PermissionSetId = ps.GetId() + role.AccessScopeId = scope.GetId() + createRoleRequest := &v1.CreateRoleRequest{ + Name: roleName, + Role: role, + } + _, err := s.tester.service.CreateRole(ctx, createRoleRequest) + s.NoError(err) + s.tester.storedRoleNames = append(s.tester.storedRoleNames, role.GetName()) +} + +func (s *serviceImplRoleTestSuite) TestCreateRoleEmptyAccessScopeID() { + t := s.T() + ctx := sac.WithAllAccess(t.Context()) + roleName := "TestCreateRoleEmptyAccessScopeID" + + ps := s.tester.createPermissionSet(t, roleName, nilTraits) + + role := getValidRole(roleName) + role.PermissionSetId = ps.GetId() + role.AccessScopeId = "" + createRoleRequest := &v1.CreateRoleRequest{ + Name: roleName, + Role: role, + } + _, err := s.tester.service.CreateRole(ctx, createRoleRequest) + s.ErrorContains(err, "role access_scope_id field must be set") +} + +func (s *serviceImplRoleTestSuite) TestUpdateExistingRoleValidAccessScopeID() { + t := s.T() + ctx := sac.WithAllAccess(t.Context()) + roleName := "TestUpdateExistingRoleValidAccessScopeID" + role := s.tester.createRole(t, roleName, nilTraits) + newScope := s.tester.createAccessScope(t, "new scope", nilTraits) + role.AccessScopeId = newScope.GetId() + _, err := s.tester.service.UpdateRole(ctx, role) + s.NoError(err) +} + +func (s *serviceImplRoleTestSuite) TestUpdateExistingRoleEmptyAccessScopeID() { + t := s.T() + ctx := sac.WithAllAccess(t.Context()) + roleName := "TestUpdateExistingRoleEmptyAccessScopeID" + role := s.tester.createRole(t, roleName, nilTraits) + role.AccessScopeId = "" + _, err := s.tester.service.UpdateRole(ctx, role) + s.ErrorContains(err, "role access_scope_id field must be set") +} + +func (s *serviceImplRoleTestSuite) TestUpdateMissingRoleValidAccessScopeID() { + t := s.T() + ctx := sac.WithAllAccess(t.Context()) + roleName := "TestUpdateMissingRoleValidAccessScopeID" + ps := s.tester.createPermissionSet(t, roleName, nilTraits) + scope := s.tester.createAccessScope(t, roleName, nilTraits) + role := getValidRole(roleName) + role.PermissionSetId = ps.GetId() + role.AccessScopeId = scope.GetId() + _, err := s.tester.service.UpdateRole(ctx, role) + s.ErrorIs(err, errox.NotFound) +} + +func (s *serviceImplRoleTestSuite) TestUpdateMissingRoleEmptyAccessScopeID() { + t := s.T() + ctx := sac.WithAllAccess(t.Context()) + roleName := "TestUpdateMissingRoleEmptyAccessScopeID" + ps := s.tester.createPermissionSet(t, roleName, nilTraits) + role := getValidRole(roleName) + role.PermissionSetId = ps.GetId() + role.AccessScopeId = "" + _, err := s.tester.service.UpdateRole(ctx, role) + s.ErrorContains(err, "role access_scope_id field must be set") +} + +func (s *serviceImplRoleTestSuite) TestGetRoles() { + t := s.T() + ctx := sac.WithAllAccess(t.Context()) + + roleName1 := "TestGetRoles_noTraits" + roleName2 := "TestGetRoles_imperativeOriginTraits" + roleName3 := "TestGetRoles_declarativeOriginTraits" + roleName4 := "TestGetRoles_orphanedDeclarativeOriginTraits" + roleName5 := "TestGetRoles_dynamicOriginTraits" + role1 := s.tester.createRole(t, roleName1, nilTraits) + role2 := s.tester.createRole(t, roleName2, imperativeOriginTraits) + role3 := s.tester.createRole(t, roleName3, declarativeOriginTraits) + role4 := s.tester.createRole(t, roleName4, orphanedDeclarativeOriginTraits) + role5 := s.tester.createRole(t, roleName5, 
dynamicOriginTraits) + + roles, err := s.tester.service.GetRoles(ctx, &v1.Empty{}) + s.NoError(err) + s.Len(roles.GetRoles(), 4) + + protoassert.SliceContains(s.T(), roles.GetRoles(), role1) + protoassert.SliceContains(s.T(), roles.GetRoles(), role2) + protoassert.SliceContains(s.T(), roles.GetRoles(), role3) + protoassert.SliceContains(s.T(), roles.GetRoles(), role4) + // Roles with dynamic origin are filtered out. + protoassert.SliceNotContains(s.T(), roles.GetRoles(), role5) +} + +func (s *serviceImplRoleTestSuite) TestCreateRole() { + s.Run("Role without specified origin can be created by API", func() { + roleName := "Basic test role" + ps := s.tester.createPermissionSet(s.T(), roleName, nilTraits) + as := s.tester.createAccessScope(s.T(), roleName, nilTraits) + roleCreationRequest := &v1.CreateRoleRequest{ + Name: roleName, + Role: &storage.Role{ + Name: roleName, + PermissionSetId: ps.GetId(), + AccessScopeId: as.GetId(), + Traits: nilTraits, + }, + } + ctx := sac.WithAllAccess(s.T().Context()) + _, err := s.tester.service.CreateRole(ctx, roleCreationRequest) + s.NoError(err) + s.tester.storedRoleNames = append(s.tester.storedRoleNames, roleName) + roleLookupRequest := &v1.ResourceByID{Id: roleName} + role, fetchErr := s.tester.service.GetRole(ctx, roleLookupRequest) + s.NoError(fetchErr) + protoassert.Equal(s.T(), roleCreationRequest.GetRole(), role) + }) + s.Run("Dynamic roles cannot be created by API", func() { + roleName := "Dynamic test role" + roleCreationRequest := &v1.CreateRoleRequest{ + Name: roleName, + Role: &storage.Role{ + Name: roleName, + PermissionSetId: accesscontrol.DefaultPermissionSetIDs[accesscontrol.Admin], + AccessScopeId: accesscontrol.DefaultAccessScopeIDs[accesscontrol.UnrestrictedAccessScope], + Traits: dynamicOriginTraits, + }, + } + ctx := sac.WithAllAccess(s.T().Context()) + _, err := s.tester.service.CreateRole(ctx, roleCreationRequest) + s.ErrorIs(err, errox.InvalidArgs) + }) +} + +func (s *serviceImplRoleTestSuite) TestUpdateRole() { + s.Run("Roles without specified origin can be updated by API", func() { + roleName := "Test Update of basic role" + role := s.tester.createRole(s.T(), roleName, nilTraits) + updatedRole := role.CloneVT() + updatedRole.Description = "Updated description" + ctx := sac.WithAllAccess(s.T().Context()) + _, err := s.tester.service.UpdateRole(ctx, role) + s.NoError(err) + }) + s.Run("Dynamic roles cannot be updated by API", func() { + roleName := "Test update of dynamic role" + role := s.tester.createRole(s.T(), roleName, dynamicOriginTraits) + updatedRole := role.CloneVT() + updatedRole.Description = "Updated description" + ctx := sac.WithAllAccess(s.T().Context()) + updated, err := s.tester.service.UpdateRole(ctx, role) + s.ErrorIs(err, errox.InvalidArgs) + s.Nil(updated) + }) +} + +func getValidRole(name string) *storage.Role { + permissionSetID := accesscontrol.DefaultPermissionSetIDs[accesscontrol.Admin] + scopeID := accesscontrol.DefaultAccessScopeIDs[accesscontrol.UnrestrictedAccessScope] + return &storage.Role{ + Name: name, + Description: fmt.Sprintf("Test role for %s", name), + PermissionSetId: permissionSetID, + AccessScopeId: scopeID, + Traits: nil, + } +} diff --git a/central/role/service/service_impl_test.go b/central/role/service/service_impl_test.go index ab6f9ffc7f16a..9ab7605724f0c 100644 --- a/central/role/service/service_impl_test.go +++ b/central/role/service/service_impl_test.go @@ -4,31 +4,20 @@ package service import ( "context" - "fmt" "testing" - "time" - clusterDataStore 
"github.com/stackrox/rox/central/cluster/datastore" - namespaceDataStore "github.com/stackrox/rox/central/namespace/datastore" - roleDatastore "github.com/stackrox/rox/central/role/datastore" - "github.com/stackrox/rox/central/role/sachelper" v1 "github.com/stackrox/rox/generated/api/v1" "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/auth/authproviders" - "github.com/stackrox/rox/pkg/auth/permissions" - "github.com/stackrox/rox/pkg/concurrency" "github.com/stackrox/rox/pkg/defaults/accesscontrol" "github.com/stackrox/rox/pkg/errox" "github.com/stackrox/rox/pkg/grpc/authn/basic" "github.com/stackrox/rox/pkg/labels" - "github.com/stackrox/rox/pkg/postgres/pgtest" "github.com/stackrox/rox/pkg/protoassert" - "github.com/stackrox/rox/pkg/sac" "github.com/stackrox/rox/pkg/sac/effectiveaccessscope" "github.com/stackrox/rox/pkg/sac/resources" "github.com/stackrox/rox/pkg/uuid" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" ) @@ -515,723 +504,6 @@ func TestEffectiveAccessScopeForSimpleAccessScope(t *testing.T) { } } -func TestServiceImplWithDB(t *testing.T) { - suite.Run(t, new(serviceImplTestSuite)) -} - -type serviceImplTestSuite struct { - suite.Suite - - postgres *pgtest.TestPostgres - service *serviceImpl - - storedClusterIDs []string - storedNamespaceIDs []string - clusterNameToIDMap map[string]string - - storedPermissionSetIDs []string - storedAccessScopeIDs []string - storedRoleNames []string -} - -func (s *serviceImplTestSuite) SetupSuite() { - var err error - s.postgres = pgtest.ForT(s.T()) - s.Require().NotNil(s.postgres) - roleStore := roleDatastore.GetTestPostgresDataStore(s.T(), s.postgres.DB) - clusterStore, err := clusterDataStore.GetTestPostgresDataStore(s.T(), s.postgres.DB) - s.Require().NoError(err) - namespaceStore, err := namespaceDataStore.GetTestPostgresDataStore(s.T(), s.postgres.DB) - s.Require().NoError(err) - - s.service = &serviceImpl{ - roleDataStore: roleStore, - clusterDataStore: clusterStore, - namespaceDataStore: namespaceStore, - clusterSACHelper: sachelper.NewClusterSacHelper(clusterStore), - namespaceSACHelper: sachelper.NewClusterNamespaceSacHelper(clusterStore, namespaceStore), - } -} - -func (s *serviceImplTestSuite) SetupTest() { - s.storedAccessScopeIDs = make([]string, 0) - s.storedPermissionSetIDs = make([]string, 0) - s.storedRoleNames = make([]string, 0) - - s.storedClusterIDs = make([]string, 0) - s.storedNamespaceIDs = make([]string, 0) - s.clusterNameToIDMap = make(map[string]string, 0) - - writeCtx := sac.WithAllAccess(context.Background()) - - for _, cluster := range storageClusters { - clusterToAdd := cluster.CloneVT() - clusterToAdd.Id = "" - clusterToAdd.MainImage = "quay.io/rhacs-eng/main:latest" - id, err := s.service.clusterDataStore.AddCluster(writeCtx, clusterToAdd) - s.Require().NoError(err) - s.clusterNameToIDMap[clusterToAdd.GetName()] = id - s.storedClusterIDs = append(s.storedClusterIDs, id) - } - - for _, namespace := range storageNamespaces { - ns := namespace.CloneVT() - ns.Id = getNamespaceID(ns.GetName()) - ns.ClusterId = s.clusterNameToIDMap[ns.GetClusterName()] - s.Require().NoError(s.service.namespaceDataStore.AddNamespace(writeCtx, ns)) - s.storedNamespaceIDs = append(s.storedNamespaceIDs, ns.GetId()) - } -} - -func (s *serviceImplTestSuite) TearDownTest() { - writeCtx := sac.WithAllAccess(context.Background()) - for _, clusterID := range s.storedClusterIDs { - doneSignal := concurrency.NewSignal() - 
s.Require().NoError(s.service.clusterDataStore.RemoveCluster(writeCtx, clusterID, &doneSignal)) - require.Eventually(s.T(), - func() bool { return doneSignal.IsDone() }, - 5*time.Second, - 10*time.Millisecond, - ) - } - s.storedClusterIDs = s.storedClusterIDs[:0] - for _, namespaceID := range s.storedNamespaceIDs { - s.Require().NoError(s.service.namespaceDataStore.RemoveNamespace(writeCtx, namespaceID)) - } - for _, roleName := range s.storedRoleNames { - s.deleteRole(roleName) - } - for _, permissionSetID := range s.storedPermissionSetIDs { - s.deletePermissionSet(permissionSetID) - } - for _, accessScopeID := range s.storedAccessScopeIDs { - s.deleteAccessScope(accessScopeID) - } -} - -const ( - namespaceUUIDNamespace = "namespace" - - clusterPermission = "Cluster" - compliancePermission = "Compliance" - deploymentPermission = "Deployment" - deploymentExtensionPermission = "DeploymentExtension" - integrationPermission = "Integration" - namespacePermission = "Namespace" - networkGraphPermission = "NetworkGraph" - nodePermission = "Node" - rolePermission = "Role" -) - -func getTestResourceScopeSingleNamespace(clusterID string, namespace string) *sac.TestResourceScope { - return &sac.TestResourceScope{ - Clusters: map[string]*sac.TestClusterScope{ - clusterID: { - Namespaces: []string{namespace}, - Included: false, - }, - }, - Included: false, - } -} - -func getNamespaceID(namespaceName string) string { - return uuid.NewV5FromNonUUIDs(namespaceUUIDNamespace, namespaceName).String() -} - -func (s *serviceImplTestSuite) TestGetClustersForPermissions() { - deepPurpleClusterID := s.clusterNameToIDMap[clusterDeepPurple.GetName()] - queenClusterID := s.clusterNameToIDMap[clusterQueen.GetName()] - testResourceScope1 := getTestResourceScopeSingleNamespace( - queenClusterID, - namespaceInnuendoInClusterQueen.GetName()) - pinkFloydClusterID := s.clusterNameToIDMap[clusterPinkFloyd.GetName()] - testResourceScope2 := getTestResourceScopeSingleNamespace( - pinkFloydClusterID, - namespaceTheWallInClusterPinkFloyd.GetName()) - - testScopeMap := sac.TestScopeMap{ - storage.Access_READ_ACCESS: map[permissions.Resource]*sac.TestResourceScope{ - resources.Integration.GetResource(): { - Included: true, - }, - resources.Node.GetResource(): testResourceScope1, - resources.Deployment.GetResource(): testResourceScope1, - resources.NetworkGraph.GetResource(): testResourceScope2, - }, - } - - extendedAccessTestScopeMap := sac.TestScopeMap{ - storage.Access_READ_ACCESS: map[permissions.Resource]*sac.TestResourceScope{ - resources.Compliance.GetResource(): { - Included: true, - }, - resources.Integration.GetResource(): { - Included: true, - }, - resources.Node.GetResource(): testResourceScope1, - resources.Deployment.GetResource(): testResourceScope1, - resources.NetworkGraph.GetResource(): testResourceScope2, - }, - } - - deepPurpleClusterResponse := &v1.ScopeObject{ - Id: deepPurpleClusterID, - Name: clusterDeepPurple.GetName(), - } - - queenClusterResponse := &v1.ScopeObject{ - Id: queenClusterID, - Name: clusterQueen.GetName(), - } - - pinkFloydClusterResponse := &v1.ScopeObject{ - Id: pinkFloydClusterID, - Name: clusterPinkFloyd.GetName(), - } - - testCtx := sac.WithGlobalAccessScopeChecker(context.Background(), - sac.TestScopeCheckerCoreFromFullScopeMap(s.T(), testScopeMap)) - - extendedAccessTestCtx := sac.WithGlobalAccessScopeChecker(context.Background(), - sac.TestScopeCheckerCoreFromFullScopeMap(s.T(), extendedAccessTestScopeMap)) - - testCases := []struct { - name string - context context.Context - 
testedPermissions []string - expectedClusters []*v1.ScopeObject - }{ - { - name: "Global permission (Not Granted) gets no cluster data.", - context: testCtx, - testedPermissions: []string{rolePermission}, - expectedClusters: []*v1.ScopeObject{}, - }, - { - name: "Global permission (Granted) gets no cluster data.", - context: testCtx, - testedPermissions: []string{integrationPermission}, - expectedClusters: []*v1.ScopeObject{}, - }, - { - name: "Not granted Cluster scoped permission gets no cluster data.", - context: testCtx, - testedPermissions: []string{clusterPermission}, - expectedClusters: []*v1.ScopeObject{}, - }, - { - name: "Granted Cluster scoped permission gets only cluster data for clusters in permission scope.", - context: testCtx, - testedPermissions: []string{nodePermission}, - expectedClusters: []*v1.ScopeObject{queenClusterResponse}, - }, - { - name: "Not granted Namespace scoped permission gets no cluster data.", - context: testCtx, - testedPermissions: []string{namespacePermission}, - expectedClusters: []*v1.ScopeObject{}, - }, - { - name: "Granted Namespace scoped permission gets only cluster data for clusters in permission scope.", - context: testCtx, - testedPermissions: []string{deploymentPermission}, - expectedClusters: []*v1.ScopeObject{queenClusterResponse}, - }, - { - name: "Multiple not granted Namespace scoped permissions get no cluster data.", - context: testCtx, - testedPermissions: []string{namespacePermission, deploymentExtensionPermission}, - expectedClusters: []*v1.ScopeObject{}, - }, - { - name: "Multiple Namespace scoped permissions get only cluster data for clusters in granted permission scopes.", - context: testCtx, - testedPermissions: []string{namespacePermission, deploymentPermission}, - expectedClusters: []*v1.ScopeObject{queenClusterResponse}, - }, - { - name: "empty permission list get cluster data for all cluster data in scope of granted cluster and namespace permissions.", - context: testCtx, - testedPermissions: []string{}, - expectedClusters: []*v1.ScopeObject{queenClusterResponse, pinkFloydClusterResponse}, - }, - { - name: "Extended Access - Global permission (Not Granted) gets no cluster data.", - context: extendedAccessTestCtx, - testedPermissions: []string{rolePermission}, - expectedClusters: []*v1.ScopeObject{}, - }, - { - name: "Extended Access - Global permission (Granted) gets no cluster data.", - context: extendedAccessTestCtx, - testedPermissions: []string{integrationPermission}, - expectedClusters: []*v1.ScopeObject{}, - }, - { - name: "Extended Access - Not granted Cluster scoped permission gets no cluster data.", - context: extendedAccessTestCtx, - testedPermissions: []string{clusterPermission}, - expectedClusters: []*v1.ScopeObject{}, - }, - { - name: "Extended Access - Granted Cluster scoped permission at resource level gets all cluster data", - context: extendedAccessTestCtx, - testedPermissions: []string{compliancePermission}, - expectedClusters: []*v1.ScopeObject{ - deepPurpleClusterResponse, - queenClusterResponse, - pinkFloydClusterResponse, - }, - }, - { - name: "Extended Access - Granted Cluster scoped permission gets only cluster data for clusters in permission scope.", - context: extendedAccessTestCtx, - testedPermissions: []string{nodePermission}, - expectedClusters: []*v1.ScopeObject{queenClusterResponse}, - }, - { - name: "Extended Access - Not granted Namespace scoped permission gets no cluster data.", - context: extendedAccessTestCtx, - testedPermissions: []string{namespacePermission}, - expectedClusters: 
[]*v1.ScopeObject{}, - }, - { - name: "Extended Access - Granted Namespace scoped permission gets only cluster data for clusters in permission scope.", - context: extendedAccessTestCtx, - testedPermissions: []string{deploymentPermission}, - expectedClusters: []*v1.ScopeObject{queenClusterResponse}, - }, - { - name: "Extended Access - Multiple not granted Namespace scoped permissions get no cluster data.", - context: extendedAccessTestCtx, - testedPermissions: []string{namespacePermission, deploymentExtensionPermission}, - expectedClusters: []*v1.ScopeObject{}, - }, - { - name: "Extended Access - Multiple Namespace scoped permissions get only cluster data for clusters in granted permission scopes.", - context: extendedAccessTestCtx, - testedPermissions: []string{namespacePermission, deploymentPermission}, - expectedClusters: []*v1.ScopeObject{queenClusterResponse}, - }, - { - name: "Extended Access - empty permission list get cluster data for all cluster data in scope of granted cluster and namespace permissions.", - context: extendedAccessTestCtx, - testedPermissions: []string{}, - expectedClusters: []*v1.ScopeObject{ - deepPurpleClusterResponse, - queenClusterResponse, - pinkFloydClusterResponse, - }, - }, - } - - for _, c := range testCases { - s.Run(c.name, func() { - clusterResponse, err := s.service.GetClustersForPermissions(c.context, &v1.GetClustersForPermissionsRequest{ - Pagination: nil, - Permissions: c.testedPermissions, - }) - s.NoError(err) - protoassert.ElementsMatch(s.T(), clusterResponse.GetClusters(), c.expectedClusters) - }) - } -} - -func (s *serviceImplTestSuite) TestGetClustersForPermissionsPagination() { - queenClusterID := s.clusterNameToIDMap[clusterQueen.GetName()] - testResourceScope1 := getTestResourceScopeSingleNamespace( - queenClusterID, - namespaceInnuendoInClusterQueen.GetName()) - pinkFloydClusterID := s.clusterNameToIDMap[clusterPinkFloyd.GetName()] - testResourceScope2 := getTestResourceScopeSingleNamespace( - pinkFloydClusterID, - namespaceTheWallInClusterPinkFloyd.GetName()) - testScopeMap := sac.TestScopeMap{ - storage.Access_READ_ACCESS: map[permissions.Resource]*sac.TestResourceScope{ - resources.Integration.GetResource(): { - Included: true, - }, - resources.Node.GetResource(): testResourceScope1, - resources.Deployment.GetResource(): testResourceScope1, - resources.NetworkGraph.GetResource(): testResourceScope2, - }, - } - - queenClusterResponse := &v1.ScopeObject{ - Id: queenClusterID, - Name: clusterQueen.GetName(), - } - - pinkFloydClusterResponse := &v1.ScopeObject{ - Id: pinkFloydClusterID, - Name: clusterPinkFloyd.GetName(), - } - - testCtx := sac.WithGlobalAccessScopeChecker(context.Background(), - sac.TestScopeCheckerCoreFromFullScopeMap(s.T(), testScopeMap)) - - testCases := []struct { - name string - pagination *v1.Pagination - expectedClusters []*v1.ScopeObject - }{ - { - name: "No offset and a limit restricts to a list of appropriate size", - pagination: &v1.Pagination{ - Limit: 1, - Offset: 0, - SortOption: &v1.SortOption{ - Field: "Cluster", - Reversed: false, - }, - }, - expectedClusters: []*v1.ScopeObject{pinkFloydClusterResponse}, - }, - { - name: "Offset and no limit restricts to a list of appropriate size starting with expected value", - pagination: &v1.Pagination{ - Limit: 0, - Offset: 1, - SortOption: &v1.SortOption{ - Field: "Cluster", - Reversed: false, - }, - }, - expectedClusters: []*v1.ScopeObject{queenClusterResponse}, - }, - { - name: "Sort options without offset nor limit return the expected results", - pagination: 
&v1.Pagination{ - Limit: 0, - Offset: 0, - SortOption: &v1.SortOption{ - Field: "Cluster", - Reversed: false, - }, - }, - expectedClusters: []*v1.ScopeObject{pinkFloydClusterResponse, queenClusterResponse}, - }, - { - name: "Reversed sort without offset nor limit return the expected results", - pagination: &v1.Pagination{ - Limit: 0, - Offset: 0, - SortOption: &v1.SortOption{ - Field: "Cluster", - Reversed: true, - }, - }, - expectedClusters: []*v1.ScopeObject{queenClusterResponse, pinkFloydClusterResponse}, - }, - } - - for _, c := range testCases { - s.Run(c.name, func() { - clusterResponse, err := s.service.GetClustersForPermissions(testCtx, &v1.GetClustersForPermissionsRequest{ - Pagination: c.pagination, - Permissions: []string{}, - }) - s.NoError(err) - protoassert.SlicesEqual(s.T(), clusterResponse.GetClusters(), c.expectedClusters) - }) - } -} - -func (s *serviceImplTestSuite) TestGetNamespacesForClusterAndPermissions() { - queenClusterID := s.clusterNameToIDMap[clusterQueen.GetName()] - testResourceScope1 := getTestResourceScopeSingleNamespace( - queenClusterID, - namespaceInnuendoInClusterQueen.GetName()) - pinkFloydClusterID := s.clusterNameToIDMap[clusterPinkFloyd.GetName()] - testResourceScope2 := getTestResourceScopeSingleNamespace( - pinkFloydClusterID, - namespaceTheWallInClusterPinkFloyd.GetName()) - testResourceScope3 := getTestResourceScopeSingleNamespace( - queenClusterID, - namespaceQueenInClusterQueen.GetName()) - testScopeMap := sac.TestScopeMap{ - storage.Access_READ_ACCESS: map[permissions.Resource]*sac.TestResourceScope{ - resources.Integration.GetResource(): { - Included: true, - }, - resources.Node.GetResource(): testResourceScope1, - resources.Deployment.GetResource(): testResourceScope1, - resources.NetworkGraph.GetResource(): testResourceScope2, - resources.Image.GetResource(): testResourceScope3, - }, - } - - queenQueenNamespaceResponse := &v1.ScopeObject{ - Id: getNamespaceID(namespaceQueenInClusterQueen.GetName()), - Name: namespaceQueenInClusterQueen.GetName(), - } - - queenInnuendoNamespaceResponse := &v1.ScopeObject{ - Id: getNamespaceID(namespaceInnuendoInClusterQueen.GetName()), - Name: namespaceInnuendoInClusterQueen.GetName(), - } - - pinkFloydTheWallNamespaceResponse := &v1.ScopeObject{ - Id: getNamespaceID(namespaceTheWallInClusterPinkFloyd.GetName()), - Name: namespaceTheWallInClusterPinkFloyd.GetName(), - } - - testCases := []struct { - name string - testedClusterID string - testedPermissions []string - expectedNamespaces []*v1.ScopeObject - }{ - { - name: "Global permission (Not Granted) gets no namespace data.", - testedClusterID: s.clusterNameToIDMap[clusterQueen.GetName()], - testedPermissions: []string{rolePermission}, - expectedNamespaces: []*v1.ScopeObject{}, - }, - { - name: "Global permission (Granted) gets no namespace data.", - testedClusterID: s.clusterNameToIDMap[clusterQueen.GetName()], - testedPermissions: []string{integrationPermission}, - expectedNamespaces: []*v1.ScopeObject{}, - }, - { - name: "Not granted Cluster scoped permission gets no namespace data.", - testedClusterID: s.clusterNameToIDMap[clusterQueen.GetName()], - testedPermissions: []string{clusterPermission}, - expectedNamespaces: []*v1.ScopeObject{}, - }, - { - name: "Granted Cluster scoped permission gets no namespace data.", - testedClusterID: s.clusterNameToIDMap[clusterQueen.GetName()], - testedPermissions: []string{nodePermission}, - expectedNamespaces: []*v1.ScopeObject{}, - }, - { - name: "Not granted Namespace scoped permission gets no namespace data.", - 
testedClusterID: s.clusterNameToIDMap[clusterQueen.GetName()], - testedPermissions: []string{namespacePermission}, - expectedNamespaces: []*v1.ScopeObject{}, - }, - { - name: "Granted Namespace scoped permission gets only namespace data for namespaces in cluster and permission scope.", - testedClusterID: s.clusterNameToIDMap[clusterQueen.GetName()], - testedPermissions: []string{deploymentPermission}, - expectedNamespaces: []*v1.ScopeObject{queenInnuendoNamespaceResponse}, - }, - { - name: "Granted Namespace scoped permission gets only namespace data for namespaces in cluster and permission scope (other permission).", - testedClusterID: s.clusterNameToIDMap[clusterPinkFloyd.GetName()], - testedPermissions: []string{networkGraphPermission}, - expectedNamespaces: []*v1.ScopeObject{pinkFloydTheWallNamespaceResponse}, - }, - { - name: "Multiple not granted Namespace scoped permissions get no namespace data.", - testedClusterID: s.clusterNameToIDMap[clusterQueen.GetName()], - testedPermissions: []string{namespacePermission, deploymentExtensionPermission}, - expectedNamespaces: []*v1.ScopeObject{}, - }, - { - name: "Multiple Namespace scoped permissions get only namespace data for namespaces in granted permission scopes.", - testedClusterID: s.clusterNameToIDMap[clusterQueen.GetName()], - testedPermissions: []string{namespacePermission, deploymentPermission}, - expectedNamespaces: []*v1.ScopeObject{queenInnuendoNamespaceResponse}, - }, - { - name: "empty permission list get namespace data for all namespaces in scope of target cluster and granted namespace permissions.", - testedClusterID: s.clusterNameToIDMap[clusterQueen.GetName()], - testedPermissions: []string{}, - expectedNamespaces: []*v1.ScopeObject{queenQueenNamespaceResponse, queenInnuendoNamespaceResponse}, - }, - } - - testCtx := sac.WithGlobalAccessScopeChecker(context.Background(), - sac.TestScopeCheckerCoreFromFullScopeMap(s.T(), testScopeMap)) - - for _, c := range testCases { - s.Run(c.name, func() { - request := &v1.GetNamespaceForClusterAndPermissionsRequest{ - ClusterId: c.testedClusterID, - Permissions: c.testedPermissions, - } - namespaceResponse, err := s.service.GetNamespacesForClusterAndPermissions(testCtx, request) - s.NoError(err) - protoassert.ElementsMatch(s.T(), namespaceResponse.GetNamespaces(), c.expectedNamespaces) - }) - } -} - -func getValidRole(name string) *storage.Role { - permissionSetID := accesscontrol.DefaultPermissionSetIDs[accesscontrol.Admin] - scopeID := accesscontrol.DefaultAccessScopeIDs[accesscontrol.UnrestrictedAccessScope] - return &storage.Role{ - Name: name, - Description: fmt.Sprintf("Test role for %s", name), - PermissionSetId: permissionSetID, - AccessScopeId: scopeID, - Traits: nil, - } -} - -func (s *serviceImplTestSuite) TestCreateRoleValidAccessScopeID() { - ctx := sac.WithAllAccess(context.Background()) - roleName := "TestCreateRoleValidAccessScopeID" - - ps := s.createPermissionSet(roleName) - scope := s.createAccessScope(roleName) - - role := getValidRole(roleName) - role.PermissionSetId = ps.GetId() - role.AccessScopeId = scope.GetId() - createRoleRequest := &v1.CreateRoleRequest{ - Name: roleName, - Role: role, - } - _, err := s.service.CreateRole(ctx, createRoleRequest) - s.NoError(err) - s.storedRoleNames = append(s.storedRoleNames, role.GetName()) -} - -func (s *serviceImplTestSuite) TestCreateRoleEmptyAccessScopeID() { - ctx := sac.WithAllAccess(context.Background()) - roleName := "TestCreateRoleEmptyAccessScopeID" - - ps := s.createPermissionSet(roleName) - - role := 
getValidRole(roleName) - role.PermissionSetId = ps.GetId() - role.AccessScopeId = "" - createRoleRequest := &v1.CreateRoleRequest{ - Name: roleName, - Role: role, - } - _, err := s.service.CreateRole(ctx, createRoleRequest) - s.ErrorContains(err, "role access_scope_id field must be set") -} - -func (s *serviceImplTestSuite) TestUpdateExistingRoleValidAccessScopeID() { - ctx := sac.WithAllAccess(context.Background()) - role := s.createRole("TestUpdateExistingRoleValidAccessScopeID") - newScope := s.createAccessScope("new scope") - role.AccessScopeId = newScope.GetId() - _, err := s.service.UpdateRole(ctx, role) - s.NoError(err) -} - -func (s *serviceImplTestSuite) TestUpdateExistingRoleEmptyAccessScopeID() { - ctx := sac.WithAllAccess(context.Background()) - roleName := "TestUpdateExistingRoleEmptyAccessScopeID" - role := s.createRole(roleName) - role.AccessScopeId = "" - _, err := s.service.UpdateRole(ctx, role) - s.ErrorContains(err, "role access_scope_id field must be set") -} - -func (s *serviceImplTestSuite) TestUpdateMissingRoleValidAccessScopeID() { - ctx := sac.WithAllAccess(context.Background()) - roleName := "TestUpdateMissingRoleValidAccessScopeID" - ps := s.createPermissionSet(roleName) - scope := s.createAccessScope(roleName) - role := getValidRole(roleName) - role.PermissionSetId = ps.GetId() - role.AccessScopeId = scope.GetId() - _, err := s.service.UpdateRole(ctx, role) - s.ErrorIs(err, errox.NotFound) -} - -func (s *serviceImplTestSuite) TestUpdateMissingRoleEmptyAccessScopeID() { - ctx := sac.WithAllAccess(context.Background()) - roleName := "TestUpdateMissingRoleEmptyAccessScopeID" - ps := s.createPermissionSet(roleName) - role := getValidRole(roleName) - role.PermissionSetId = ps.GetId() - role.AccessScopeId = "" - _, err := s.service.UpdateRole(ctx, role) - s.ErrorContains(err, "role access_scope_id field must be set") -} - -func (s *serviceImplTestSuite) createAccessScope(name string) *storage.SimpleAccessScope { - ctx := sac.WithAllAccess(context.Background()) - scope := &storage.SimpleAccessScope{ - Name: name, - Description: fmt.Sprintf("Test access scope for %s", name), - Rules: &storage.SimpleAccessScope_Rules{ - IncludedClusters: []string{"test"}, - }, - Traits: nil, - } - postedScope, postErr := s.service.PostSimpleAccessScope(ctx, scope) - s.Require().NoError(postErr) - s.storedAccessScopeIDs = append(s.storedAccessScopeIDs, postedScope.GetId()) - return postedScope -} - -func (s *serviceImplTestSuite) deleteAccessScope(id string) { - ctx := sac.WithAllAccess(context.Background()) - request := &v1.ResourceByID{ - Id: id, - } - _, deleteErr := s.service.DeleteSimpleAccessScope(ctx, request) - s.Require().NoError(deleteErr) -} - -func (s *serviceImplTestSuite) createPermissionSet(name string) *storage.PermissionSet { - ctx := sac.WithAllAccess(context.Background()) - permissionSet := &storage.PermissionSet{ - Name: name, - Description: fmt.Sprintf("Test permission set for %s", name), - ResourceToAccess: nil, - Traits: nil, - } - ps, postErr := s.service.PostPermissionSet(ctx, permissionSet) - s.Require().NoError(postErr) - s.storedPermissionSetIDs = append(s.storedPermissionSetIDs, ps.GetId()) - return ps -} - -func (s *serviceImplTestSuite) deletePermissionSet(id string) { - ctx := sac.WithAllAccess(context.Background()) - request := &v1.ResourceByID{ - Id: id, - } - _, deleteErr := s.service.DeletePermissionSet(ctx, request) - s.Require().NoError(deleteErr) -} - -func (s *serviceImplTestSuite) createRole(roleName string) *storage.Role { - ctx := 
sac.WithAllAccess(context.Background()) - - ps := s.createPermissionSet(roleName) - scope := s.createAccessScope(roleName) - - createRoleRequest := &v1.CreateRoleRequest{ - Name: roleName, - Role: getValidRole(roleName), - } - createRoleRequest.Role.PermissionSetId = ps.GetId() - createRoleRequest.Role.AccessScopeId = scope.GetId() - - _, createErr := s.service.CreateRole(ctx, createRoleRequest) - s.Require().NoError(createErr) - s.storedRoleNames = append(s.storedRoleNames, roleName) - - readRoleRequest := &v1.ResourceByID{ - Id: roleName, - } - role, readErr := s.service.GetRole(ctx, readRoleRequest) - s.Require().NoError(readErr) - return role -} - -func (s *serviceImplTestSuite) deleteRole(name string) { - ctx := sac.WithAllAccess(context.Background()) - request := &v1.ResourceByID{ - Id: name, - } - _, deleteErr := s.service.DeleteRole(ctx, request) - s.Require().NoError(deleteErr) -} - func TestGetMyPermissions(t *testing.T) { suite.Run(t, new(roleServiceGetMyPermissionsTestSuite)) } diff --git a/central/role/service/test_helpers_test.go b/central/role/service/test_helpers_test.go new file mode 100644 index 0000000000000..386e556e97943 --- /dev/null +++ b/central/role/service/test_helpers_test.go @@ -0,0 +1,240 @@ +//go:build sql_integration + +package service + +import ( + "fmt" + "testing" + "time" + + clusterDataStore "github.com/stackrox/rox/central/cluster/datastore" + namespaceDataStore "github.com/stackrox/rox/central/namespace/datastore" + rolePkg "github.com/stackrox/rox/central/role" + roleDataStore "github.com/stackrox/rox/central/role/datastore" + v1 "github.com/stackrox/rox/generated/api/v1" + "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/concurrency" + "github.com/stackrox/rox/pkg/declarativeconfig" + "github.com/stackrox/rox/pkg/postgres/pgtest" + "github.com/stackrox/rox/pkg/sac" + "github.com/stackrox/rox/pkg/uuid" + "github.com/stretchr/testify/require" +) + +const ( + namespaceUUIDNamespace = "namespace" +) + +var ( + nilTraits *storage.Traits = nil + + imperativeOriginTraits = &storage.Traits{Origin: storage.Traits_IMPERATIVE} + + declarativeOriginTraits = &storage.Traits{Origin: storage.Traits_DECLARATIVE} + + orphanedDeclarativeOriginTraits = &storage.Traits{Origin: storage.Traits_DECLARATIVE_ORPHANED} + + dynamicOriginTraits = &storage.Traits{Origin: storage.Traits_DYNAMIC} +) + +type serviceImplTester struct { + postgres *pgtest.TestPostgres + service Service + + roleStore roleDataStore.DataStore + clusterStore clusterDataStore.DataStore + namespaceStore namespaceDataStore.DataStore + + storedClusterIDs []string + storedNamespaceIDs []string + clusterNameToIDMap map[string]string + + storedPermissionSetIDs []string + storedAccessScopeIDs []string + storedRoleNames []string +} + +func (s *serviceImplTester) Setup(t *testing.T) { + if s == nil { + return + } + + var err error + s.postgres = pgtest.ForT(t) + require.NotNil(t, s.postgres) + s.roleStore = roleDataStore.GetTestPostgresDataStore(t, s.postgres.DB) + clusterStore, err := clusterDataStore.GetTestPostgresDataStore(t, s.postgres.DB) + require.NoError(t, err) + s.clusterStore = clusterStore + namespaceStore, err := namespaceDataStore.GetTestPostgresDataStore(t, s.postgres.DB) + require.NoError(t, err) + s.namespaceStore = namespaceStore + + s.service = New(s.roleStore, s.clusterStore, s.namespaceStore) +} + +func (s *serviceImplTester) SetupTest(t *testing.T) { + if s == nil { + return + } + + s.storedAccessScopeIDs = make([]string, 0) + s.storedPermissionSetIDs = 
make([]string, 0) + s.storedRoleNames = make([]string, 0) + + s.storedClusterIDs = make([]string, 0) + s.storedNamespaceIDs = make([]string, 0) + s.clusterNameToIDMap = make(map[string]string, 0) + + writeCtx := sac.WithAllAccess(t.Context()) + + for _, cluster := range storageClusters { + clusterToAdd := cluster.CloneVT() + clusterToAdd.Id = "" + clusterToAdd.MainImage = "quay.io/rhacs-eng/main:latest" + id, err := s.clusterStore.AddCluster(writeCtx, clusterToAdd) + require.NoError(t, err) + s.clusterNameToIDMap[clusterToAdd.GetName()] = id + s.storedClusterIDs = append(s.storedClusterIDs, id) + } + + for _, namespace := range storageNamespaces { + ns := namespace.CloneVT() + ns.Id = getNamespaceID(ns.GetName()) + ns.ClusterId = s.clusterNameToIDMap[ns.GetClusterName()] + require.NoError(t, s.namespaceStore.AddNamespace(writeCtx, ns)) + s.storedNamespaceIDs = append(s.storedNamespaceIDs, ns.GetId()) + } +} + +func (s *serviceImplTester) TearDownTest(t *testing.T) { + if s == nil { + return + } + + writeCtx := sac.WithAllAccess(t.Context()) + for _, clusterID := range s.storedClusterIDs { + doneSignal := concurrency.NewSignal() + require.NoError(t, s.clusterStore.RemoveCluster(writeCtx, clusterID, &doneSignal)) + require.Eventually(t, + func() bool { return doneSignal.IsDone() }, + 5*time.Second, + 10*time.Millisecond, + ) + } + s.storedClusterIDs = s.storedClusterIDs[:0] + for _, namespaceID := range s.storedNamespaceIDs { + require.NoError(t, s.namespaceStore.RemoveNamespace(writeCtx, namespaceID)) + } + for _, roleName := range s.storedRoleNames { + s.deleteRole(t, roleName) + } + for _, permissionSetID := range s.storedPermissionSetIDs { + s.deletePermissionSet(t, permissionSetID) + } + for _, accessScopeID := range s.storedAccessScopeIDs { + s.deleteAccessScope(t, accessScopeID) + } +} + +func (s *serviceImplTester) createRole(t *testing.T, roleName string, traits *storage.Traits) *storage.Role { + ctx := sac.WithAllAccess(t.Context()) + ctx = declarativeconfig.WithModifyDeclarativeOrImperative(ctx) + + ps := s.createPermissionSet(t, roleName, traits) + scope := s.createAccessScope(t, roleName, traits) + + role := &storage.Role{ + Name: roleName, + Description: fmt.Sprintf("Test role for %s", roleName), + PermissionSetId: ps.GetId(), + AccessScopeId: scope.GetId(), + Traits: traits, + } + + err := s.roleStore.AddRole(ctx, role) + require.NoError(t, err) + s.storedRoleNames = append(s.storedRoleNames, roleName) + + return role +} + +func (s *serviceImplTester) deleteRole(t *testing.T, roleName string) { + if s == nil { + return + } + + ctx := sac.WithAllAccess(t.Context()) + ctx = declarativeconfig.WithModifyDeclarativeOrImperative(ctx) + request := &v1.ResourceByID{ + Id: roleName, + } + _, deleteErr := s.service.DeleteRole(ctx, request) + require.NoError(t, deleteErr) +} + +func (s *serviceImplTester) createPermissionSet(t *testing.T, name string, traits *storage.Traits) *storage.PermissionSet { + ctx := sac.WithAllAccess(t.Context()) + ctx = declarativeconfig.WithModifyDeclarativeOrImperative(ctx) + permissionSet := &storage.PermissionSet{ + Name: name, + Description: fmt.Sprintf("Test permission set for %s", name), + ResourceToAccess: nil, + Traits: traits, + } + permissionSet.Id = rolePkg.GeneratePermissionSetID() + err := s.roleStore.UpsertPermissionSet(ctx, permissionSet) + require.NoError(t, err) + s.storedPermissionSetIDs = append(s.storedPermissionSetIDs, permissionSet.GetId()) + return permissionSet +} + +func (s *serviceImplTester) deletePermissionSet(t *testing.T, 
permissionSetID string) { + if s == nil { + return + } + + ctx := sac.WithAllAccess(t.Context()) + ctx = declarativeconfig.WithModifyDeclarativeOrImperative(ctx) + request := &v1.ResourceByID{ + Id: permissionSetID, + } + _, deleteErr := s.service.DeletePermissionSet(ctx, request) + require.NoError(t, deleteErr) +} + +func (s *serviceImplTester) createAccessScope(t *testing.T, name string, traits *storage.Traits) *storage.SimpleAccessScope { + ctx := sac.WithAllAccess(t.Context()) + ctx = declarativeconfig.WithModifyDeclarativeOrImperative(ctx) + scope := &storage.SimpleAccessScope{ + Name: name, + Description: fmt.Sprintf("Test access scope for %s", name), + Rules: &storage.SimpleAccessScope_Rules{ + IncludedClusters: []string{"test"}, + }, + Traits: traits, + } + scope.Id = rolePkg.GenerateAccessScopeID() + err := s.roleStore.UpsertAccessScope(ctx, scope) + require.NoError(t, err) + s.storedAccessScopeIDs = append(s.storedAccessScopeIDs, scope.GetId()) + return scope +} + +func (s *serviceImplTester) deleteAccessScope(t *testing.T, accessScopeID string) { + if s == nil { + return + } + + ctx := sac.WithAllAccess(t.Context()) + ctx = declarativeconfig.WithModifyDeclarativeOrImperative(ctx) + request := &v1.ResourceByID{ + Id: accessScopeID, + } + _, deleteErr := s.service.DeleteSimpleAccessScope(ctx, request) + require.NoError(t, deleteErr) +} + +func getNamespaceID(namespaceName string) string { + return uuid.NewV5FromNonUUIDs(namespaceUUIDNamespace, namespaceName).String() +} diff --git a/generated/api/v1/auth_service.swagger.json b/generated/api/v1/auth_service.swagger.json index cc13a546bfa5d..bc1fee72728a4 100644 --- a/generated/api/v1/auth_service.swagger.json +++ b/generated/api/v1/auth_service.swagger.json @@ -486,10 +486,11 @@ "IMPERATIVE", "DEFAULT", "DECLARATIVE", - "DECLARATIVE_ORPHANED" + "DECLARATIVE_ORPHANED", + "DYNAMIC" ], "default": "IMPERATIVE", - "description": "Origin specifies the origin of an object.\nObjects can have four different origins:\n- IMPERATIVE: the object was created via the API. This is assumed by default.\n- DEFAULT: the object is a default object, such as default roles, access scopes etc.\n- DECLARATIVE: the object is created via declarative configuration.\n- DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object)\nBased on the origin, different rules apply to the objects.\nObjects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration.\nAdditionally, they may not reference objects with the IMPERATIVE origin.\nObjects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration.\nThey may be referenced by all other objects.\nObjects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration.\nThey may reference all other objects.\nObjects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration.\nDECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration.\nObjects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore.\nThey may be referenced by all other objects." + "description": "Origin specifies the origin of an object.\nObjects can have five different origins:\n- IMPERATIVE: the object was created via the API. 
This is assumed by default.\n- DEFAULT: the object is a default object, such as default roles, access scopes etc.\n- DECLARATIVE: the object is created via declarative configuration.\n- DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object)\n- DYNAMIC: the object is created via an internal API, generated on the fly and meant to be ephemeral.\nBased on the origin, different rules apply to the objects.\nObjects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration.\nAdditionally, they may not reference objects with the IMPERATIVE or DYNAMIC origin.\nObjects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration.\nThey may be referenced by all other objects.\nObjects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration.\nThey may reference all other objects.\nObjects with the DYNAMIC origin are neither allowed to be modified via API, nor via declarative configuration.\nThey may reference all other objects.\nObjects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration.\nDECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration.\nObjects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore.\nThey may be referenced by all other objects." }, "storageTraitsVisibility": { "type": "string", diff --git a/generated/api/v1/authprovider_service.swagger.json b/generated/api/v1/authprovider_service.swagger.json index e628c6d1b4f07..f3d09e80c2350 100644 --- a/generated/api/v1/authprovider_service.swagger.json +++ b/generated/api/v1/authprovider_service.swagger.json @@ -609,10 +609,11 @@ "IMPERATIVE", "DEFAULT", "DECLARATIVE", - "DECLARATIVE_ORPHANED" + "DECLARATIVE_ORPHANED", + "DYNAMIC" ], "default": "IMPERATIVE", - "description": "Origin specifies the origin of an object.\nObjects can have four different origins:\n- IMPERATIVE: the object was created via the API. This is assumed by default.\n- DEFAULT: the object is a default object, such as default roles, access scopes etc.\n- DECLARATIVE: the object is created via declarative configuration.\n- DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object)\nBased on the origin, different rules apply to the objects.\nObjects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration.\nAdditionally, they may not reference objects with the IMPERATIVE origin.\nObjects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration.\nThey may be referenced by all other objects.\nObjects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration.\nThey may reference all other objects.\nObjects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration.\nDECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration.\nObjects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore.\nThey may be referenced by all other objects." 
+ "description": "Origin specifies the origin of an object.\nObjects can have five different origins:\n- IMPERATIVE: the object was created via the API. This is assumed by default.\n- DEFAULT: the object is a default object, such as default roles, access scopes etc.\n- DECLARATIVE: the object is created via declarative configuration.\n- DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object)\n- DYNAMIC: the object is created via an internal API, generated on the fly and meant to be ephemeral.\nBased on the origin, different rules apply to the objects.\nObjects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration.\nAdditionally, they may not reference objects with the IMPERATIVE or DYNAMIC origin.\nObjects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration.\nThey may be referenced by all other objects.\nObjects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration.\nThey may reference all other objects.\nObjects with the DYNAMIC origin are neither allowed to be modified via API, nor via declarative configuration.\nThey may reference all other objects.\nObjects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration.\nDECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration.\nObjects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore.\nThey may be referenced by all other objects." }, "storageTraitsVisibility": { "type": "string", diff --git a/generated/api/v1/group_service.swagger.json b/generated/api/v1/group_service.swagger.json index 9bd4362b7378b..83bd2b92da896 100644 --- a/generated/api/v1/group_service.swagger.json +++ b/generated/api/v1/group_service.swagger.json @@ -72,7 +72,8 @@ "IMPERATIVE", "DEFAULT", "DECLARATIVE", - "DECLARATIVE_ORPHANED" + "DECLARATIVE_ORPHANED", + "DYNAMIC" ], "default": "IMPERATIVE" }, @@ -397,10 +398,11 @@ "IMPERATIVE", "DEFAULT", "DECLARATIVE", - "DECLARATIVE_ORPHANED" + "DECLARATIVE_ORPHANED", + "DYNAMIC" ], "default": "IMPERATIVE", - "description": "Origin specifies the origin of an object.\nObjects can have four different origins:\n- IMPERATIVE: the object was created via the API. 
This is assumed by default.\n- DEFAULT: the object is a default object, such as default roles, access scopes etc.\n- DECLARATIVE: the object is created via declarative configuration.\n- DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object)\nBased on the origin, different rules apply to the objects.\nObjects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration.\nAdditionally, they may not reference objects with the IMPERATIVE origin.\nObjects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration.\nThey may be referenced by all other objects.\nObjects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration.\nThey may reference all other objects.\nObjects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration.\nDECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration.\nObjects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore.\nThey may be referenced by all other objects." + "description": "Origin specifies the origin of an object.\nObjects can have five different origins:\n- IMPERATIVE: the object was created via the API. This is assumed by default.\n- DEFAULT: the object is a default object, such as default roles, access scopes etc.\n- DECLARATIVE: the object is created via declarative configuration.\n- DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object)\n- DYNAMIC: the object is created via an internal API, generated on the fly and meant to be ephemeral.\nBased on the origin, different rules apply to the objects.\nObjects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration.\nAdditionally, they may not reference objects with the IMPERATIVE or DYNAMIC origin.\nObjects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration.\nThey may be referenced by all other objects.\nObjects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration.\nThey may reference all other objects.\nObjects with the DYNAMIC origin are neither allowed to be modified via API, nor via declarative configuration.\nThey may reference all other objects.\nObjects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration.\nDECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration.\nObjects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore.\nThey may be referenced by all other objects." 
}, "storageTraitsVisibility": { "type": "string", diff --git a/generated/api/v1/notifier_service.swagger.json b/generated/api/v1/notifier_service.swagger.json index 2fa340f224bab..ea69b91dd268c 100644 --- a/generated/api/v1/notifier_service.swagger.json +++ b/generated/api/v1/notifier_service.swagger.json @@ -898,10 +898,11 @@ "IMPERATIVE", "DEFAULT", "DECLARATIVE", - "DECLARATIVE_ORPHANED" + "DECLARATIVE_ORPHANED", + "DYNAMIC" ], "default": "IMPERATIVE", - "description": "Origin specifies the origin of an object.\nObjects can have four different origins:\n- IMPERATIVE: the object was created via the API. This is assumed by default.\n- DEFAULT: the object is a default object, such as default roles, access scopes etc.\n- DECLARATIVE: the object is created via declarative configuration.\n- DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object)\nBased on the origin, different rules apply to the objects.\nObjects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration.\nAdditionally, they may not reference objects with the IMPERATIVE origin.\nObjects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration.\nThey may be referenced by all other objects.\nObjects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration.\nThey may reference all other objects.\nObjects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration.\nDECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration.\nObjects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore.\nThey may be referenced by all other objects." + "description": "Origin specifies the origin of an object.\nObjects can have five different origins:\n- IMPERATIVE: the object was created via the API. 
This is assumed by default.\n- DEFAULT: the object is a default object, such as default roles, access scopes etc.\n- DECLARATIVE: the object is created via declarative configuration.\n- DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object)\n- DYNAMIC: the object is created via an internal API, generated on the fly and meant to be ephemeral.\nBased on the origin, different rules apply to the objects.\nObjects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration.\nAdditionally, they may not reference objects with the IMPERATIVE or DYNAMIC origin.\nObjects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration.\nThey may be referenced by all other objects.\nObjects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration.\nThey may reference all other objects.\nObjects with the DYNAMIC origin are neither allowed to be modified via API, nor via declarative configuration.\nThey may reference all other objects.\nObjects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration.\nDECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration.\nObjects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore.\nThey may be referenced by all other objects." }, "storageTraitsVisibility": { "type": "string", diff --git a/generated/api/v1/role_service.swagger.json b/generated/api/v1/role_service.swagger.json index bd8b288e4f28d..ddbdeda1f8c64 100644 --- a/generated/api/v1/role_service.swagger.json +++ b/generated/api/v1/role_service.swagger.json @@ -1082,10 +1082,11 @@ "IMPERATIVE", "DEFAULT", "DECLARATIVE", - "DECLARATIVE_ORPHANED" + "DECLARATIVE_ORPHANED", + "DYNAMIC" ], "default": "IMPERATIVE", - "description": "Origin specifies the origin of an object.\nObjects can have four different origins:\n- IMPERATIVE: the object was created via the API. This is assumed by default.\n- DEFAULT: the object is a default object, such as default roles, access scopes etc.\n- DECLARATIVE: the object is created via declarative configuration.\n- DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object)\nBased on the origin, different rules apply to the objects.\nObjects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration.\nAdditionally, they may not reference objects with the IMPERATIVE origin.\nObjects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration.\nThey may be referenced by all other objects.\nObjects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration.\nThey may reference all other objects.\nObjects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration.\nDECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration.\nObjects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore.\nThey may be referenced by all other objects." 
+ "description": "Origin specifies the origin of an object.\nObjects can have five different origins:\n- IMPERATIVE: the object was created via the API. This is assumed by default.\n- DEFAULT: the object is a default object, such as default roles, access scopes etc.\n- DECLARATIVE: the object is created via declarative configuration.\n- DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object)\n- DYNAMIC: the object is created via an internal API, generated on the fly and meant to be ephemeral.\nBased on the origin, different rules apply to the objects.\nObjects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration.\nAdditionally, they may not reference objects with the IMPERATIVE or DYNAMIC origin.\nObjects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration.\nThey may be referenced by all other objects.\nObjects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration.\nThey may reference all other objects.\nObjects with the DYNAMIC origin are neither allowed to be modified via API, nor via declarative configuration.\nThey may reference all other objects.\nObjects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration.\nDECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration.\nObjects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore.\nThey may be referenced by all other objects." }, "storageTraitsVisibility": { "type": "string", diff --git a/generated/api/v1/signature_integration_service.swagger.json b/generated/api/v1/signature_integration_service.swagger.json index 6d9fb27608569..e693bd546db6a 100644 --- a/generated/api/v1/signature_integration_service.swagger.json +++ b/generated/api/v1/signature_integration_service.swagger.json @@ -338,10 +338,11 @@ "IMPERATIVE", "DEFAULT", "DECLARATIVE", - "DECLARATIVE_ORPHANED" + "DECLARATIVE_ORPHANED", + "DYNAMIC" ], "default": "IMPERATIVE", - "description": "Origin specifies the origin of an object.\nObjects can have four different origins:\n- IMPERATIVE: the object was created via the API. 
This is assumed by default.\n- DEFAULT: the object is a default object, such as default roles, access scopes etc.\n- DECLARATIVE: the object is created via declarative configuration.\n- DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object)\nBased on the origin, different rules apply to the objects.\nObjects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration.\nAdditionally, they may not reference objects with the IMPERATIVE origin.\nObjects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration.\nThey may be referenced by all other objects.\nObjects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration.\nThey may reference all other objects.\nObjects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration.\nDECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration.\nObjects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore.\nThey may be referenced by all other objects." + "description": "Origin specifies the origin of an object.\nObjects can have five different origins:\n- IMPERATIVE: the object was created via the API. This is assumed by default.\n- DEFAULT: the object is a default object, such as default roles, access scopes etc.\n- DECLARATIVE: the object is created via declarative configuration.\n- DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object)\n- DYNAMIC: the object is created via an internal API, generated on the fly and meant to be ephemeral.\nBased on the origin, different rules apply to the objects.\nObjects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration.\nAdditionally, they may not reference objects with the IMPERATIVE or DYNAMIC origin.\nObjects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration.\nThey may be referenced by all other objects.\nObjects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration.\nThey may reference all other objects.\nObjects with the DYNAMIC origin are neither allowed to be modified via API, nor via declarative configuration.\nThey may reference all other objects.\nObjects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration.\nDECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration.\nObjects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore.\nThey may be referenced by all other objects." }, "storageTraitsVisibility": { "type": "string", diff --git a/generated/storage/traits.pb.go b/generated/storage/traits.pb.go index bbe5adbd6200e..e15899cebf7bb 100644 --- a/generated/storage/traits.pb.go +++ b/generated/storage/traits.pb.go @@ -126,18 +126,21 @@ func (Traits_Visibility) EnumDescriptor() ([]byte, []int) { } // Origin specifies the origin of an object. -// Objects can have four different origins: +// Objects can have five different origins: // - IMPERATIVE: the object was created via the API. 
This is assumed by default. // - DEFAULT: the object is a default object, such as default roles, access scopes etc. // - DECLARATIVE: the object is created via declarative configuration. // - DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object) +// - DYNAMIC: the object is created via an internal API, generated on the fly and meant to be ephemeral. // Based on the origin, different rules apply to the objects. // Objects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration. -// Additionally, they may not reference objects with the IMPERATIVE origin. +// Additionally, they may not reference objects with the IMPERATIVE or DYNAMIC origin. // Objects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration. // They may be referenced by all other objects. // Objects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration. // They may reference all other objects. +// Objects with the DYNAMIC origin are neither allowed to be modified via API, nor via declarative configuration. +// They may reference all other objects. // Objects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration. // DECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration. // Objects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore. @@ -149,6 +152,7 @@ const ( Traits_DEFAULT Traits_Origin = 1 Traits_DECLARATIVE Traits_Origin = 2 Traits_DECLARATIVE_ORPHANED Traits_Origin = 3 + Traits_DYNAMIC Traits_Origin = 4 ) // Enum value maps for Traits_Origin. 
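For readers skimming the regenerated descriptors above, here is a minimal sketch (not part of the patch) of the reference rule that the new DYNAMIC origin participates in: per the Traits.Origin comment, only a declarative (or declarative-orphaned) object referencing an IMPERATIVE or DYNAMIC object is forbidden. The helper `canReference` is hypothetical and merely restates the check that `VerifyReferencedResourceOrigin` performs in the `origin.go` hunk further below.

```go
package main

import (
	"fmt"

	"github.com/stackrox/rox/generated/storage"
)

// canReference restates the documented rule: declarative objects may not
// reference IMPERATIVE or DYNAMIC objects; every other combination is allowed.
func canReference(referencing, referenced storage.Traits_Origin) bool {
	referencingIsDeclarative := referencing == storage.Traits_DECLARATIVE ||
		referencing == storage.Traits_DECLARATIVE_ORPHANED
	referencedIsImperativeOrDynamic := referenced == storage.Traits_IMPERATIVE ||
		referenced == storage.Traits_DYNAMIC
	return !(referencingIsDeclarative && referencedIsImperativeOrDynamic)
}

func main() {
	// Mirrors the expectations in the origin_test.go hunk below.
	fmt.Println(canReference(storage.Traits_DECLARATIVE, storage.Traits_DYNAMIC)) // false: forbidden
	fmt.Println(canReference(storage.Traits_DYNAMIC, storage.Traits_DECLARATIVE)) // true
	fmt.Println(canReference(storage.Traits_IMPERATIVE, storage.Traits_DYNAMIC))  // true
}
```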
@@ -158,12 +162,14 @@ var ( 1: "DEFAULT", 2: "DECLARATIVE", 3: "DECLARATIVE_ORPHANED", + 4: "DYNAMIC", } Traits_Origin_value = map[string]int32{ "IMPERATIVE": 0, "DEFAULT": 1, "DECLARATIVE": 2, "DECLARATIVE_ORPHANED": 3, + "DYNAMIC": 4, } ) @@ -258,7 +264,7 @@ var File_storage_traits_proto protoreflect.FileDescriptor const file_storage_traits_proto_rawDesc = "" + "\n" + - "\x14storage/traits.proto\x12\astorage\"\xf3\x02\n" + + "\x14storage/traits.proto\x12\astorage\"\x80\x03\n" + "\x06Traits\x12G\n" + "\x0fmutability_mode\x18\x01 \x01(\x0e2\x1e.storage.Traits.MutabilityModeR\x0emutabilityMode\x12:\n" + "\n" + @@ -272,13 +278,14 @@ const file_storage_traits_proto_rawDesc = "" + "Visibility\x12\v\n" + "\aVISIBLE\x10\x00\x12\n" + "\n" + - "\x06HIDDEN\x10\x01\"P\n" + + "\x06HIDDEN\x10\x01\"]\n" + "\x06Origin\x12\x0e\n" + "\n" + "IMPERATIVE\x10\x00\x12\v\n" + "\aDEFAULT\x10\x01\x12\x0f\n" + "\vDECLARATIVE\x10\x02\x12\x18\n" + - "\x14DECLARATIVE_ORPHANED\x10\x03B.\n" + + "\x14DECLARATIVE_ORPHANED\x10\x03\x12\v\n" + + "\aDYNAMIC\x10\x04B.\n" + "\x19io.stackrox.proto.storageZ\x11./storage;storageb\x06proto3" var ( diff --git a/pkg/declarativeconfig/context.go b/pkg/declarativeconfig/context.go index 4402441a1a924..3ebb4135f2b69 100644 --- a/pkg/declarativeconfig/context.go +++ b/pkg/declarativeconfig/context.go @@ -41,7 +41,7 @@ func CanModifyResource(ctx context.Context, resource ResourceWithTraits) bool { return IsDeclarativeOrigin(resource) } if ctx.Value(originCheckerKey{}) == allowModifyDeclarativeOrImperative { - return IsDeclarativeOrigin(resource) || resource.GetTraits().GetOrigin() == storage.Traits_IMPERATIVE + return IsDeclarativeOrigin(resource) || IsImperativeOrigin(resource) || IsDynamicOrigin(resource) } - return resource.GetTraits().GetOrigin() == storage.Traits_IMPERATIVE + return IsImperativeOrigin(resource) || IsDynamicOrigin(resource) } diff --git a/pkg/declarativeconfig/context_test.go b/pkg/declarativeconfig/context_test.go index dab253f83c080..a412a27b7fa93 100644 --- a/pkg/declarativeconfig/context_test.go +++ b/pkg/declarativeconfig/context_test.go @@ -18,27 +18,34 @@ func (m *resourceWithTraitsMock) GetTraits() *storage.Traits { func TestContext(t *testing.T) { imperativeResource := &resourceWithTraitsMock{origin: storage.Traits_IMPERATIVE} + dynamicResource := &resourceWithTraitsMock{origin: storage.Traits_DYNAMIC} declarativeResource := &resourceWithTraitsMock{origin: storage.Traits_DECLARATIVE} defaultResource := &resourceWithTraitsMock{origin: storage.Traits_DEFAULT} ctx := context.Background() declarativeCtx := WithModifyDeclarativeResource(ctx) declarativeOrImperativeCtx := WithModifyDeclarativeOrImperative(ctx) - // 1. empty context can modify imperative origin + // 1.1. empty context can modify imperative origin assert.True(t, CanModifyResource(ctx, imperativeResource)) + // 1.2. empty context can modify dynamic origin + assert.True(t, CanModifyResource(ctx, dynamicResource)) // 2. empty context can't modify declarative origin assert.False(t, CanModifyResource(ctx, declarativeResource)) // 3. empty context can't modify default origin assert.False(t, CanModifyResource(ctx, defaultResource)) // 4. context.WithModifyDeclarativeResource can modify declarative origin assert.True(t, CanModifyResource(declarativeCtx, declarativeResource)) - // 5. context.WithModifyDeclarativeResource can't modify imperative origin + // 5.1. context.WithModifyDeclarativeResource can't modify imperative origin assert.False(t, CanModifyResource(declarativeCtx, imperativeResource)) + // 5.2. 
context.WithModifyDeclarativeResource can't modify dynamic origin + assert.False(t, CanModifyResource(declarativeCtx, dynamicResource)) // 6. context.WithModifyDeclarativeResource can't modify default origin assert.False(t, CanModifyResource(declarativeCtx, defaultResource)) // 7. context.WithModifyDeclarativeOrImperative can modify declarative origin assert.True(t, CanModifyResource(declarativeOrImperativeCtx, declarativeResource)) - // 8. context.WithModifyDeclarativeOrImperative can modify imperative origin + // 8.1. context.WithModifyDeclarativeOrImperative can modify imperative origin assert.True(t, CanModifyResource(declarativeOrImperativeCtx, imperativeResource)) + // 8.2. context.WithModifyDeclarativeOrImperative can modify dynamic origin + assert.True(t, CanModifyResource(declarativeOrImperativeCtx, dynamicResource)) // 9. context.WithModifyDeclarativeOrImperative can't modify default origin assert.False(t, CanModifyResource(declarativeOrImperativeCtx, defaultResource)) } diff --git a/pkg/declarativeconfig/origin.go b/pkg/declarativeconfig/origin.go index 3637d896ce89f..9e3b055e16787 100644 --- a/pkg/declarativeconfig/origin.go +++ b/pkg/declarativeconfig/origin.go @@ -7,7 +7,7 @@ import ( // VerifyReferencedResourceOrigin returns an error if resource is forbidden from referencing other resource. func VerifyReferencedResourceOrigin(referenced, referencing ResourceWithTraits, referencedName, referencingName string) error { - if !IsDeclarativeOrigin(referencing) || referenced.GetTraits().GetOrigin() != storage.Traits_IMPERATIVE { + if !IsDeclarativeOrigin(referencing) || (!IsImperativeOrigin(referenced) && !IsDynamicOrigin(referenced)) { return nil } // referenced is imperative or default, while referencing is not @@ -18,3 +18,13 @@ func VerifyReferencedResourceOrigin(referenced, referencing ResourceWithTraits, func IsDeclarativeOrigin(resource ResourceWithTraits) bool { return resource.GetTraits().GetOrigin() == storage.Traits_DECLARATIVE || resource.GetTraits().GetOrigin() == storage.Traits_DECLARATIVE_ORPHANED } + +// IsDynamicOrigin returns whether origin of resource is dynamic or not. +func IsDynamicOrigin(resource ResourceWithTraits) bool { + return resource.GetTraits().GetOrigin() == storage.Traits_DYNAMIC +} + +// IsImperativeOrigin returns whether origin of resource is imperative or not. 
+func IsImperativeOrigin(resource ResourceWithTraits) bool { + return resource.GetTraits().GetOrigin() == storage.Traits_IMPERATIVE +} diff --git a/pkg/declarativeconfig/origin_test.go b/pkg/declarativeconfig/origin_test.go index 369efdc7474fc..0ad1d22ad90b9 100644 --- a/pkg/declarativeconfig/origin_test.go +++ b/pkg/declarativeconfig/origin_test.go @@ -20,6 +20,9 @@ func TestVerifyReferencedResourceOrigin(t *testing.T) { declarativeTraits := &storage.Traits{ Origin: storage.Traits_DECLARATIVE, } + dynamicTraits := &storage.Traits{ + Origin: storage.Traits_DYNAMIC, + } imperativeTraits := &storage.Traits{ Origin: storage.Traits_IMPERATIVE, } @@ -35,16 +38,25 @@ func TestVerifyReferencedResourceOrigin(t *testing.T) { testNoError(t, declarativeTraits, orphanedTraits) testNoError(t, declarativeTraits, defaultTraits) testError(t, declarativeTraits, imperativeTraits) + testError(t, declarativeTraits, dynamicTraits) testNoError(t, orphanedTraits, declarativeTraits) testNoError(t, orphanedTraits, orphanedTraits) testNoError(t, orphanedTraits, defaultTraits) testError(t, orphanedTraits, imperativeTraits) + testError(t, orphanedTraits, dynamicTraits) testNoError(t, imperativeTraits, declarativeTraits) testNoError(t, imperativeTraits, orphanedTraits) testNoError(t, imperativeTraits, defaultTraits) testNoError(t, imperativeTraits, imperativeTraits) + testNoError(t, imperativeTraits, dynamicTraits) + + testNoError(t, dynamicTraits, declarativeTraits) + testNoError(t, dynamicTraits, orphanedTraits) + testNoError(t, dynamicTraits, defaultTraits) + testNoError(t, dynamicTraits, imperativeTraits) + testNoError(t, dynamicTraits, dynamicTraits) } func testError(t *testing.T, referencing *storage.Traits, referenced *storage.Traits) { diff --git a/proto/storage/proto.lock b/proto/storage/proto.lock index 76dfd308e6c5e..533752ba7c9dc 100644 --- a/proto/storage/proto.lock +++ b/proto/storage/proto.lock @@ -18590,6 +18590,10 @@ { "name": "DECLARATIVE_ORPHANED", "integer": 3 + }, + { + "name": "DYNAMIC", + "integer": 4 } ] } diff --git a/proto/storage/traits.proto b/proto/storage/traits.proto index 56db3bb85da57..6901bf9e84115 100644 --- a/proto/storage/traits.proto +++ b/proto/storage/traits.proto @@ -32,18 +32,21 @@ message Traits { Visibility visibility = 2; // Origin specifies the origin of an object. - // Objects can have four different origins: + // Objects can have five different origins: // - IMPERATIVE: the object was created via the API. This is assumed by default. // - DEFAULT: the object is a default object, such as default roles, access scopes etc. // - DECLARATIVE: the object is created via declarative configuration. // - DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object) + // - DYNAMIC: the object is created via an internal API, generated on the fly and meant to be ephemeral. // Based on the origin, different rules apply to the objects. // Objects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration. - // Additionally, they may not reference objects with the IMPERATIVE origin. + // Additionally, they may not reference objects with the IMPERATIVE or DYNAMIC origin. // Objects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration. // They may be referenced by all other objects. // Objects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration. 
// They may reference all other objects. + // Objects with the DYNAMIC origin are neither allowed to be modified via API, nor via declarative configuration. + // They may reference all other objects. // Objects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration. // DECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration. // Objects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore. @@ -53,6 +56,7 @@ message Traits { DEFAULT = 1; DECLARATIVE = 2; DECLARATIVE_ORPHANED = 3; + DYNAMIC = 4; } Origin origin = 3; } diff --git a/ui/apps/platform/src/types/traits.proto.ts b/ui/apps/platform/src/types/traits.proto.ts index fc759c6904a43..72f02b85f53bb 100644 --- a/ui/apps/platform/src/types/traits.proto.ts +++ b/ui/apps/platform/src/types/traits.proto.ts @@ -5,5 +5,10 @@ export type Traits = { }; export type TraitsMutabilityMode = 'ALLOW_MUTATE' | 'ALLOW_MUTATE_FORCED'; -export type TraitsOrigin = 'IMPERATIVE' | 'DECLARATIVE' | 'DEFAULT' | 'DECLARATIVE_ORPHANED'; +export type TraitsOrigin = + | 'IMPERATIVE' + | 'DECLARATIVE' + | 'DEFAULT' + | 'DECLARATIVE_ORPHANED' + | 'DYNAMIC'; export type TraitsVisibility = 'VISIBLE' | 'HIDDEN'; diff --git a/ui/apps/platform/src/utils/traits.utils.ts b/ui/apps/platform/src/utils/traits.utils.ts index 752fefc48b9d7..224af1919ce41 100644 --- a/ui/apps/platform/src/utils/traits.utils.ts +++ b/ui/apps/platform/src/utils/traits.utils.ts @@ -9,6 +9,7 @@ export const traitsOriginLabels = { IMPERATIVE: 'User', DECLARATIVE: 'Declarative', DECLARATIVE_ORPHANED: 'Declarative, Orphaned', + DYNAMIC: 'Dynamic', } as const; export const originLabelColours = { From 1cd508073ddd33cb9b29adc68e06ee93e4974cd1 Mon Sep 17 00:00:00 2001 From: Yi Li Date: Tue, 27 Jan 2026 08:44:49 -0600 Subject: [PATCH 032/232] ROX-32757: added LayerType (#18580) --- .../resolvers/image_components_utilities.go | 2 +- .../datastore_impl_flat_postgres_test.go | 1 + .../datastore/store/common/v2/parts_test.go | 8 +- .../datastore/store/common/v2/split_v2.go | 7 +- .../v2/datastore/store/postgres/store.go | 8 +- .../datastore/store/common/parts_test.go | 8 +- .../imagev2/datastore/store/common/split.go | 7 +- generated/storage/image_component.pb.go | 104 ++++++++++++++---- .../storage/image_component_vtproto.pb.go | 52 +++++++++ pkg/postgres/schema/image_component_v2.go | 2 +- pkg/search/options.go | 14 +-- proto/storage/image_component.proto | 9 +- proto/storage/proto.lock | 27 ++++- 13 files changed, 203 insertions(+), 46 deletions(-) diff --git a/central/graphql/resolvers/image_components_utilities.go b/central/graphql/resolvers/image_components_utilities.go index e9db5ed1e5ffd..05373ab599c1a 100644 --- a/central/graphql/resolvers/image_components_utilities.go +++ b/central/graphql/resolvers/image_components_utilities.go @@ -130,5 +130,5 @@ func (resolver *imageComponentV2Resolver) Version(_ context.Context) string { } func (resolver *imageComponentV2Resolver) InBaseImageLayer(ctx context.Context) bool { - return resolver.data.GetFromBaseImage() + return resolver.data.GetLayerType() == storage.LayerType_BASE_IMAGE } diff --git a/central/image/datastore/datastore_impl_flat_postgres_test.go b/central/image/datastore/datastore_impl_flat_postgres_test.go index de8570adc0b0a..7d14fc7fd2135 100644 --- a/central/image/datastore/datastore_impl_flat_postgres_test.go +++ b/central/image/datastore/datastore_impl_flat_postgres_test.go 
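Before the test and store hunks that follow, a short illustration (not part of the patch) of what this commit changes: `from_base_image` is deprecated in favour of the `layer_type` enum, the GraphQL resolver above now derives `InBaseImageLayer` from that enum, and `GenerateImageComponentV2` (see the split_v2.go hunk below) picks the value. Both helper names here are hypothetical and simply restate those two decisions.

```go
package main

import (
	"fmt"

	"github.com/stackrox/rox/generated/storage"
)

// layerTypeFor restates the decision added to GenerateImageComponentV2:
// components default to APPLICATION and are marked BASE_IMAGE only when the
// image carries base-image info (len(image.GetBaseImageInfo()) > 0).
func layerTypeFor(hasBaseImageInfo bool) storage.LayerType {
	if hasBaseImageInfo {
		return storage.LayerType_BASE_IMAGE
	}
	return storage.LayerType_APPLICATION
}

// inBaseImageLayer mirrors the updated resolver: the boolean view of the data
// is computed from the enum rather than the deprecated FromBaseImage flag.
func inBaseImageLayer(c *storage.ImageComponentV2) bool {
	return c.GetLayerType() == storage.LayerType_BASE_IMAGE
}

func main() {
	component := &storage.ImageComponentV2{LayerType: layerTypeFor(true)}
	fmt.Println(inBaseImageLayer(component)) // true
}
```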
@@ -636,6 +636,7 @@ func cloneAndUpdateRiskPriority(image *storage.Image) *storage.Image { for _, component := range cloned.GetScan().GetComponents() { component.Priority = 1 } + cloned.LastUpdated = image.GetLastUpdated() return cloned } diff --git a/central/image/datastore/store/common/v2/parts_test.go b/central/image/datastore/store/common/v2/parts_test.go index 692c241aa0de5..a67fbc13d45b5 100644 --- a/central/image/datastore/store/common/v2/parts_test.go +++ b/central/image/datastore/store/common/v2/parts_test.go @@ -332,7 +332,7 @@ func TestSplitAndMergeImage(t *testing.T) { HasLayerIndex: &storage.ImageComponentV2_LayerIndex{ LayerIndex: 1, }, - FromBaseImage: true, + LayerType: storage.LayerType_BASE_IMAGE, }, Children: []CVEParts{}, }, @@ -345,7 +345,7 @@ func TestSplitAndMergeImage(t *testing.T) { HasLayerIndex: &storage.ImageComponentV2_LayerIndex{ LayerIndex: 3, }, - FromBaseImage: true, + LayerType: storage.LayerType_BASE_IMAGE, }, Children: []CVEParts{ { @@ -406,7 +406,7 @@ func TestSplitAndMergeImage(t *testing.T) { HasLayerIndex: &storage.ImageComponentV2_LayerIndex{ LayerIndex: 2, }, - FromBaseImage: true, + LayerType: storage.LayerType_BASE_IMAGE, }, Children: []CVEParts{ { @@ -450,7 +450,7 @@ func TestSplitAndMergeImage(t *testing.T) { HasLayerIndex: &storage.ImageComponentV2_LayerIndex{ LayerIndex: 2, }, - FromBaseImage: true, + LayerType: storage.LayerType_BASE_IMAGE, }, Children: []CVEParts{ { diff --git a/central/image/datastore/store/common/v2/split_v2.go b/central/image/datastore/store/common/v2/split_v2.go index 19a92a7c43b4b..3d103025269ee 100644 --- a/central/image/datastore/store/common/v2/split_v2.go +++ b/central/image/datastore/store/common/v2/split_v2.go @@ -96,7 +96,10 @@ func GenerateImageComponentV2(os string, image *storage.Image, index int, from * LayerIndex: from.GetLayerIndex(), } } - // TODO ROX-31847 compute image component base image layer type - ret.FromBaseImage = len(image.GetBaseImageInfo()) > 0 + + ret.LayerType = storage.LayerType_APPLICATION + if len(image.GetBaseImageInfo()) > 0 { + ret.LayerType = storage.LayerType_BASE_IMAGE + } return ret, nil } diff --git a/central/imagecomponent/v2/datastore/store/postgres/store.go b/central/imagecomponent/v2/datastore/store/postgres/store.go index 0faadd18f8b83..40bb3e87d45ec 100644 --- a/central/imagecomponent/v2/datastore/store/postgres/store.go +++ b/central/imagecomponent/v2/datastore/store/postgres/store.go @@ -111,11 +111,11 @@ func insertIntoImageComponentV2(batch *pgx.Batch, obj *storage.ImageComponentV2) obj.GetImageId(), obj.GetLocation(), pgutils.NilOrString(obj.GetImageIdV2()), - obj.GetFromBaseImage(), + obj.GetLayerType(), serialized, } - finalStr := "INSERT INTO image_component_v2 (Id, Name, Version, Priority, Source, RiskScore, TopCvss, OperatingSystem, ImageId, Location, ImageIdV2, FromBaseImage, serialized) VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13) ON CONFLICT(Id) DO UPDATE SET Id = EXCLUDED.Id, Name = EXCLUDED.Name, Version = EXCLUDED.Version, Priority = EXCLUDED.Priority, Source = EXCLUDED.Source, RiskScore = EXCLUDED.RiskScore, TopCvss = EXCLUDED.TopCvss, OperatingSystem = EXCLUDED.OperatingSystem, ImageId = EXCLUDED.ImageId, Location = EXCLUDED.Location, ImageIdV2 = EXCLUDED.ImageIdV2, FromBaseImage = EXCLUDED.FromBaseImage, serialized = EXCLUDED.serialized" + finalStr := "INSERT INTO image_component_v2 (Id, Name, Version, Priority, Source, RiskScore, TopCvss, OperatingSystem, ImageId, Location, ImageIdV2, LayerType, serialized) VALUES($1, $2, $3, $4, $5, 
$6, $7, $8, $9, $10, $11, $12, $13) ON CONFLICT(Id) DO UPDATE SET Id = EXCLUDED.Id, Name = EXCLUDED.Name, Version = EXCLUDED.Version, Priority = EXCLUDED.Priority, Source = EXCLUDED.Source, RiskScore = EXCLUDED.RiskScore, TopCvss = EXCLUDED.TopCvss, OperatingSystem = EXCLUDED.OperatingSystem, ImageId = EXCLUDED.ImageId, Location = EXCLUDED.Location, ImageIdV2 = EXCLUDED.ImageIdV2, LayerType = EXCLUDED.LayerType, serialized = EXCLUDED.serialized" batch.Queue(finalStr, values...) return nil @@ -133,7 +133,7 @@ var copyColsImageComponentV2 = []string{ "imageid", "location", "imageidv2", - "frombaseimage", + "layertype", "serialized", } @@ -179,7 +179,7 @@ func copyFromImageComponentV2(ctx context.Context, s pgSearch.Deleter, tx *postg obj.GetImageId(), obj.GetLocation(), pgutils.NilOrString(obj.GetImageIdV2()), - obj.GetFromBaseImage(), + obj.GetLayerType(), serialized, }, nil }) diff --git a/central/imagev2/datastore/store/common/parts_test.go b/central/imagev2/datastore/store/common/parts_test.go index 6a404295e7dba..c720ac0345818 100644 --- a/central/imagev2/datastore/store/common/parts_test.go +++ b/central/imagev2/datastore/store/common/parts_test.go @@ -342,7 +342,7 @@ func TestSplitAndMergeImageV2(t *testing.T) { HasLayerIndex: &storage.ImageComponentV2_LayerIndex{ LayerIndex: 1, }, - FromBaseImage: true, + LayerType: storage.LayerType_BASE_IMAGE, }, Children: []CVEPartsV2{}, }, @@ -355,7 +355,7 @@ func TestSplitAndMergeImageV2(t *testing.T) { HasLayerIndex: &storage.ImageComponentV2_LayerIndex{ LayerIndex: 3, }, - FromBaseImage: true, + LayerType: storage.LayerType_BASE_IMAGE, }, Children: []CVEPartsV2{ { @@ -416,7 +416,7 @@ func TestSplitAndMergeImageV2(t *testing.T) { HasLayerIndex: &storage.ImageComponentV2_LayerIndex{ LayerIndex: 2, }, - FromBaseImage: true, + LayerType: storage.LayerType_BASE_IMAGE, }, Children: []CVEPartsV2{ { @@ -460,7 +460,7 @@ func TestSplitAndMergeImageV2(t *testing.T) { HasLayerIndex: &storage.ImageComponentV2_LayerIndex{ LayerIndex: 2, }, - FromBaseImage: true, + LayerType: storage.LayerType_BASE_IMAGE, }, Children: []CVEPartsV2{ { diff --git a/central/imagev2/datastore/store/common/split.go b/central/imagev2/datastore/store/common/split.go index b581958775dea..11c5ebd383ddf 100644 --- a/central/imagev2/datastore/store/common/split.go +++ b/central/imagev2/datastore/store/common/split.go @@ -100,7 +100,10 @@ func GenerateImageComponentV2(os string, image *storage.ImageV2, index int, from LayerIndex: from.GetLayerIndex(), } } - // TODO ROX-31847 compute image component base image layer type - ret.FromBaseImage = len(image.GetBaseImageInfo()) > 0 + + ret.LayerType = storage.LayerType_APPLICATION + if len(image.GetBaseImageInfo()) > 0 { + ret.LayerType = storage.LayerType_BASE_IMAGE + } return ret, nil } diff --git a/generated/storage/image_component.pb.go b/generated/storage/image_component.pb.go index 54bf3a00fa385..e694bb0c17edb 100644 --- a/generated/storage/image_component.pb.go +++ b/generated/storage/image_component.pb.go @@ -21,6 +21,52 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type LayerType int32 + +const ( + LayerType_APPLICATION LayerType = 0 + LayerType_BASE_IMAGE LayerType = 1 +) + +// Enum value maps for LayerType. 
+var ( + LayerType_name = map[int32]string{ + 0: "APPLICATION", + 1: "BASE_IMAGE", + } + LayerType_value = map[string]int32{ + "APPLICATION": 0, + "BASE_IMAGE": 1, + } +) + +func (x LayerType) Enum() *LayerType { + p := new(LayerType) + *p = x + return p +} + +func (x LayerType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (LayerType) Descriptor() protoreflect.EnumDescriptor { + return file_storage_image_component_proto_enumTypes[0].Descriptor() +} + +func (LayerType) Type() protoreflect.EnumType { + return &file_storage_image_component_proto_enumTypes[0] +} + +func (x LayerType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use LayerType.Descriptor instead. +func (LayerType) EnumDescriptor() ([]byte, []int) { + return file_storage_image_component_proto_rawDescGZIP(), []int{0} +} + // This proto is deprecated and replaced by ImageComponentV2 // // Deprecated: Marked as deprecated in storage/image_component.proto. @@ -191,8 +237,10 @@ type ImageComponentV2 struct { HasLayerIndex isImageComponentV2_HasLayerIndex `protobuf_oneof:"has_layer_index"` Location string `protobuf:"bytes,12,opt,name=location,proto3" json:"location,omitempty" search:"Component Location,hidden"` // @gotags: search:"Component Location,hidden" Architecture string `protobuf:"bytes,13,opt,name=architecture,proto3" json:"architecture,omitempty"` - ImageIdV2 string `protobuf:"bytes,14,opt,name=image_id_v2,json=imageIdV2,proto3" json:"image_id_v2,omitempty" sql:"fk(ImageV2:id),index=btree,allow-null"` // @gotags: sql:"fk(ImageV2:id),index=btree,allow-null" - FromBaseImage bool `protobuf:"varint,15,opt,name=from_base_image,json=fromBaseImage,proto3" json:"from_base_image,omitempty" search:"Component From Base Image,hidden"` // @gotags: search:"Component From Base Image,hidden" + ImageIdV2 string `protobuf:"bytes,14,opt,name=image_id_v2,json=imageIdV2,proto3" json:"image_id_v2,omitempty" sql:"fk(ImageV2:id),index=btree,allow-null"` // @gotags: sql:"fk(ImageV2:id),index=btree,allow-null" + // Deprecated: Marked as deprecated in storage/image_component.proto. + FromBaseImage bool `protobuf:"varint,15,opt,name=from_base_image,json=fromBaseImage,proto3" json:"from_base_image,omitempty"` + LayerType LayerType `protobuf:"varint,16,opt,name=layer_type,json=layerType,proto3,enum=storage.LayerType" json:"layer_type,omitempty" search:"Component Layer Type"` // @gotags: search:"Component Layer Type" unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -344,6 +392,7 @@ func (x *ImageComponentV2) GetImageIdV2() string { return "" } +// Deprecated: Marked as deprecated in storage/image_component.proto. 
func (x *ImageComponentV2) GetFromBaseImage() bool { if x != nil { return x.FromBaseImage @@ -351,6 +400,13 @@ func (x *ImageComponentV2) GetFromBaseImage() bool { return false } +func (x *ImageComponentV2) GetLayerType() LayerType { + if x != nil { + return x.LayerType + } + return LayerType_APPLICATION +} + type isImageComponentV2_SetTopCvss interface { isImageComponentV2_SetTopCvss() } @@ -389,7 +445,7 @@ const file_storage_image_component_proto_rawDesc = "" + "\bfixed_by\x18\t \x01(\tR\afixedBy\x12)\n" + "\x10operating_system\x18\n" + " \x01(\tR\x0foperatingSystem:\x02\x18\x01B\x0e\n" + - "\fset_top_cvss\"\x88\x04\n" + + "\fset_top_cvss\"\xbf\x04\n" + "\x10ImageComponentV2\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n" + "\x04name\x18\x02 \x01(\tR\x04name\x12\x18\n" + @@ -407,10 +463,16 @@ const file_storage_image_component_proto_rawDesc = "" + "layerIndex\x12\x1a\n" + "\blocation\x18\f \x01(\tR\blocation\x12\"\n" + "\farchitecture\x18\r \x01(\tR\farchitecture\x12\x1e\n" + - "\vimage_id_v2\x18\x0e \x01(\tR\timageIdV2\x12&\n" + - "\x0ffrom_base_image\x18\x0f \x01(\bR\rfromBaseImageB\x0e\n" + + "\vimage_id_v2\x18\x0e \x01(\tR\timageIdV2\x12*\n" + + "\x0ffrom_base_image\x18\x0f \x01(\bB\x02\x18\x01R\rfromBaseImage\x121\n" + + "\n" + + "layer_type\x18\x10 \x01(\x0e2\x12.storage.LayerTypeR\tlayerTypeB\x0e\n" + "\fset_top_cvssB\x11\n" + - "\x0fhas_layer_indexB.\n" + + "\x0fhas_layer_index*,\n" + + "\tLayerType\x12\x0f\n" + + "\vAPPLICATION\x10\x00\x12\x0e\n" + + "\n" + + "BASE_IMAGE\x10\x01B.\n" + "\x19io.stackrox.proto.storageZ\x11./storage;storageb\x06proto3" var ( @@ -425,22 +487,25 @@ func file_storage_image_component_proto_rawDescGZIP() []byte { return file_storage_image_component_proto_rawDescData } +var file_storage_image_component_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_storage_image_component_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_storage_image_component_proto_goTypes = []any{ - (*ImageComponent)(nil), // 0: storage.ImageComponent - (*ImageComponentV2)(nil), // 1: storage.ImageComponentV2 - (*License)(nil), // 2: storage.License - (SourceType)(0), // 3: storage.SourceType + (LayerType)(0), // 0: storage.LayerType + (*ImageComponent)(nil), // 1: storage.ImageComponent + (*ImageComponentV2)(nil), // 2: storage.ImageComponentV2 + (*License)(nil), // 3: storage.License + (SourceType)(0), // 4: storage.SourceType } var file_storage_image_component_proto_depIdxs = []int32{ - 2, // 0: storage.ImageComponent.license:type_name -> storage.License - 3, // 1: storage.ImageComponent.source:type_name -> storage.SourceType - 3, // 2: storage.ImageComponentV2.source:type_name -> storage.SourceType - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name + 3, // 0: storage.ImageComponent.license:type_name -> storage.License + 4, // 1: storage.ImageComponent.source:type_name -> storage.SourceType + 4, // 2: storage.ImageComponentV2.source:type_name -> storage.SourceType + 0, // 3: storage.ImageComponentV2.layer_type:type_name -> storage.LayerType + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name } func init() { 
file_storage_image_component_proto_init() } @@ -461,13 +526,14 @@ func file_storage_image_component_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_storage_image_component_proto_rawDesc), len(file_storage_image_component_proto_rawDesc)), - NumEnums: 0, + NumEnums: 1, NumMessages: 2, NumExtensions: 0, NumServices: 0, }, GoTypes: file_storage_image_component_proto_goTypes, DependencyIndexes: file_storage_image_component_proto_depIdxs, + EnumInfos: file_storage_image_component_proto_enumTypes, MessageInfos: file_storage_image_component_proto_msgTypes, }.Build() File_storage_image_component_proto = out.File diff --git a/generated/storage/image_component_vtproto.pb.go b/generated/storage/image_component_vtproto.pb.go index 3cde9af3e6266..40d0e0cae62a9 100644 --- a/generated/storage/image_component_vtproto.pb.go +++ b/generated/storage/image_component_vtproto.pb.go @@ -79,6 +79,7 @@ func (m *ImageComponentV2) CloneVT() *ImageComponentV2 { r.Architecture = m.Architecture r.ImageIdV2 = m.ImageIdV2 r.FromBaseImage = m.FromBaseImage + r.LayerType = m.LayerType if m.SetTopCvss != nil { r.SetTopCvss = m.SetTopCvss.(interface { CloneVT() isImageComponentV2_SetTopCvss @@ -259,6 +260,9 @@ func (this *ImageComponentV2) EqualVT(that *ImageComponentV2) bool { if this.FromBaseImage != that.FromBaseImage { return false } + if this.LayerType != that.LayerType { + return false + } return string(this.unknownFields) == string(that.unknownFields) } @@ -467,6 +471,13 @@ func (m *ImageComponentV2) MarshalToSizedBufferVT(dAtA []byte) (int, error) { } i -= size } + if m.LayerType != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.LayerType)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } if m.FromBaseImage { i-- if m.FromBaseImage { @@ -699,6 +710,9 @@ func (m *ImageComponentV2) SizeVT() (n int) { if m.FromBaseImage { n += 2 } + if m.LayerType != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.LayerType)) + } n += len(m.unknownFields) return n } @@ -1445,6 +1459,25 @@ func (m *ImageComponentV2) UnmarshalVT(dAtA []byte) error { } } m.FromBaseImage = bool(v != 0) + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LayerType", wireType) + } + m.LayerType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LayerType |= LayerType(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -2247,6 +2280,25 @@ func (m *ImageComponentV2) UnmarshalVTUnsafe(dAtA []byte) error { } } m.FromBaseImage = bool(v != 0) + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LayerType", wireType) + } + m.LayerType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LayerType |= LayerType(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) diff --git a/pkg/postgres/schema/image_component_v2.go b/pkg/postgres/schema/image_component_v2.go index 91de60c2be91c..f8d9f6db340c9 100644 --- a/pkg/postgres/schema/image_component_v2.go +++ b/pkg/postgres/schema/image_component_v2.go @@ -72,7 +72,7 @@ type ImageComponentV2 struct { ImageID string 
`gorm:"column:imageid;type:varchar;index:imagecomponentv2_imageid,type:btree"` Location string `gorm:"column:location;type:varchar"` ImageIDV2 string `gorm:"column:imageidv2;type:varchar;index:imagecomponentv2_imageidv2,type:btree"` - FromBaseImage bool `gorm:"column:frombaseimage;type:bool"` + LayerType storage.LayerType `gorm:"column:layertype;type:integer"` Serialized []byte `gorm:"column:serialized;type:bytea"` ImagesRef Images `gorm:"foreignKey:imageid;references:id;belongsTo;constraint:OnDelete:CASCADE"` ImagesV2Ref ImagesV2 `gorm:"foreignKey:imageidv2;references:id;belongsTo;constraint:OnDelete:CASCADE"` diff --git a/pkg/search/options.go b/pkg/search/options.go index 3a62b574c0fee..fb4d931804b2c 100644 --- a/pkg/search/options.go +++ b/pkg/search/options.go @@ -332,13 +332,13 @@ var ( Inactive = newFieldLabel("Inactive Deployment") // Risk Search Fields - RiskScore = newFieldLabel("Risk Score") - NodeRiskScore = newFieldLabel("Node Risk Score") - DeploymentRiskScore = newFieldLabel("Deployment Risk Score") - ImageRiskScore = newFieldLabel("Image Risk Score") - ComponentRiskScore = newFieldLabel("Component Risk Score") - RiskSubjectType = newFieldLabel("Risk Subject Type") - ComponentFromBaseImage = newFieldLabel("Component From Base Image") + RiskScore = newFieldLabel("Risk Score") + NodeRiskScore = newFieldLabel("Node Risk Score") + DeploymentRiskScore = newFieldLabel("Deployment Risk Score") + ImageRiskScore = newFieldLabel("Image Risk Score") + ComponentRiskScore = newFieldLabel("Component Risk Score") + RiskSubjectType = newFieldLabel("Risk Subject Type") + ComponentLayerType = newFieldLabel("Component Layer Type") PolicyLastUpdated = newFieldLabel("Policy Last Updated") diff --git a/proto/storage/image_component.proto b/proto/storage/image_component.proto index 9ed412debb3fc..30e5d434d9649 100644 --- a/proto/storage/image_component.proto +++ b/proto/storage/image_component.proto @@ -51,5 +51,12 @@ message ImageComponentV2 { string architecture = 13; string image_id_v2 = 14; // @gotags: sql:"fk(ImageV2:id),index=btree,allow-null" - bool from_base_image = 15; // @gotags: search:"Component From Base Image,hidden" + bool from_base_image = 15 [deprecated = true]; + + LayerType layer_type = 16; // @gotags: search:"Component Layer Type" +} + +enum LayerType { + APPLICATION = 0; + BASE_IMAGE = 1; } diff --git a/proto/storage/proto.lock b/proto/storage/proto.lock index 533752ba7c9dc..a806224d90996 100644 --- a/proto/storage/proto.lock +++ b/proto/storage/proto.lock @@ -9521,6 +9521,20 @@ { "protopath": "image_component.proto", "def": { + "enums": [ + { + "name": "LayerType", + "enum_fields": [ + { + "name": "APPLICATION" + }, + { + "name": "BASE_IMAGE", + "integer": 1 + } + ] + } + ], "messages": [ { "name": "ImageComponent", @@ -9668,7 +9682,18 @@ { "id": 15, "name": "from_base_image", - "type": "bool" + "type": "bool", + "options": [ + { + "name": "deprecated", + "value": "true" + } + ] + }, + { + "id": 16, + "name": "layer_type", + "type": "LayerType" } ] } From 2143a778b0e05298e1a9a0f59431ddff2001437d Mon Sep 17 00:00:00 2001 From: Piotr Rygielski <114479+vikin91@users.noreply.github.com> Date: Tue, 27 Jan 2026 16:26:59 +0100 Subject: [PATCH 033/232] ROX-32316: Add support for the new ACK to Sensor (#18228) --- compliance/compliance.go | 9 + .../compliance/node_inventory_handler_impl.go | 115 +++++++++- .../compliance/node_inventory_handler_test.go | 210 +++++++++++++++++- .../virtualmachine/index/handler_impl.go | 28 ++- .../virtualmachine/index/handler_impl_test.go | 91 +++++++- 
.../common/virtualmachine/metrics/metrics.go | 12 + 6 files changed, 453 insertions(+), 12 deletions(-) diff --git a/compliance/compliance.go b/compliance/compliance.go index b6f78f2b7ac08..d467ddf18ec37 100644 --- a/compliance/compliance.go +++ b/compliance/compliance.go @@ -365,6 +365,15 @@ func (c *Compliance) runRecv(ctx context.Context, client sensor.ComplianceServic default: log.Errorf("Unknown ACK Action: %s", t.Ack.GetAction()) } + case *sensor.MsgToCompliance_ComplianceAck: + complianceAck := t.ComplianceAck + log.Debugf("Received ComplianceACK: type=%s, action=%s, resource_id=%s, reason=%s", + complianceAck.GetMessageType(), + complianceAck.GetAction(), + complianceAck.GetResourceId(), + complianceAck.GetReason(), + ) + // TODO: Handle ComplianceACK message from Sensor/Central 4.10. default: utils.Should(errors.Errorf("Unhandled msg type: %T", t)) } diff --git a/sensor/common/compliance/node_inventory_handler_impl.go b/sensor/common/compliance/node_inventory_handler_impl.go index df37966bf0e6f..7c38e2463beb5 100644 --- a/sensor/common/compliance/node_inventory_handler_impl.go +++ b/sensor/common/compliance/node_inventory_handler_impl.go @@ -66,7 +66,7 @@ func (c *nodeInventoryHandlerImpl) Stopped() concurrency.ReadOnlyErrorSignal { } func (c *nodeInventoryHandlerImpl) Capabilities() []centralsensor.SensorCapability { - return nil + return []centralsensor.SensorCapability{centralsensor.SensorACKSupport} } // ResponsesC returns a channel with messages to Central. It must be called after Start() for the channel to be not nil @@ -115,15 +115,79 @@ func (c *nodeInventoryHandlerImpl) Notify(e common.SensorComponentEvent) { } func (c *nodeInventoryHandlerImpl) Accepts(msg *central.MsgToSensor) bool { - return msg.GetNodeInventoryAck() != nil + if msg.GetNodeInventoryAck() != nil { + return true + } + if sensorAck := msg.GetSensorAck(); sensorAck != nil { + switch sensorAck.GetMessageType() { + case central.SensorACK_NODE_INVENTORY, central.SensorACK_NODE_INDEX_REPORT: + return true + } + } + return false } func (c *nodeInventoryHandlerImpl) ProcessMessage(_ context.Context, msg *central.MsgToSensor) error { - ackMsg := msg.GetNodeInventoryAck() - if ackMsg == nil { + // Handle new SensorACK message (from Central 4.10+) + if sensorAck := msg.GetSensorAck(); sensorAck != nil { + return c.processSensorACK(sensorAck) + } + + // Handle legacy NodeInventoryACK message (from Central 4.9 and earlier) + if ackMsg := msg.GetNodeInventoryAck(); ackMsg != nil { + return c.processNodeInventoryACK(ackMsg) + } + + return nil +} + +// processSensorACK handles the new generic SensorACK message from Central. +// Only node-related ACK/NACK messages (NODE_INVENTORY, NODE_INDEX_REPORT) are forwarded to Compliance. +// All other message types are ignored - they should be handled by their respective handlers. 
+func (c *nodeInventoryHandlerImpl) processSensorACK(sensorAck *central.SensorACK) error { + log.Debugf("Received SensorACK message: type=%s, action=%s, resource_id=%s, reason=%s", + sensorAck.GetMessageType(), sensorAck.GetAction(), sensorAck.GetResourceId(), sensorAck.GetReason()) + + metrics.ObserveNodeScanningAck(sensorAck.GetResourceId(), + sensorAck.GetAction().String(), + sensorAck.GetMessageType().String(), + metrics.AckOperationReceive, + "", metrics.AckOriginSensor) + + // Only handle node-related message types - all others are handled by their respective handlers + var messageType sensor.MsgToCompliance_ComplianceACK_MessageType + switch sensorAck.GetMessageType() { + case central.SensorACK_NODE_INVENTORY: + messageType = sensor.MsgToCompliance_ComplianceACK_NODE_INVENTORY + case central.SensorACK_NODE_INDEX_REPORT: + messageType = sensor.MsgToCompliance_ComplianceACK_NODE_INDEX_REPORT + default: + // Not a node-related message - ignore it (handled by other handlers like VM handler) + log.Debugf("Ignoring SensorACK message type %s - not handled by node inventory handler", sensorAck.GetMessageType()) + return nil + } + + // Map central.SensorACK action to sensor.ComplianceACK action + var action sensor.MsgToCompliance_ComplianceACK_Action + switch sensorAck.GetAction() { + case central.SensorACK_ACK: + action = sensor.MsgToCompliance_ComplianceACK_ACK + case central.SensorACK_NACK: + action = sensor.MsgToCompliance_ComplianceACK_NACK + default: + log.Debugf("Ignoring SensorACK message with unknown action %s: type=%s, resource_id=%s, reason=%s", + sensorAck.GetAction(), sensorAck.GetMessageType(), sensorAck.GetResourceId(), sensorAck.GetReason()) return nil } - log.Debugf("Received node-scanning-ACK message of type %s, action %s for node %s", + + c.sendComplianceAckToCompliance(sensorAck.GetResourceId(), action, messageType, sensorAck.GetReason()) + return nil +} + +// processNodeInventoryACK handles the legacy NodeInventoryACK message from Central 4.9 and earlier. +// It forwards the ACK/NACK to Compliance using the legacy NodeInventoryACK message type. +func (c *nodeInventoryHandlerImpl) processNodeInventoryACK(ackMsg *central.NodeInventoryACK) error { + log.Debugf("Received legacy node-scanning-ACK message of type %s, action %s for node %s", ackMsg.GetMessageType(), ackMsg.GetAction(), ackMsg.GetNodeName()) metrics.ObserveNodeScanningAck(ackMsg.GetNodeName(), ackMsg.GetAction().String(), @@ -284,6 +348,47 @@ func (c *nodeInventoryHandlerImpl) sendAckToCompliance( reason, metrics.AckOriginSensor) } +// sendComplianceAckToCompliance sends the new ComplianceACK message to Compliance. +// This is used for the new SensorACK message from Central 4.10+. 
+func (c *nodeInventoryHandlerImpl) sendComplianceAckToCompliance( + resourceID string, + action sensor.MsgToCompliance_ComplianceACK_Action, + messageType sensor.MsgToCompliance_ComplianceACK_MessageType, + reason string, +) { + select { + case <-c.stopper.Flow().StopRequested(): + log.Debugf("Skipped sending ComplianceACK (stop requested): type=%s, action=%s, resource_id=%s, reason=%s", + messageType, action, resourceID, reason) + case c.toCompliance <- common.MessageToComplianceWithAddress{ + Msg: &sensor.MsgToCompliance{ + Msg: &sensor.MsgToCompliance_ComplianceAck{ + ComplianceAck: &sensor.MsgToCompliance_ComplianceACK{ + Action: action, + MessageType: messageType, + ResourceId: resourceID, + Reason: reason, + }, + }, + }, + Hostname: resourceID, // For node-based messages, resourceID is the node name + Broadcast: resourceID == "", + }: + log.Debugf("Sent ComplianceACK to Compliance: type=%s, action=%s, resource_id=%s, reason=%s", + messageType, action, resourceID, reason) + + // Record old metric for compatiblity. + // Note the new 'reason' is set in Central and is a string, not an enum, thus hardcoding here to 'forward'. + // The new metric for the SensorACK records the reason fully (as a string). + metrics.ObserveNodeScanningAck(resourceID, + action.String(), + messageType.String(), + metrics.AckOperationSend, + metrics.AckReasonForwardingFromCentral, + metrics.AckOriginSensor) + } +} + func (c *nodeInventoryHandlerImpl) sendNodeInventory(toC chan<- *message.ExpiringMessage, inventory *storage.NodeInventory) { if inventory == nil { return diff --git a/sensor/common/compliance/node_inventory_handler_test.go b/sensor/common/compliance/node_inventory_handler_test.go index fda05bb855ffc..ef2884645dcb6 100644 --- a/sensor/common/compliance/node_inventory_handler_test.go +++ b/sensor/common/compliance/node_inventory_handler_test.go @@ -12,6 +12,7 @@ import ( v4 "github.com/stackrox/rox/generated/internalapi/scanner/v4" "github.com/stackrox/rox/generated/internalapi/sensor" "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/centralsensor" "github.com/stackrox/rox/pkg/concurrency" "github.com/stackrox/rox/pkg/protocompat" "github.com/stackrox/rox/pkg/testutils/goleak" @@ -219,7 +220,9 @@ func (s *NodeInventoryHandlerTestSuite) TestCapabilities() { reports := make(chan *index.IndexReportWrap) defer close(reports) h := NewNodeInventoryHandler(inventories, reports, &mockAlwaysHitNodeIDMatcher{}, &mockRHCOSNodeMatcher{}) - s.Nil(h.Capabilities()) + caps := h.Capabilities() + s.Require().Len(caps, 1) + s.Equal(centralsensor.SensorACKSupport, caps[0]) } func (s *NodeInventoryHandlerTestSuite) TestResponsesCShouldPanicWhenNotStarted() { @@ -372,6 +375,211 @@ func (s *NodeInventoryHandlerTestSuite) TestHandlerCentralACKsToCompliance() { } +// TestHandlerSensorACKsToCompliance tests the new SensorACK message handling. +// Node-related SensorACK messages from Central 4.10+ should be forwarded to Compliance as ComplianceACK. +// Non-node messages (like VM_INDEX_REPORT) should be ignored. 
+func (s *NodeInventoryHandlerTestSuite) TestHandlerSensorACKsToCompliance() { + cases := map[string]struct { + sensorACK *central.SensorACK + shouldForward bool // true if message should be forwarded to Compliance + expectedAction sensor.MsgToCompliance_ComplianceACK_Action + expectedMessageType sensor.MsgToCompliance_ComplianceACK_MessageType + expectedReason string + expectedHostname string + expectedBroadcast bool + }{ + "NODE_INVENTORY ACK should be forwarded": { + sensorACK: ¢ral.SensorACK{ + Action: central.SensorACK_ACK, + MessageType: central.SensorACK_NODE_INVENTORY, + ResourceId: "node-1", + }, + shouldForward: true, + expectedAction: sensor.MsgToCompliance_ComplianceACK_ACK, + expectedMessageType: sensor.MsgToCompliance_ComplianceACK_NODE_INVENTORY, + }, + "NODE_INVENTORY NACK should be forwarded with reason": { + sensorACK: ¢ral.SensorACK{ + Action: central.SensorACK_NACK, + MessageType: central.SensorACK_NODE_INVENTORY, + ResourceId: "node-1", + Reason: "some failure reason", + }, + shouldForward: true, + expectedAction: sensor.MsgToCompliance_ComplianceACK_NACK, + expectedMessageType: sensor.MsgToCompliance_ComplianceACK_NODE_INVENTORY, + expectedReason: "some failure reason", + }, + "NODE_INDEX_REPORT ACK should be forwarded": { + sensorACK: ¢ral.SensorACK{ + Action: central.SensorACK_ACK, + MessageType: central.SensorACK_NODE_INDEX_REPORT, + ResourceId: "node-2", + }, + shouldForward: true, + expectedAction: sensor.MsgToCompliance_ComplianceACK_ACK, + expectedMessageType: sensor.MsgToCompliance_ComplianceACK_NODE_INDEX_REPORT, + }, + "NODE_INDEX_REPORT NACK should be forwarded with reason": { + sensorACK: ¢ral.SensorACK{ + Action: central.SensorACK_NACK, + MessageType: central.SensorACK_NODE_INDEX_REPORT, + ResourceId: "node-2", + Reason: "index failure", + }, + shouldForward: true, + expectedAction: sensor.MsgToCompliance_ComplianceACK_NACK, + expectedMessageType: sensor.MsgToCompliance_ComplianceACK_NODE_INDEX_REPORT, + expectedReason: "index failure", + }, + "NODE_INDEX_REPORT with unknown action should be ignored": { + sensorACK: ¢ral.SensorACK{ + Action: central.SensorACK_Action(999), + MessageType: central.SensorACK_NODE_INDEX_REPORT, + ResourceId: "node-2", + }, + shouldForward: false, + }, + "Broadcast NODE_INVENTORY ACK should set broadcast": { + sensorACK: ¢ral.SensorACK{ + Action: central.SensorACK_ACK, + MessageType: central.SensorACK_NODE_INVENTORY, + ResourceId: "", + }, + shouldForward: true, + expectedAction: sensor.MsgToCompliance_ComplianceACK_ACK, + expectedMessageType: sensor.MsgToCompliance_ComplianceACK_NODE_INVENTORY, + expectedHostname: "", + expectedBroadcast: true, + }, + "VM_INDEX_REPORT ACK should be ignored": { + sensorACK: ¢ral.SensorACK{ + Action: central.SensorACK_ACK, + MessageType: central.SensorACK_VM_INDEX_REPORT, + ResourceId: "vm-1", + }, + shouldForward: false, + }, + } + + for name, tc := range cases { + s.Run(name, func() { + ch := make(chan *storage.NodeInventory) + defer close(ch) + reports := make(chan *index.IndexReportWrap) + defer close(reports) + handler := NewNodeInventoryHandler(ch, reports, &mockAlwaysHitNodeIDMatcher{}, &mockRHCOSNodeMatcher{}) + s.NoError(handler.Start()) + handler.Notify(common.SensorComponentEventCentralReachable) + + if tc.shouldForward { + // Start a goroutine to receive the ComplianceACK before sending + // (channel is unbuffered so we need a receiver ready) + msgCh := make(chan common.MessageToComplianceWithAddress, 1) + errCh := make(chan error, 1) + go func() { + select { + case msg := 
<-handler.ComplianceC(): + msgCh <- msg + case <-time.After(3 * time.Second): + errCh <- errors.New("ComplianceACK message not received within 3 seconds") + } + }() + + // Send the SensorACK message + err := handler.ProcessMessage(s.T().Context(), ¢ral.MsgToSensor{ + Msg: ¢ral.MsgToSensor_SensorAck{SensorAck: tc.sensorACK}, + }) + s.NoError(err) + + select { + case err := <-errCh: + s.Fail(err.Error()) + case msg := <-msgCh: + // Verify ComplianceACK was sent to Compliance + complianceAck := msg.Msg.GetComplianceAck() + s.Require().NotNil(complianceAck, "Expected ComplianceACK message") + s.Equal(tc.expectedAction, complianceAck.GetAction()) + s.Equal(tc.expectedMessageType, complianceAck.GetMessageType()) + s.Equal(tc.sensorACK.GetResourceId(), complianceAck.GetResourceId()) + s.Equal(tc.expectedReason, complianceAck.GetReason()) + if tc.expectedHostname != "" || tc.expectedBroadcast { + s.Equal(tc.expectedHostname, msg.Hostname) + s.Equal(tc.expectedBroadcast, msg.Broadcast) + } else { + s.Equal(tc.sensorACK.GetResourceId(), msg.Hostname) + s.False(msg.Broadcast) + } + } + + } else { + // Send the SensorACK message + err := handler.ProcessMessage(s.T().Context(), ¢ral.MsgToSensor{ + Msg: ¢ral.MsgToSensor_SensorAck{SensorAck: tc.sensorACK}, + }) + s.NoError(err) + + // Verify no message arrives within the timeout. + select { + case msg := <-handler.ComplianceC(): + s.Failf("Message should not be forwarded to Compliance", "got: %v", msg) + case <-time.After(20 * time.Millisecond): + // Expected: nothing received. + } + } + + handler.Stop() + s.NoError(handler.Stopped().Wait()) + }) + } +} + +// TestHandlerAcceptsBothAckTypes tests that the handler accepts both legacy NodeInventoryACK +// and new SensorACK message types. +func (s *NodeInventoryHandlerTestSuite) TestHandlerAcceptsBothAckTypes() { + ch := make(chan *storage.NodeInventory) + defer close(ch) + reports := make(chan *index.IndexReportWrap) + defer close(reports) + handler := NewNodeInventoryHandler(ch, reports, &mockAlwaysHitNodeIDMatcher{}, &mockRHCOSNodeMatcher{}) + + // Test legacy NodeInventoryACK + legacyMsg := ¢ral.MsgToSensor{ + Msg: ¢ral.MsgToSensor_NodeInventoryAck{NodeInventoryAck: ¢ral.NodeInventoryACK{ + ClusterId: "cluster-1", + NodeName: "node-1", + Action: central.NodeInventoryACK_ACK, + }}, + } + s.True(handler.Accepts(legacyMsg), "Handler should accept legacy NodeInventoryACK") + + // Test new SensorACK for node-related messages + nodeAckMsg := ¢ral.MsgToSensor{ + Msg: ¢ral.MsgToSensor_SensorAck{SensorAck: ¢ral.SensorACK{ + Action: central.SensorACK_ACK, + MessageType: central.SensorACK_NODE_INDEX_REPORT, + ResourceId: "node-1", + }}, + } + s.True(handler.Accepts(nodeAckMsg), "Handler should accept SensorACK for node messages") + + // Test SensorACK for VM messages (should be handled by VM handler, not accepted here) + vmAckMsg := ¢ral.MsgToSensor{ + Msg: ¢ral.MsgToSensor_SensorAck{SensorAck: ¢ral.SensorACK{ + Action: central.SensorACK_ACK, + MessageType: central.SensorACK_VM_INDEX_REPORT, + ResourceId: "vm-1", + }}, + } + s.False(handler.Accepts(vmAckMsg), "Handler should not accept SensorACK for VM messages") + + // Test message without ACK + otherMsg := ¢ral.MsgToSensor{ + Msg: ¢ral.MsgToSensor_ClusterConfig{}, + } + s.False(handler.Accepts(otherMsg), "Handler should not accept other message types") +} + // This test simulates a running Sensor loosing connection to Central, followed by a reconnect. // As soon as Sensor enters offline mode, it should send NACKs to Compliance. 
 // In online mode, inventories are forwarded to Central, which responds with an ACK that is passed to Compliance.
diff --git a/sensor/common/virtualmachine/index/handler_impl.go b/sensor/common/virtualmachine/index/handler_impl.go
index 797542c34ebf7..356e91a705535 100644
--- a/sensor/common/virtualmachine/index/handler_impl.go
+++ b/sensor/common/virtualmachine/index/handler_impl.go
@@ -111,12 +111,34 @@ func (h *handlerImpl) Notify(e common.SensorComponentEvent) {
 }
 
 func (h *handlerImpl) Accepts(msg *central.MsgToSensor) bool {
+	if sensorAck := msg.GetSensorAck(); sensorAck != nil {
+		return sensorAck.GetMessageType() == central.SensorACK_VM_INDEX_REPORT
+	}
 	return false
 }
 
-// ProcessMessage is a no-op because Sensor does not receive any virtual machine data
-// from Central.
-func (h *handlerImpl) ProcessMessage(ctx context.Context, msg *central.MsgToSensor) error {
+// ProcessMessage handles SensorACK messages for VM index reports.
+func (h *handlerImpl) ProcessMessage(_ context.Context, msg *central.MsgToSensor) error {
+	sensorAck := msg.GetSensorAck()
+	if sensorAck == nil || sensorAck.GetMessageType() != central.SensorACK_VM_INDEX_REPORT {
+		return nil
+	}
+
+	vmID := sensorAck.GetResourceId()
+	action := sensorAck.GetAction()
+	reason := sensorAck.GetReason()
+
+	switch action {
+	case central.SensorACK_ACK:
+		log.Debugf("Received ACK from Central for VM index report: vm_id=%s", vmID)
+		metrics.IndexReportAcksReceived.WithLabelValues(action.String()).Inc()
+	case central.SensorACK_NACK:
+		log.Warnf("Received NACK from Central for VM index report: vm_id=%s, reason=%s", vmID, reason)
+		metrics.IndexReportAcksReceived.WithLabelValues(action.String()).Inc()
+		// TODO(ROX-xxxxx): Implement retry logic or notify the VM relay.
+		// Currently, the VM relay has its own retry mechanism, but it's not aware of Central's rate limiting.
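+		//
+		// Editor's sketch (hypothetical, not part of the original change): if the relay is
+		// ever to react to Central back-pressure, one possible shape is a small callback
+		// interface supplied when the handler is constructed, for example
+		//
+		//	type nackListener interface {
+		//		OnIndexReportNack(vmID, reason string)
+		//	}
+		//
+		// invoked here with the vmID and reason extracted above. The handlerImpl in this
+		// patch has no such field; the interface name is illustrative only.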
+ } + return nil } diff --git a/sensor/common/virtualmachine/index/handler_impl_test.go b/sensor/common/virtualmachine/index/handler_impl_test.go index 50dcb07c03238..549ce650f0947 100644 --- a/sensor/common/virtualmachine/index/handler_impl_test.go +++ b/sensor/common/virtualmachine/index/handler_impl_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus/testutil" "github.com/stackrox/rox/generated/internalapi/central" v1 "github.com/stackrox/rox/generated/internalapi/virtualmachine/v1" "github.com/stackrox/rox/pkg/centralsensor" @@ -16,6 +17,7 @@ import ( "github.com/stackrox/rox/sensor/common/centralcaps" "github.com/stackrox/rox/sensor/common/virtualmachine" "github.com/stackrox/rox/sensor/common/virtualmachine/index/mocks" + vmmetrics "github.com/stackrox/rox/sensor/common/virtualmachine/metrics" "github.com/stretchr/testify/suite" "go.uber.org/mock/gomock" ) @@ -223,10 +225,93 @@ func (s *virtualMachineHandlerSuite) TestCapabilities() { s.Require().Empty(caps) } +func (s *virtualMachineHandlerSuite) TestAccepts() { + // Should accept SensorACK with VM_INDEX_REPORT type + vmAckMsg := ¢ral.MsgToSensor{ + Msg: ¢ral.MsgToSensor_SensorAck{SensorAck: ¢ral.SensorACK{ + Action: central.SensorACK_ACK, + MessageType: central.SensorACK_VM_INDEX_REPORT, + ResourceId: "vm-1", + }}, + } + s.Assert().True(s.handler.Accepts(vmAckMsg), "Handler should accept SensorACK for VM_INDEX_REPORT") + + // Should not accept SensorACK with other types + nodeAckMsg := ¢ral.MsgToSensor{ + Msg: ¢ral.MsgToSensor_SensorAck{SensorAck: ¢ral.SensorACK{ + Action: central.SensorACK_ACK, + MessageType: central.SensorACK_NODE_INDEX_REPORT, + ResourceId: "node-1", + }}, + } + s.Assert().False(s.handler.Accepts(nodeAckMsg), "Handler should not accept SensorACK for NODE_INDEX_REPORT") + + // Should not accept other message types + otherMsg := ¢ral.MsgToSensor{ + Msg: ¢ral.MsgToSensor_ClusterConfig{}, + } + s.Assert().False(s.handler.Accepts(otherMsg), "Handler should not accept other message types") +} + func (s *virtualMachineHandlerSuite) TestProcessMessage() { - msg := ¢ral.MsgToSensor{} - err := s.handler.ProcessMessage(context.Background(), msg) - s.Require().NoError(err) + ctx := context.Background() + + getMetric := func(label string) float64 { + return testutil.ToFloat64(vmmetrics.IndexReportAcksReceived.WithLabelValues(label)) + } + + cases := map[string]struct { + msg *central.MsgToSensor + expectAck int + expectNack int + }{ + "ack increments ack metric": { + msg: ¢ral.MsgToSensor{ + Msg: ¢ral.MsgToSensor_SensorAck{SensorAck: ¢ral.SensorACK{ + Action: central.SensorACK_ACK, + MessageType: central.SensorACK_VM_INDEX_REPORT, + ResourceId: "vm-ack", + }}, + }, + expectAck: 1, + expectNack: 0, + }, + "nack increments nack metric": { + msg: ¢ral.MsgToSensor{ + Msg: ¢ral.MsgToSensor_SensorAck{SensorAck: ¢ral.SensorACK{ + Action: central.SensorACK_NACK, + MessageType: central.SensorACK_VM_INDEX_REPORT, + ResourceId: "vm-nack", + Reason: "rate limited", + }}, + }, + expectAck: 0, + expectNack: 1, + }, + "non-VM message does not change metrics": { + msg: ¢ral.MsgToSensor{ + Msg: ¢ral.MsgToSensor_SensorAck{SensorAck: ¢ral.SensorACK{ + Action: central.SensorACK_ACK, + MessageType: central.SensorACK_NODE_INDEX_REPORT, + ResourceId: "node-1", + }}, + }, + expectAck: 0, + expectNack: 0, + }, + } + + for name, tc := range cases { + s.Run(name, func() { + initialAck := getMetric(central.SensorACK_ACK.String()) + initialNack := getMetric(central.SensorACK_NACK.String()) + + err := 
s.handler.ProcessMessage(ctx, tc.msg) + s.Require().NoError(err) + s.Equal(initialAck+float64(tc.expectAck), getMetric(central.SensorACK_ACK.String())) + s.Equal(initialNack+float64(tc.expectNack), getMetric(central.SensorACK_NACK.String())) + }) + } } func (s *virtualMachineHandlerSuite) TestResponsesC_BeforeStart() { diff --git a/sensor/common/virtualmachine/metrics/metrics.go b/sensor/common/virtualmachine/metrics/metrics.go index a7dd57334f451..b2794d76e4e21 100644 --- a/sensor/common/virtualmachine/metrics/metrics.go +++ b/sensor/common/virtualmachine/metrics/metrics.go @@ -115,6 +115,17 @@ var VMDiscoveredData = prometheus.NewCounterVec( []string{"detected_os", "activation_status", "dnf_metadata_status"}, ) +// IndexReportAcksReceived counts ACK/NACK responses received from Central for VM index reports. +var IndexReportAcksReceived = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metrics.PrometheusNamespace, + Subsystem: metrics.SensorSubsystem.String(), + Name: "virtual_machine_index_report_acks_received_total", + Help: "Total number of ACK/NACK responses received from Central for VM index reports", + }, + []string{"action"}, // "ACK" or "NACK" +) + func init() { prometheus.MustRegister( IndexReportsReceived, @@ -124,5 +135,6 @@ func init() { IndexReportBlockingEnqueueDurationMilliseconds, IndexReportEnqueueBlockedTotal, VMDiscoveredData, + IndexReportAcksReceived, ) } From 585e1a28e679c8b73e60faeb4a9a463c8926a9fc Mon Sep 17 00:00:00 2001 From: Stephan Hesselmann Date: Tue, 27 Jan 2026 16:49:09 +0100 Subject: [PATCH 034/232] ROX-32453: fix auth error formatting (#18691) --- sensor/common/centralproxy/authorizer.go | 16 +++++++++++----- sensor/common/centralproxy/authorizer_test.go | 5 ++--- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/sensor/common/centralproxy/authorizer.go b/sensor/common/centralproxy/authorizer.go index dd418e0f34d07..2c212ea0f6a9b 100644 --- a/sensor/common/centralproxy/authorizer.go +++ b/sensor/common/centralproxy/authorizer.go @@ -76,17 +76,23 @@ func newK8sAuthorizer(client kubernetes.Interface) *k8sAuthorizer { // formatForbiddenErr creates a consistent forbidden error message for authorization failures. func formatForbiddenErr(user, verb, resource, group, namespace string) error { - if namespace == "" { + // Format as resource.group using "core" for empty group. + qualifiedResource := resource + "." 
+ group + if group == "" { + qualifiedResource = resource + ".core" + } + + if namespace == FullClusterAccessScope { return pkghttputil.Errorf( http.StatusForbidden, - "user %s lacks cluster-wide %s permission for resource %s in group %s", - user, verb, resource, group, + "user %q lacks cluster-wide %s permission for resource %q", + user, verb, qualifiedResource, ) } return pkghttputil.Errorf( http.StatusForbidden, - "user %s lacks %s permission for resource %s in group %s in namespace %s", - user, verb, resource, group, namespace, + "user %q lacks %s permission for resource %q in namespace %q", + user, verb, qualifiedResource, namespace, ) } diff --git a/sensor/common/centralproxy/authorizer_test.go b/sensor/common/centralproxy/authorizer_test.go index 234f476b5cc7f..f2626476fe018 100644 --- a/sensor/common/centralproxy/authorizer_test.go +++ b/sensor/common/centralproxy/authorizer_test.go @@ -196,8 +196,7 @@ func TestK8sAuthorizer_MissingPermission_Namespace(t *testing.T) { err := authorizer.authorize(context.Background(), userInfo, req) assert.Error(t, err) - assert.Contains(t, err.Error(), "lacks list permission for resource") - assert.Contains(t, err.Error(), "in namespace my-namespace") + assert.Contains(t, err.Error(), `user "limited-user" lacks list permission for resource "pods.core" in namespace "my-namespace"`) } func TestK8sAuthorizer_MissingPermission_ClusterWide(t *testing.T) { @@ -232,7 +231,7 @@ func TestK8sAuthorizer_MissingPermission_ClusterWide(t *testing.T) { err := authorizer.authorize(context.Background(), userInfo, req) assert.Error(t, err) - assert.Contains(t, err.Error(), "lacks list permission") + assert.Contains(t, err.Error(), `user "namespace-admin" lacks cluster-wide list permission for resource "pods.core"`) } func TestK8sAuthorizer_SubjectAccessReviewError(t *testing.T) { From 312be49f8494fe6636ee55f2d5458039847c2e16 Mon Sep 17 00:00:00 2001 From: David Vail Date: Tue, 27 Jan 2026 10:55:34 -0500 Subject: [PATCH 035/232] ROX-32834: Fix handling of ApolloError error array (#18685) --- .../ConsolePlugin/consoleFetchAxiosAdapter.ts | 34 ++++++++++++------- 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/ui/apps/platform/src/ConsolePlugin/consoleFetchAxiosAdapter.ts b/ui/apps/platform/src/ConsolePlugin/consoleFetchAxiosAdapter.ts index 601d9682bcc54..0aa08791d0898 100644 --- a/ui/apps/platform/src/ConsolePlugin/consoleFetchAxiosAdapter.ts +++ b/ui/apps/platform/src/ConsolePlugin/consoleFetchAxiosAdapter.ts @@ -35,7 +35,7 @@ export default function consoleFetchAxiosAdapter( body: config.data, headers: updatedHeaders, }) - .then(async (response) => { + .then(async (response: Response) => { const data = await response.text(); // GraphQL request errors are JSON objects with an `errors` field an a HTTP status code of 200, so we @@ -57,16 +57,26 @@ export default function consoleFetchAxiosAdapter( statusText: response.statusText, }; }) - .catch(async (error) => { - const { status, statusText, headers, config } = error.response; - const text = await error.response.text(); - const convertedResponse = { status, statusText, headers, config, data: text }; - // Preserve original error context by passing the original error object and stack trace - const axiosError: AxiosError & { originalError?: Error; backendMessage?: string } = - new AxiosError(error.message, status, config, undefined, convertedResponse); - axiosError.stack = error.stack; - // Attach the original error for further debugging if needed - axiosError.originalError = error; - throw axiosError; + 
.catch(async (error: unknown) => { + if ( + typeof error === 'object' && + error !== null && + 'response' in error && + error.response instanceof Response + ) { + // If the error contains response information for an HTTP 4xx or 5xx error, we can extract the message from the response body + const { status, statusText } = error.response; + const text = await error.response.text(); + const headers = new AxiosHeaders(); + const axiosResponse = { status, statusText, headers, config, data: text }; + // Preserve original error context by passing the original error object and response information + throw new AxiosError(text, `${status}`, { headers }, undefined, axiosResponse); + } + + if (error instanceof Error) { + throw error; + } + + throw new Error(String(error)); }); } From 150538e46656ee3db47c012a60e0333d2b477e6c Mon Sep 17 00:00:00 2001 From: Vlad Bologa Date: Tue, 27 Jan 2026 17:10:24 +0100 Subject: [PATCH 036/232] ROX-32838: Enable ConsolePlugin only for OCP 4.19+ (#18693) --- .../templates/console-plugin.yaml.htpl | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/image/templates/helm/stackrox-secured-cluster/templates/console-plugin.yaml.htpl b/image/templates/helm/stackrox-secured-cluster/templates/console-plugin.yaml.htpl index 8712f530f78dc..0ed15db2d8d0f 100644 --- a/image/templates/helm/stackrox-secured-cluster/templates/console-plugin.yaml.htpl +++ b/image/templates/helm/stackrox-secured-cluster/templates/console-plugin.yaml.htpl @@ -1,7 +1,13 @@ [<- if and .FeatureFlags.ROX_OCP_CONSOLE_INTEGRATION (not .KubectlOutput) >] {{- include "srox.init" . -}} -{{- if and (eq ._rox.env.installMethod "operator") ._rox.consolePlugin.enabled -}} +{{- /* + ConsolePlugin is only deployed on OpenShift 4.19+ (Kubernetes 1.32+). + For the mapping between OpenShift and Kubernetes versions, see: + https://access.redhat.com/solutions/4870701 +*/ -}} +{{- $kubeVersion := semver $.Capabilities.KubeVersion.Version -}} +{{- if and (eq ._rox.env.installMethod "operator") ._rox.consolePlugin.enabled (ge (int $kubeVersion.Minor) 32) -}} apiVersion: console.openshift.io/v1 kind: ConsolePlugin metadata: From 2def3dc2ef71d56166582f128129777daf33d4fd Mon Sep 17 00:00:00 2001 From: Brad Rogers <61400697+bradr5@users.noreply.github.com> Date: Tue, 27 Jan 2026 11:00:52 -0600 Subject: [PATCH 037/232] ROX-32360: Populate Virtual Machine details (#18682) --- .../VirtualMachine/VirtualMachinePage.tsx | 33 ++++++++---- .../VirtualMachinePageComponents.tsx | 22 ++++---- .../VirtualMachinePageDetails.tsx | 54 ++++++++++++++++--- .../VirtualMachinePageHeader.tsx | 14 ++--- .../VirtualMachinePageVulnerabilities.tsx | 27 +++++----- .../src/services/VirtualMachineService.ts | 24 +++------ .../platform/src/types/scanComponent.proto.ts | 15 ------ ui/apps/platform/src/types/vulnState.proto.ts | 3 -- .../platform/src/types/vulnerability.proto.ts | 20 ------- 9 files changed, 108 insertions(+), 104 deletions(-) delete mode 100644 ui/apps/platform/src/types/vulnState.proto.ts diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePage.tsx b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePage.tsx index a1237775ad383..04ba9dae1044e 100644 --- a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePage.tsx +++ b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePage.tsx @@ -33,10 +33,12 @@ import { } from 
'../../utils/sortFields'; import VirtualMachinePageHeader from './VirtualMachinePageHeader'; import VirtualMachinePageComponents from './VirtualMachinePageComponents'; +import VirtualMachinePageDetails from './VirtualMachinePageDetails'; import VirtualMachinePageVulnerabilities from './VirtualMachinePageVulnerabilities'; const VULNERABILITIES_TAB_ID = 'vulnerabilities-tab-content'; const COMPONENTS_TAB_ID = 'components-tab-content'; +const DETAILS_TAB_ID = 'details-tab-content'; const virtualMachineCveOverviewPath = getOverviewPagePath('VirtualMachine', { entityTab: 'VirtualMachine', @@ -72,14 +74,15 @@ function VirtualMachinePage() { [virtualMachineId] ); - const { data: virtualMachineData, isLoading, error } = useRestQuery(fetchVirtualMachine); + const { data: virtualMachine, isLoading, error } = useRestQuery(fetchVirtualMachine); const [activeTabKey, setActiveTabKey] = useURLStringUnion('detailsTab', detailsTabValues); const vulnTabKey = detailsTabValues[0]; const componentsTabKey = detailsTabValues[4]; + const detailsTabKey = detailsTabValues[1]; - const virtualMachineName = virtualMachineData?.name; + const virtualMachineName = virtualMachine?.name; function onTabChange(value: string | number) { if (value === componentsTabKey) { @@ -113,7 +116,7 @@ function VirtualMachinePage() { @@ -136,6 +139,11 @@ function VirtualMachinePage() { tabContentId={COMPONENTS_TAB_ID} title={componentsTabKey} /> + @@ -145,6 +153,8 @@ function VirtualMachinePage() { 'Prioritize and remediate observed CVEs for this virtual machine'} {activeTabKey === componentsTabKey && 'View all components from this virtual machine'} + {activeTabKey === detailsTabKey && + 'View details about this virtual machine'} @@ -159,9 +169,9 @@ function VirtualMachinePage() { {activeTabKey === vulnTabKey && ( )} + {activeTabKey === detailsTabKey && ( + + + + )} ); diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageComponents.tsx b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageComponents.tsx index ddc164b414e6c..6b9021bc34bb2 100644 --- a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageComponents.tsx +++ b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageComponents.tsx @@ -51,9 +51,9 @@ export const attributeForScannable: SelectSearchFilterAttribute = { }; export type VirtualMachinePageComponentsProps = { - virtualMachineData: VirtualMachine | undefined; - isLoadingVirtualMachineData: boolean; - errorVirtualMachineData: Error | undefined; + virtualMachine: VirtualMachine | undefined; + isLoadingVirtualMachine: boolean; + errorVirtualMachine: Error | undefined; urlSearch: UseUrlSearchReturn; urlSorting: UseURLSortResult; urlPagination: UseURLPaginationResult; @@ -62,9 +62,9 @@ export type VirtualMachinePageComponentsProps = { const searchFilterConfig = [virtualMachineComponentSearchFilterConfig]; function VirtualMachinePageComponents({ - virtualMachineData, - isLoadingVirtualMachineData, - errorVirtualMachineData, + virtualMachine, + isLoadingVirtualMachine, + errorVirtualMachine, urlSearch, urlSorting, urlPagination, @@ -76,8 +76,8 @@ function VirtualMachinePageComponents({ const isFiltered = getHasSearchApplied(searchFilter); const virtualMachineComponentsTableData = useMemo( - () => getVirtualMachineComponentsTableData(virtualMachineData), - [virtualMachineData] + () => 
getVirtualMachineComponentsTableData(virtualMachine), + [virtualMachine] ); const filteredVirtualMachineComponentsTableData = useMemo( @@ -110,9 +110,9 @@ function VirtualMachinePageComponents({ }, [sortedVirtualMachineComponentsTableData, page, perPage]); const tableState = getTableUIState({ - isLoading: isLoadingVirtualMachineData, + isLoading: isLoadingVirtualMachine, data: paginatedVirtualMachineComponentsTableData, - error: errorVirtualMachineData, + error: errorVirtualMachine, searchFilter, }); @@ -167,7 +167,7 @@ function VirtualMachinePageComponents({ - {!isLoadingVirtualMachineData ? ( + {!isLoadingVirtualMachine ? ( `${pluralize(filteredVirtualMachineComponentsTableData.length, 'result')} found` ) : ( <Skeleton screenreaderText="Loading virtual machine vulnerability count" /> diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageDetails.tsx b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageDetails.tsx index 5ff22a4f1a8be..e39e126013468 100644 --- a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageDetails.tsx +++ b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageDetails.tsx @@ -1,16 +1,56 @@ -import { PageSection } from '@patternfly/react-core'; -import EmptyStateTemplate from 'Components/EmptyStateTemplate'; +import { + DescriptionList, + DescriptionListDescription, + DescriptionListGroup, + DescriptionListTerm, + PageSection, +} from '@patternfly/react-core'; +import capitalize from 'lodash/capitalize'; + +import type { VirtualMachine } from 'services/VirtualMachineService'; export type VirtualMachinePageDetailsProps = { - virtualMachineId: string; + virtualMachine: VirtualMachine | undefined; }; -function VirtualMachinePageDetails({ virtualMachineId }: VirtualMachinePageDetailsProps) { +function VirtualMachinePageDetails({ virtualMachine }: VirtualMachinePageDetailsProps) { + const facts = virtualMachine?.facts ?? {}; return ( <PageSection variant="light" isFilled padding={{ default: 'padding' }}> - <EmptyStateTemplate title="Virtual Machine Details" headingLevel="h2"> - Virtual machine details content will be implemented here for {virtualMachineId}. 
- </EmptyStateTemplate> + <DescriptionList> + <DescriptionListGroup> + <DescriptionListTerm>Status</DescriptionListTerm> + <DescriptionListDescription> + {capitalize(virtualMachine?.state)} + </DescriptionListDescription> + </DescriptionListGroup> + <DescriptionListGroup> + <DescriptionListTerm>Operating System</DescriptionListTerm> + <DescriptionListDescription>{facts.guestOS || '-'}</DescriptionListDescription> + </DescriptionListGroup> + <DescriptionListGroup> + <DescriptionListTerm>IP Addresses</DescriptionListTerm> + <DescriptionListDescription> + {facts.ipAddresses || '-'} + </DescriptionListDescription> + </DescriptionListGroup> + <DescriptionListGroup> + <DescriptionListTerm>Node</DescriptionListTerm> + <DescriptionListDescription>{facts.nodeName || '-'}</DescriptionListDescription> + </DescriptionListGroup> + <DescriptionListGroup> + <DescriptionListTerm>Pods</DescriptionListTerm> + <DescriptionListDescription> + {facts.activePods || '-'} + </DescriptionListDescription> + </DescriptionListGroup> + <DescriptionListGroup> + <DescriptionListTerm>Boot Order</DescriptionListTerm> + <DescriptionListDescription> + {facts.bootOrder || '-'} + </DescriptionListDescription> + </DescriptionListGroup> + </DescriptionList> </PageSection> ); } diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageHeader.tsx b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageHeader.tsx index 144d96c8360da..a05445314851b 100644 --- a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageHeader.tsx +++ b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageHeader.tsx @@ -8,13 +8,13 @@ import { getAxiosErrorMessage } from 'utils/responseErrorUtils'; import HeaderLoadingSkeleton from '../../components/HeaderLoadingSkeleton'; export type VirtualMachinePageHeaderProps = { - virtualMachineData: VirtualMachine | undefined; + virtualMachine: VirtualMachine | undefined; isLoading: boolean; error: Error | undefined; }; function VirtualMachinePageHeader({ - virtualMachineData, + virtualMachine, isLoading, error, }: VirtualMachinePageHeaderProps) { @@ -40,22 +40,22 @@ function VirtualMachinePageHeader({ ); } - if (!virtualMachineData) { + if (!virtualMachine) { return null; } return ( <Flex direction={{ default: 'column' }} alignItems={{ default: 'alignItemsFlexStart' }}> <Flex alignItems={{ default: 'alignItemsCenter' }}> - <Title headingLevel="h1">{virtualMachineData.name} + {virtualMachine.name} - {virtualMachineData.scan?.scanTime && ( - + {virtualMachine.scan?.scanTime && ( + )} diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageVulnerabilities.tsx b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageVulnerabilities.tsx index d7adcfd727e79..eaac427d5859e 100644 --- a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageVulnerabilities.tsx +++ b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageVulnerabilities.tsx @@ -48,9 +48,9 @@ import VirtualMachineVulnerabilitiesTable, { // Currently we need all vm info to be fetched in the root component, hence this being passed in // there will likely be a call specific to this table in the future that should be made here export type VirtualMachinePageVulnerabilitiesProps = { - 
virtualMachineData: VirtualMachine | undefined; - isLoadingVirtualMachineData: boolean; - errorVirtualMachineData: Error | undefined; + virtualMachine: VirtualMachine | undefined; + isLoadingVirtualMachine: boolean; + errorVirtualMachine: Error | undefined; urlSearch: UseUrlSearchReturn; urlSorting: UseURLSortResult; urlPagination: UseURLPaginationResult; @@ -62,9 +62,9 @@ const searchFilterConfig = [ ]; function VirtualMachinePageVulnerabilities({ - virtualMachineData, - isLoadingVirtualMachineData, - errorVirtualMachineData, + virtualMachine, + isLoadingVirtualMachine, + errorVirtualMachine, urlSearch, urlSorting, urlPagination, @@ -80,8 +80,8 @@ function VirtualMachinePageVulnerabilities({ const managedColumnState = useManagedColumns(tableId, defaultColumns); const virtualMachineTableData = useMemo( - () => getVirtualMachineCveTableData(virtualMachineData), - [virtualMachineData] + () => getVirtualMachineCveTableData(virtualMachine), + [virtualMachine] ); const filteredVirtualMachineTableData = useMemo( @@ -110,9 +110,9 @@ function VirtualMachinePageVulnerabilities({ }, [sortedVirtualMachineTableData, page, perPage]); const tableState = getTableUIState({ - isLoading: isLoadingVirtualMachineData, + isLoading: isLoadingVirtualMachine, data: paginatedVirtualMachineTableData, - error: errorVirtualMachineData, + error: errorVirtualMachine, searchFilter, }); @@ -133,10 +133,7 @@ function VirtualMachinePageVulnerabilities({ setPage(1, 'replace'); }} /> - + - {!isLoadingVirtualMachineData ? ( + {!isLoadingVirtualMachine ? ( `${pluralize(filteredVirtualMachineTableData.length, 'result')} found` ) : ( <Skeleton screenreaderText="Loading virtual machine vulnerability count" /> diff --git a/ui/apps/platform/src/services/VirtualMachineService.ts b/ui/apps/platform/src/services/VirtualMachineService.ts index 959c0a53039ed..cc34c8de8ef55 100644 --- a/ui/apps/platform/src/services/VirtualMachineService.ts +++ b/ui/apps/platform/src/services/VirtualMachineService.ts @@ -3,39 +3,29 @@ import type { ScanComponent } from 'types/scanComponent.proto'; import type { SearchQueryOptions } from 'types/search'; import { buildNestedRawQueryParams } from './ComplianceCommon'; +type VirtualMachineState = 'UNKNOWN' | 'STOPPED' | 'RUNNING'; + export type VirtualMachine = { id: string; namespace: string; name: string; clusterId: string; clusterName: string; - facts: Record<string, string>; + facts?: Record<string, string>; scan?: VirtualMachineScan; lastUpdated: string; // ISO 8601 date string + vsockCid: number; + state: VirtualMachineState; }; type VirtualMachineScan = { - scannerVersion: string; scanTime: string; // ISO 8601 date string + operatingSystem: string; components: ScanComponent[]; - dataSource: DataSource; notes: VirtualMachineScanNote[]; }; -type VirtualMachineScanNote = - | 'UNSET' - | 'OS_UNAVAILABLE' - | 'PARTIAL_SCAN_DATA' - | 'OS_CVES_UNAVAILABLE' - | 'OS_CVES_STALE' - | 'LANGUAGE_CVES_UNAVAILABLE' - | 'CERTIFIED_RHEL_SCAN_UNAVAILABLE'; - -type DataSource = { - id: string; - name: string; - mirror: string; -}; +type VirtualMachineScanNote = 'UNSET' | 'OS_UNKNOWN' | 'OS_UNSUPPORTED'; export type ListVirtualMachinesResponse = { virtualMachines: VirtualMachine[]; diff --git a/ui/apps/platform/src/types/scanComponent.proto.ts b/ui/apps/platform/src/types/scanComponent.proto.ts index 09d4e8fc1249f..8103bd8b2572e 100644 --- a/ui/apps/platform/src/types/scanComponent.proto.ts +++ b/ui/apps/platform/src/types/scanComponent.proto.ts @@ -5,23 +5,14 @@ import type { EmbeddedVulnerability } from 
'types/vulnerability.proto'; export type ScanComponent = { name: string; version: string; - license: License; vulns: EmbeddedVulnerability[]; source: SourceType; - location: string; topCvss?: number; riskScore: number; - fixedBy: string; - executables: ScanComponentExecutable[]; architecture: string; notes: Note[]; }; -type ScanComponentExecutable = { - path: string; - dependencies: string[]; -}; - type Note = 'UNSPECIFIED' | 'UNSCANNED'; export type SourceType = @@ -33,9 +24,3 @@ export type SourceType = | 'GO' | 'DOTNETCORERUNTIME' | 'INFRASTRUCTURE'; - -type License = { - name: string; - type: string; - url: string; -}; diff --git a/ui/apps/platform/src/types/vulnState.proto.ts b/ui/apps/platform/src/types/vulnState.proto.ts deleted file mode 100644 index bda418640f6a7..0000000000000 --- a/ui/apps/platform/src/types/vulnState.proto.ts +++ /dev/null @@ -1,3 +0,0 @@ -/** ===== Based on v2/vuln_state.proto ===== */ - -export type VulnerabilityState = 'OBSERVED' | 'DEFERRED' | 'FALSE_POSITIVE'; diff --git a/ui/apps/platform/src/types/vulnerability.proto.ts b/ui/apps/platform/src/types/vulnerability.proto.ts index ead00819cfc68..2ae1dad495d2e 100644 --- a/ui/apps/platform/src/types/vulnerability.proto.ts +++ b/ui/apps/platform/src/types/vulnerability.proto.ts @@ -1,5 +1,3 @@ -import type { VulnerabilityState } from 'types/vulnState.proto'; - /** ===== Based on v2/vulnerability.proto ===== */ export type EmbeddedVulnerability = { @@ -9,32 +7,14 @@ export type EmbeddedVulnerability = { summary: string; link: string; fixedBy?: string; - cvssV3: CVSSV3; publishedOn: string; // ISO 8601 date string; lastModified: string; // ISO 8601 date string; - vulnerabilityType: VulnerabilityType; - vulnerabilityTypes: VulnerabilityType[]; - suppressed: boolean; - suppressActivation: string; // ISO 8601 date string; - suppressExpiry: string; // ISO 8601 date string; firstSystemOccurrence: string; // ISO 8601 date string; - firstImageOccurrence: string; // ISO 8601 date string; severity: VulnerabilitySeverity; - state: VulnerabilityState; cvssMetrics: CVSSScore[]; - nvdCvss: number; epss: EPSS; }; -type VulnerabilityType = - | 'UNKNOWN_VULNERABILITY' - | 'IMAGE_VULNERABILITY' - | 'K8S_VULNERABILITY' - | 'ISTIO_VULNERABILITY' - | 'NODE_VULNERABILITY' - | 'OPENSHIFT_VULNERABILITY' - | 'VIRTUAL_MACHINE_VULNERABILITY'; - type VulnerabilitySeverity = | 'UNKNOWN_VULNERABILITY_SEVERITY' | 'LOW_VULNERABILITY_SEVERITY' From f55d23a5baefd85c7c2ad0ae0d26d820ac08a9d6 Mon Sep 17 00:00:00 2001 From: Giles Hutton <ghutton@redhat.com> Date: Tue, 27 Jan 2026 19:28:15 +0000 Subject: [PATCH 038/232] chore(fim): further tidy up and UI tweaks (#18698) --- .../violationmessages/printer/file_access.go | 20 +++++++- .../printer/file_access_test.go | 24 +++++----- pkg/fixtures/alert.go | 3 +- pkg/notifiers/format.go | 6 ++- pkg/notifiers/format_test.go | 48 +++++++++++++++++-- .../Details/FileAccessCardContent.tsx | 22 +++++++-- 6 files changed, 98 insertions(+), 25 deletions(-) diff --git a/pkg/booleanpolicy/violationmessages/printer/file_access.go b/pkg/booleanpolicy/violationmessages/printer/file_access.go index cc2a5a79a6e20..95b33dd0c663d 100644 --- a/pkg/booleanpolicy/violationmessages/printer/file_access.go +++ b/pkg/booleanpolicy/violationmessages/printer/file_access.go @@ -10,6 +10,24 @@ const ( UNKNOWN_FILE = "Unknown file" ) +var ( + operationToPretty = map[storage.FileAccess_Operation]string{ + storage.FileAccess_OPEN: "opened writable", + storage.FileAccess_UNLINK: "deleted", + storage.FileAccess_CREATE: "created", + 
storage.FileAccess_OWNERSHIP_CHANGE: "ownership changed", + storage.FileAccess_PERMISSION_CHANGE: "permission changed", + storage.FileAccess_RENAME: "renamed", + } +) + +func prettifyOperation(op storage.FileAccess_Operation) string { + if pretty, ok := operationToPretty[op]; ok { + return pretty + } + return "Unknown operation" +} + func UpdateFileAccessAlertViolationMessage(v *storage.Alert_Violation) { if v.GetType() != storage.Alert_Violation_FILE_ACCESS { return @@ -27,7 +45,7 @@ func UpdateFileAccessAlertViolationMessage(v *storage.Alert_Violation) { path = access.GetFile().GetEffectivePath() } - v.Message = fmt.Sprintf("'%v' accessed (%s)", path, access.GetOperation()) + v.Message = fmt.Sprintf("'%v' %s", path, prettifyOperation(access.GetOperation())) } func GenerateFileAccessViolation(access *storage.FileAccess) *storage.Alert_Violation { diff --git a/pkg/booleanpolicy/violationmessages/printer/file_access_test.go b/pkg/booleanpolicy/violationmessages/printer/file_access_test.go index a771b8f7c3308..83b8d5366d5db 100644 --- a/pkg/booleanpolicy/violationmessages/printer/file_access_test.go +++ b/pkg/booleanpolicy/violationmessages/printer/file_access_test.go @@ -47,7 +47,7 @@ func TestUpdateFileAccessMessage(t *testing.T) { }, }, }, - expected: "'/etc/passwd' accessed (OPEN)", + expected: "'/etc/passwd' opened writable", }, { desc: "file CREATE operation", @@ -56,7 +56,7 @@ func TestUpdateFileAccessMessage(t *testing.T) { Operation: storage.FileAccess_CREATE, Process: &storage.ProcessIndicator{Signal: &storage.ProcessSignal{Name: "touch"}}, }, - expected: "'/tmp/new_file' accessed (CREATE)", + expected: "'/tmp/new_file' created", }, { desc: "file UNLINK operation", @@ -65,7 +65,7 @@ func TestUpdateFileAccessMessage(t *testing.T) { Operation: storage.FileAccess_UNLINK, Process: &storage.ProcessIndicator{Signal: &storage.ProcessSignal{Name: "rm"}}, }, - expected: "'/tmp/old_file' accessed (UNLINK)", + expected: "'/tmp/old_file' deleted", }, { desc: "file RENAME operation", @@ -74,7 +74,7 @@ func TestUpdateFileAccessMessage(t *testing.T) { Operation: storage.FileAccess_RENAME, Process: &storage.ProcessIndicator{Signal: &storage.ProcessSignal{Name: "mv"}}, }, - expected: "'/tmp/renamed_file' accessed (RENAME)", + expected: "'/tmp/renamed_file' renamed", }, { desc: "file PERMISSION_CHANGE operation", @@ -83,7 +83,7 @@ func TestUpdateFileAccessMessage(t *testing.T) { Operation: storage.FileAccess_PERMISSION_CHANGE, Process: &storage.ProcessIndicator{Signal: &storage.ProcessSignal{Name: "chmod"}}, }, - expected: "'/tmp/chmod_file' accessed (PERMISSION_CHANGE)", + expected: "'/tmp/chmod_file' permission changed", }, { desc: "file OWNERSHIP_CHANGE operation", @@ -92,7 +92,7 @@ func TestUpdateFileAccessMessage(t *testing.T) { Operation: storage.FileAccess_OWNERSHIP_CHANGE, Process: &storage.ProcessIndicator{Signal: &storage.ProcessSignal{Name: "chown"}}, }, - expected: "'/tmp/chown_file' accessed (OWNERSHIP_CHANGE)", + expected: "'/tmp/chown_file' ownership changed", }, { desc: "nil file path handling", @@ -103,7 +103,7 @@ func TestUpdateFileAccessMessage(t *testing.T) { Signal: &storage.ProcessSignal{Name: "test"}, }, }, - expected: "'" + UNKNOWN_FILE + "' accessed (OPEN)", + expected: "'" + UNKNOWN_FILE + "' opened writable", }, { desc: "nil process handling", @@ -112,7 +112,7 @@ func TestUpdateFileAccessMessage(t *testing.T) { Operation: storage.FileAccess_OPEN, Process: nil, }, - expected: "'/test/file' accessed (OPEN)", + expected: "'/test/file' opened writable", }, { desc: "nil process 
signal handling", @@ -123,7 +123,7 @@ func TestUpdateFileAccessMessage(t *testing.T) { Signal: nil, }, }, - expected: "'/test/file' accessed (OPEN)", + expected: "'/test/file' opened writable", }, { desc: "empty file path", @@ -134,7 +134,7 @@ func TestUpdateFileAccessMessage(t *testing.T) { Signal: &storage.ProcessSignal{Name: "test"}, }, }, - expected: "'" + UNKNOWN_FILE + "' accessed (OPEN)", + expected: "'" + UNKNOWN_FILE + "' opened writable", }, { desc: "Use EffectivePath if ActualPath is empty", @@ -145,7 +145,7 @@ func TestUpdateFileAccessMessage(t *testing.T) { Signal: &storage.ProcessSignal{Name: "test"}, }, }, - expected: "'/test/file' accessed (OPEN)", + expected: "'/test/file' opened writable", }, { desc: "empty process name", @@ -156,7 +156,7 @@ func TestUpdateFileAccessMessage(t *testing.T) { Signal: &storage.ProcessSignal{Name: ""}, }, }, - expected: "'/test/file' accessed (OPEN)", + expected: "'/test/file' opened writable", }, } diff --git a/pkg/fixtures/alert.go b/pkg/fixtures/alert.go index a3561e1af47fd..381233ae807a5 100644 --- a/pkg/fixtures/alert.go +++ b/pkg/fixtures/alert.go @@ -467,7 +467,8 @@ func WithFileAccessViolation(alert *storage.Alert) *storage.Alert { Timestamp: protocompat.TimestampNow(), Process: &storage.ProcessIndicator{ Signal: &storage.ProcessSignal{ - Name: "cp", + Name: "cp", + ExecFilePath: "/bin/cp", }, }, }), diff --git a/pkg/notifiers/format.go b/pkg/notifiers/format.go index 51b57cf8f324f..1547725e26e9f 100644 --- a/pkg/notifiers/format.go +++ b/pkg/notifiers/format.go @@ -50,9 +50,11 @@ const bplPolicyFormat = ` {{end}} {{else if isFileAccess .MessageAttributes }} {{stringify "Effective Path:" .MessageAttributes.FileAccess.File.EffectivePath | nestedList}} - {{stringify "Actual Path:" .MessageAttributes.FileAccess.File.ActualPath | nestedList}} - {{stringify "Operation:" .MessageAttributes.FileAccess.Operation | nestedList}} + {{if .MessageAttributes.FileAccess.File.ActualPath }} + {{stringify "Actual Path:" .MessageAttributes.FileAccess.File.ActualPath | nestedList}} + {{end}} {{stringify "Process Name:" .MessageAttributes.FileAccess.Process.Signal.Name | nestedList}} + {{stringify "Process Executable:" .MessageAttributes.FileAccess.Process.Signal.ExecFilePath | nestedList }} {{end}} {{end}} {{end}} diff --git a/pkg/notifiers/format_test.go b/pkg/notifiers/format_test.go index 54d25b883ee90..7f233663e3397 100644 --- a/pkg/notifiers/format_test.go +++ b/pkg/notifiers/format_test.go @@ -311,11 +311,45 @@ Time (UTC): 2021-01-20 22:42:02 Severity: Low Violations: - - '/etc/passwd' accessed (OPEN) + - '/etc/passwd' opened writable - Effective Path: /etc/passwd - Actual Path: /etc/passwd - - Operation: OPEN - Process Name: cp + - Process Executable: /bin/cp + +Policy Definition: + + Description: + - Alert on access to sensitive files on nodes + + Rationale: + - This is the rationale + + Remediation: + - This is the remediation + + Policy Criteria: + + Section Unnamed : + + - Actual Path: /etc/passwd + +Node: + - Name: ` + fixtureconsts.Node1 + ` + - Id: ` + fixtureconsts.Node1 + ` + - Cluster: ` + fixtureconsts.ClusterName1 + ` + - ClusterId: ` + fixtureconsts.Cluster1 + ` +` + expectedFormattedNodeAlertWithFileAccessEmptyActualPath = `Alert ID: ` + fixtureconsts.Alert1 + ` +Alert URL: https://localhost:8080/main/violations/` + fixtureconsts.Alert1 + ` +Time (UTC): 2021-01-20 22:42:02 +Severity: Low + +Violations: + - '/etc/passwd' opened writable + - Effective Path: /etc/passwd + - Process Name: cp + - Process Executable: /bin/cp Policy 
Definition: @@ -351,11 +385,11 @@ Violations: - This is a kube event violation - pod : nginx - container : nginx - - '/etc/passwd' accessed (OPEN) + - '/etc/passwd' opened writable - Effective Path: /etc/passwd - Actual Path: /etc/passwd - - Operation: OPEN - Process Name: cp + - Process Executable: /bin/cp - This is a process violation Policy Definition: @@ -433,6 +467,12 @@ func TestDeploymentFileAccessAlert(t *testing.T) { runFormatTest(t, fixtures.GetDeploymentFileAccessAlert(), expectedFormattedDeploymentAlertWithFileAccess) } +func TestNodeFileAccessEmptyActualPath(t *testing.T) { + alert := fixtures.GetNodeFileAccessAlert() + alert.Violations[0].GetFileAccess().GetFile().ActualPath = "" + runFormatTest(t, alert, expectedFormattedNodeAlertWithFileAccessEmptyActualPath) +} + func runFormatTest(t *testing.T, alert *storage.Alert, expectedFormattedAlert string) { funcMap := template.FuncMap{ "header": func(s string) string { diff --git a/ui/apps/platform/src/Containers/Violations/Details/FileAccessCardContent.tsx b/ui/apps/platform/src/Containers/Violations/Details/FileAccessCardContent.tsx index 5439db78177e4..77b14a83f2513 100644 --- a/ui/apps/platform/src/Containers/Violations/Details/FileAccessCardContent.tsx +++ b/ui/apps/platform/src/Containers/Violations/Details/FileAccessCardContent.tsx @@ -1,15 +1,21 @@ import type { ReactElement } from 'react'; import { DescriptionList, Divider, Flex, Title } from '@patternfly/react-core'; -import lowerCase from 'lodash/lowerCase'; -import upperFirst from 'lodash/upperFirst'; import DescriptionListItem from 'Components/DescriptionListItem'; import { getDateTime } from 'utils/dateUtils'; import type { FileAccess, FileOperation } from 'types/fileAccess.proto'; +const fileOperations: Map<FileOperation, string> = new Map([ + ['OPEN', 'Open (Writable)'], + ['CREATE', 'Create'], + ['UNLINK', 'Delete'], + ['RENAME', 'Rename'], + ['PERMISSION_CHANGE', 'Permission change'], + ['OWNERSHIP_CHANGE', 'Ownership change'], +]); + function formatOperation(operation: FileOperation): string { - // Convert SCREAMING_SNAKE_CASE to Sentence case - return upperFirst(lowerCase(operation)); + return fileOperations.get(operation) || 'Unknown'; } type FileAccessCardContentProps = { @@ -41,8 +47,14 @@ function FileAccessCardContent({ event }: FileAccessCardContentProps): ReactElem desc={moved.effectivePath || moved.actualPath} /> )} + {process?.signal?.name && ( + <DescriptionListItem term="Process name" desc={process.signal.name} /> + )} {process?.signal?.execFilePath && ( - <DescriptionListItem term="Process" desc={process.signal.execFilePath} /> + <DescriptionListItem + term="Process executable" + desc={process.signal.execFilePath} + /> )} {Number.isInteger(process?.signal?.uid) && ( <DescriptionListItem term="Process UID" desc={process.signal.uid} /> From 14f976bc3b9566d45dfdf3c8705882eb43e9a532 Mon Sep 17 00:00:00 2001 From: Stephan Hesselmann <shesselm@redhat.com> Date: Tue, 27 Jan 2026 20:30:42 +0100 Subject: [PATCH 039/232] fix(centralproxy): capitalize verb in auth error (#18699) --- sensor/common/centralproxy/authorizer.go | 3 +++ sensor/common/centralproxy/authorizer_test.go | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/sensor/common/centralproxy/authorizer.go b/sensor/common/centralproxy/authorizer.go index 2c212ea0f6a9b..6141544e0a042 100644 --- a/sensor/common/centralproxy/authorizer.go +++ b/sensor/common/centralproxy/authorizer.go @@ -76,6 +76,9 @@ func newK8sAuthorizer(client kubernetes.Interface) *k8sAuthorizer { // 
formatForbiddenErr creates a consistent forbidden error message for authorization failures. func formatForbiddenErr(user, verb, resource, group, namespace string) error { + // Uppercase the verb for readability. + verb = strings.ToUpper(verb) + // Format as resource.group using "core" for empty group. qualifiedResource := resource + "." + group if group == "" { diff --git a/sensor/common/centralproxy/authorizer_test.go b/sensor/common/centralproxy/authorizer_test.go index f2626476fe018..d0307a08c1140 100644 --- a/sensor/common/centralproxy/authorizer_test.go +++ b/sensor/common/centralproxy/authorizer_test.go @@ -196,7 +196,7 @@ func TestK8sAuthorizer_MissingPermission_Namespace(t *testing.T) { err := authorizer.authorize(context.Background(), userInfo, req) assert.Error(t, err) - assert.Contains(t, err.Error(), `user "limited-user" lacks list permission for resource "pods.core" in namespace "my-namespace"`) + assert.Contains(t, err.Error(), `user "limited-user" lacks LIST permission for resource "pods.core" in namespace "my-namespace"`) } func TestK8sAuthorizer_MissingPermission_ClusterWide(t *testing.T) { @@ -231,7 +231,7 @@ func TestK8sAuthorizer_MissingPermission_ClusterWide(t *testing.T) { err := authorizer.authorize(context.Background(), userInfo, req) assert.Error(t, err) - assert.Contains(t, err.Error(), `user "namespace-admin" lacks cluster-wide list permission for resource "pods.core"`) + assert.Contains(t, err.Error(), `user "namespace-admin" lacks cluster-wide LIST permission for resource "pods.core"`) } func TestK8sAuthorizer_SubjectAccessReviewError(t *testing.T) { From e84978ea0e5f1d12294bbb570f6159ed6c003966 Mon Sep 17 00:00:00 2001 From: Mark Pedrotti <pedrottimark@gmail.com> Date: Tue, 27 Jan 2026 15:28:47 -0500 Subject: [PATCH 040/232] ROX-32833: Alphabetize searchFilterConfig in VirtualMachineCves (#18683) --- .../CompoundSearchFilter/attributes/nodeComponent.ts | 4 ++-- .../CompoundSearchFilter/attributes/virtualMachine.ts | 4 ++-- .../VirtualMachineCves/Overview/VirtualMachinesCvesTable.tsx | 3 ++- .../VirtualMachine/VirtualMachinePageVulnerabilities.tsx | 1 + .../src/Containers/Vulnerabilities/searchFilterConfig.ts | 4 ++-- 5 files changed, 9 insertions(+), 7 deletions(-) diff --git a/ui/apps/platform/src/Components/CompoundSearchFilter/attributes/nodeComponent.ts b/ui/apps/platform/src/Components/CompoundSearchFilter/attributes/nodeComponent.ts index 61d33eab269cb..05ddf4a27efeb 100644 --- a/ui/apps/platform/src/Components/CompoundSearchFilter/attributes/nodeComponent.ts +++ b/ui/apps/platform/src/Components/CompoundSearchFilter/attributes/nodeComponent.ts @@ -4,14 +4,14 @@ import type { CompoundSearchFilterAttribute } from '../types'; export const Name: CompoundSearchFilterAttribute = { displayName: 'Name', - filterChipLabel: 'Image component name', + filterChipLabel: 'Node component name', searchTerm: 'Component', inputType: 'autocomplete', }; export const Version: CompoundSearchFilterAttribute = { displayName: 'Version', - filterChipLabel: 'Image component version', + filterChipLabel: 'Node component version', searchTerm: 'Component Version', inputType: 'text', }; diff --git a/ui/apps/platform/src/Components/CompoundSearchFilter/attributes/virtualMachine.ts b/ui/apps/platform/src/Components/CompoundSearchFilter/attributes/virtualMachine.ts index 738064f4b14d3..dd591a95496a0 100644 --- a/ui/apps/platform/src/Components/CompoundSearchFilter/attributes/virtualMachine.ts +++ b/ui/apps/platform/src/Components/CompoundSearchFilter/attributes/virtualMachine.ts @@ -9,14 
+9,14 @@ export const VirtualMachineCVEName: CompoundSearchFilterAttribute = { export const VirtualMachineComponentName: CompoundSearchFilterAttribute = { displayName: 'Name', - filterChipLabel: 'Component name', + filterChipLabel: 'Virtual machine component name', searchTerm: 'Component', inputType: 'text', }; export const VirtualMachineComponentVersion: CompoundSearchFilterAttribute = { displayName: 'Version', - filterChipLabel: 'Component version', + filterChipLabel: 'Virtual machine component version', searchTerm: 'Component Version', inputType: 'text', }; diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/Overview/VirtualMachinesCvesTable.tsx b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/Overview/VirtualMachinesCvesTable.tsx index c39f5f67e62b2..3658bb24b9c1d 100644 --- a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/Overview/VirtualMachinesCvesTable.tsx +++ b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/Overview/VirtualMachinesCvesTable.tsx @@ -44,9 +44,9 @@ import { getVirtualMachineEntityPagePath } from '../../utils/searchUtils'; import { VIRTUAL_MACHINE_SORT_FIELD } from '../../utils/sortFields'; const searchFilterConfig = [ - virtualMachinesSearchFilterConfig, virtualMachinesClusterSearchFilterConfig, virtualMachinesNamespaceSearchFilterConfig, + virtualMachinesSearchFilterConfig, ]; export const sortFields = [VIRTUAL_MACHINE_SORT_FIELD]; @@ -112,6 +112,7 @@ function VirtualMachinesCvesTable() { <> <AdvancedFiltersToolbar className="pf-v5-u-px-sm pf-v5-u-pb-0" + defaultSearchFilterEntity="Virtual machine" includeCveSeverityFilters={false} includeCveStatusFilters={false} searchFilter={searchFilter} diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageVulnerabilities.tsx b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageVulnerabilities.tsx index eaac427d5859e..27aff86290a52 100644 --- a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageVulnerabilities.tsx +++ b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageVulnerabilities.tsx @@ -126,6 +126,7 @@ function VirtualMachinePageVulnerabilities({ <VirtualMachineScanScopeAlert /> <AdvancedFiltersToolbar className="pf-v5-u-px-sm pf-v5-u-pb-0" + defaultSearchFilterEntity="CVE" searchFilter={searchFilter} searchFilterConfig={searchFilterConfig} onFilterChange={(newFilter) => { diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/searchFilterConfig.ts b/ui/apps/platform/src/Containers/Vulnerabilities/searchFilterConfig.ts index 85c33bdbb2917..eb7ae69da1318 100644 --- a/ui/apps/platform/src/Containers/Vulnerabilities/searchFilterConfig.ts +++ b/ui/apps/platform/src/Containers/Vulnerabilities/searchFilterConfig.ts @@ -121,7 +121,7 @@ export const virtualMachineCVESearchFilterConfig: CompoundSearchFilterEntity = { }; export const virtualMachineComponentSearchFilterConfig: CompoundSearchFilterEntity = { - displayName: 'Component', + displayName: 'Virtual machine component', searchCategory: 'SEARCH_UNSET', // we don't have autocomplete for virtual machines attributes: [VirtualMachineComponentName, VirtualMachineComponentVersion], }; @@ -129,7 +129,7 @@ export const virtualMachineComponentSearchFilterConfig: CompoundSearchFilterEnti export const virtualMachinesClusterSearchFilterConfig: CompoundSearchFilterEntity = { displayName: 
'Cluster',
     searchCategory: 'CLUSTERS',
-    attributes: [clusterNameAttribute, clusterIdAttribute],
+    attributes: [clusterIdAttribute, clusterNameAttribute],
 };
 
 export const virtualMachinesNamespaceSearchFilterConfig: CompoundSearchFilterEntity = {

From dda2c1440b51aeb517f2dc00e498988a54044449 Mon Sep 17 00:00:00 2001
From: Mark Pedrotti <pedrottimark@gmail.com>
Date: Tue, 27 Jan 2026 15:47:24 -0500
Subject: [PATCH 041/232] ROX-32804: Render TechPreviewLabel like
 TechnologyPreviewLabel (#18660)

---
 .../PatternFly/{ => PreviewLabel}/TechPreviewLabel.tsx         | 4 +++-
 .../PatternFly/PreviewLabel/TechnologyPreviewLabel.tsx         | 2 ++
 .../platform/src/Containers/Integrations/IntegrationPage.tsx   | 4 ++--
 .../Integrations/IntegrationTiles/IntegrationTile.tsx          | 2 +-
 .../IntegrationsListPage/IntegrationsListPage.tsx              | 4 ++--
 .../src/Containers/MainPage/Navigation/NavigationContent.tsx   | 2 +-
 6 files changed, 11 insertions(+), 7 deletions(-)
 rename ui/apps/platform/src/Components/PatternFly/{ => PreviewLabel}/TechPreviewLabel.tsx (59%)

diff --git a/ui/apps/platform/src/Components/PatternFly/TechPreviewLabel.tsx b/ui/apps/platform/src/Components/PatternFly/PreviewLabel/TechPreviewLabel.tsx
similarity index 59%
rename from ui/apps/platform/src/Components/PatternFly/TechPreviewLabel.tsx
rename to ui/apps/platform/src/Components/PatternFly/PreviewLabel/TechPreviewLabel.tsx
index 9bffb1a0407b5..18dbf9d5f426c 100644
--- a/ui/apps/platform/src/Components/PatternFly/TechPreviewLabel.tsx
+++ b/ui/apps/platform/src/Components/PatternFly/PreviewLabel/TechPreviewLabel.tsx
@@ -3,9 +3,11 @@ import type { LabelProps } from '@patternfly/react-core';
 
 export type TechPreviewLabelProps = LabelProps;
 
+// Render TechPreviewLabel when width is limited: in left navigation or integration tile.
+// Render TechnologyPreviewLabel when width is not limited: in heading.
 function TechPreviewLabel({ className, ...props }: TechPreviewLabelProps) {
     return (
-        <Label isCompact color="orange" className={className} {...props}>
+        <Label isCompact color="purple" className={className} {...props}>
             Tech preview
         </Label>
     );
diff --git a/ui/apps/platform/src/Components/PatternFly/PreviewLabel/TechnologyPreviewLabel.tsx b/ui/apps/platform/src/Components/PatternFly/PreviewLabel/TechnologyPreviewLabel.tsx
index b68772d288f31..559789a700f9a 100644
--- a/ui/apps/platform/src/Components/PatternFly/PreviewLabel/TechnologyPreviewLabel.tsx
+++ b/ui/apps/platform/src/Components/PatternFly/PreviewLabel/TechnologyPreviewLabel.tsx
@@ -1,5 +1,7 @@
 import PreviewLabelBase from './PreviewLabelBase';
 
+// Render TechPreviewLabel when width is limited: in left navigation or integration tile.
+// Render TechnologyPreviewLabel when width is not limited: in heading.
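+//
+// Editor's note (illustration only, not part of the original change): based on the call
+// sites touched by this patch, the intended split is roughly
+//
+//	{isTechPreview && <TechnologyPreviewLabel />}   // page headings and integration list rows
+//	<TechPreviewLabel />                            // NavigationContent and IntegrationTile
+//
+// i.e. the compact purple Label is reserved for space-constrained chrome.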
export function TechnologyPreviewLabel() { return ( <PreviewLabelBase diff --git a/ui/apps/platform/src/Containers/Integrations/IntegrationPage.tsx b/ui/apps/platform/src/Containers/Integrations/IntegrationPage.tsx index e00140bb37f0f..3f08d90e2fb32 100644 --- a/ui/apps/platform/src/Containers/Integrations/IntegrationPage.tsx +++ b/ui/apps/platform/src/Containers/Integrations/IntegrationPage.tsx @@ -18,7 +18,7 @@ import BreadcrumbItemLink from 'Components/BreadcrumbItemLink'; import type { Traits } from 'types/traits.proto'; import TraitsOriginLabel from 'Components/TraitsOriginLabel'; import { isUserResource } from 'utils/traits.utils'; -import TechPreviewLabel from 'Components/PatternFly/TechPreviewLabel'; +import TechnologyPreviewLabel from 'Components/PatternFly/PreviewLabel/TechnologyPreviewLabel'; import { getIntegrationLabel } from './utils/integrationsList'; import { getEditDisabledMessage, getIsMachineAccessConfig } from './utils/integrationUtils'; import usePageState from './hooks/usePageState'; @@ -72,7 +72,7 @@ function IntegrationPage({ title, name, traits, children }: IntegrationPageProps )} {isTechPreview && ( <FlexItem> - <TechPreviewLabel /> + <TechnologyPreviewLabel /> </FlexItem> )} {hasTraitsLabel && <TraitsOriginLabel traits={traits} />} diff --git a/ui/apps/platform/src/Containers/Integrations/IntegrationTiles/IntegrationTile.tsx b/ui/apps/platform/src/Containers/Integrations/IntegrationTiles/IntegrationTile.tsx index 08a7d166408f0..fe40f177f3f8f 100644 --- a/ui/apps/platform/src/Containers/Integrations/IntegrationTiles/IntegrationTile.tsx +++ b/ui/apps/platform/src/Containers/Integrations/IntegrationTiles/IntegrationTile.tsx @@ -11,7 +11,7 @@ import { } from '@patternfly/react-core'; import { Link } from 'react-router-dom-v5-compat'; -import TechPreviewLabel from 'Components/PatternFly/TechPreviewLabel'; +import TechPreviewLabel from 'Components/PatternFly/PreviewLabel/TechPreviewLabel'; type IntegrationTileProps = { categories?: string; diff --git a/ui/apps/platform/src/Containers/Integrations/IntegrationsListPage/IntegrationsListPage.tsx b/ui/apps/platform/src/Containers/Integrations/IntegrationsListPage/IntegrationsListPage.tsx index 523921da074d9..dcb2e54a94f98 100644 --- a/ui/apps/platform/src/Containers/Integrations/IntegrationsListPage/IntegrationsListPage.tsx +++ b/ui/apps/platform/src/Containers/Integrations/IntegrationsListPage/IntegrationsListPage.tsx @@ -21,7 +21,7 @@ import { actions as cloudSourcesActions } from 'reducers/cloudSources'; import { getTableUIState } from 'utils/getTableUIState'; import { integrationsPath } from 'routePaths'; -import TechPreviewLabel from 'Components/PatternFly/TechPreviewLabel'; +import TechnologyPreviewLabel from 'Components/PatternFly/PreviewLabel/TechnologyPreviewLabel'; import useIntegrations from '../hooks/useIntegrations'; import { getIntegrationLabel } from '../utils/integrationsList'; import { @@ -122,7 +122,7 @@ function IntegrationsListPage({ alignItems={{ default: 'alignItemsCenter' }} > <span>{typeLabel}</span> - {isTechPreview && <TechPreviewLabel />} + {isTechPreview && <TechnologyPreviewLabel />} </Flex> )} diff --git a/ui/apps/platform/src/Containers/MainPage/Navigation/NavigationContent.tsx b/ui/apps/platform/src/Containers/MainPage/Navigation/NavigationContent.tsx index 60e9aa61f4cbe..6112980982a60 100644 --- a/ui/apps/platform/src/Containers/MainPage/Navigation/NavigationContent.tsx +++ b/ui/apps/platform/src/Containers/MainPage/Navigation/NavigationContent.tsx @@ -1,7 +1,7 @@ import type { 
CSSProperties, ReactElement, ReactNode } from 'react'; import { Label } from '@patternfly/react-core'; -import TechPreviewLabel from 'Components/PatternFly/TechPreviewLabel'; +import TechPreviewLabel from 'Components/PatternFly/PreviewLabel/TechPreviewLabel'; type NavigationContentVariant = 'Deprecated' | 'TechPreview'; From c4ff83a111b0563e335927713916e13cbff48c33 Mon Sep 17 00:00:00 2001 From: Charmik Sheth <101146970+charmik-redhat@users.noreply.github.com> Date: Wed, 28 Jan 2026 02:31:40 +0530 Subject: [PATCH 042/232] Disable deployment container migration for ImageV2 (#18684) --- .../migration.go | 2 +- .../migration_impl.go | 90 +------- .../migration_test.go | 207 ------------------ .../schema/convert_deployments.go | 138 ------------ .../schema/convert_deployments_test.go | 21 -- .../schema/deployments.go | 195 ----------------- 6 files changed, 8 insertions(+), 645 deletions(-) delete mode 100644 migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/migration_test.go delete mode 100644 migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/schema/convert_deployments.go delete mode 100644 migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/schema/convert_deployments_test.go delete mode 100644 migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/schema/deployments.go diff --git a/migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/migration.go b/migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/migration.go index d77d584286dce..e1fc286a40b55 100644 --- a/migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/migration.go +++ b/migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/migration.go @@ -15,7 +15,7 @@ var ( migration = types.Migration{ StartingSeqNum: startSeqNum, VersionAfter: &storage.Version{SeqNum: int32(startSeqNum + 1)}, - Run: migrate, + Run: migrate, } ) diff --git a/migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/migration_impl.go b/migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/migration_impl.go index d635b5d534c4d..d4f6196662719 100644 --- a/migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/migration_impl.go +++ b/migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/migration_impl.go @@ -1,95 +1,19 @@ package m213tom214 import ( - "github.com/hashicorp/go-multierror" - "github.com/jackc/pgx/v5" - "github.com/stackrox/rox/migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/schema" "github.com/stackrox/rox/migrator/types" "github.com/stackrox/rox/pkg/logging" - "github.com/stackrox/rox/pkg/postgres" - "github.com/stackrox/rox/pkg/postgres/pgutils" - "github.com/stackrox/rox/pkg/uuid" ) var ( log = logging.LoggerForModule() ) -var batchSize = 5000 - -func migrate(database *types.Databases) error { - // Use databases.DBCtx to take advantage of the transaction wrapping present in the migration initiator - pgutils.CreateTableFromModel(database.DBCtx, database.GormDB, schema.CreateTableDeploymentsStmt) - log.Infof("Batch size is %d", batchSize) - - db := database.PostgresDB - - conn, err := db.Acquire(database.DBCtx) - defer conn.Release() - if err != nil { - return err - } - updatedRows := 0 - for { - batch := pgx.Batch{} - // This will continue looping through the containers until there are no more containers that need to have their - // image_idv2 field 
populated, in batches up to batchSize - getStmt := `SELECT image_name_fullname, image_id FROM deployments_containers WHERE image_id is not null AND image_id != '' AND image_name_fullname is not null AND image_name_fullname != '' AND (image_idv2 is null OR image_idv2 = '') LIMIT $1` - rows, err := db.Query(database.DBCtx, getStmt, batchSize) - if err != nil { - return err - } - defer rows.Close() - - containers, err := readRows(rows) - if err != nil { - return err - } - for _, container := range containers { - updateStmt := `UPDATE deployments_containers SET image_idv2 = $1 WHERE image_name_fullname = $2 AND image_id = $3` - imageIdV2 := uuid.NewV5FromNonUUIDs(container.ImageNameFullName, container.ImageID).String() - batch.Queue(updateStmt, imageIdV2, container.ImageNameFullName, container.ImageID) - } - batchResults := conn.SendBatch(database.DBCtx, &batch) - var result *multierror.Error - for i := 0; i < batch.Len(); i++ { - _, err = batchResults.Exec() - result = multierror.Append(result, err) - if err == nil { - updatedRows += 1 - } - } - if err = batchResults.Close(); err != nil { - return err - } - if err = result.ErrorOrNil(); err != nil { - return err - } - if len(containers) != batchSize { - log.Infof("Populated the image_idv2 field in deployment containers. %d rows updated.", updatedRows) - return nil - } - } -} - -func readRows(rows *postgres.Rows) ([]*schema.DeploymentsContainers, error) { - var containers []*schema.DeploymentsContainers - - for rows.Next() { - var imageName string - var imageId string - - if err := rows.Scan(&imageName, &imageId); err != nil { - log.Errorf("Error scanning row: %v", err) - } - - container := &schema.DeploymentsContainers{ - ImageID: imageId, - ImageNameFullName: imageName, - } - containers = append(containers, container) - } - - log.Debugf("Read returned %d containers", len(containers)) - return containers, rows.Err() +func migrate(_ *types.Databases) error { + // This migration has been reverted due to the feature being disabled by default. + // We can't easily revert due to the way migrations stack on top of each other. 
+ // The original changes can be found in commit db2652bb58a054211f32eed4ac18abfe17074ea0 + // Or via https://github.com/stackrox/stackrox/commit/db2652bb58a054211f32eed4ac18abfe17074ea0 + log.Debugf("Skipping migration 213 to 214") + return nil } diff --git a/migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/migration_test.go b/migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/migration_test.go deleted file mode 100644 index 04e6fb7204110..0000000000000 --- a/migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/migration_test.go +++ /dev/null @@ -1,207 +0,0 @@ -//go:build sql_integration - -package m213tom214 - -import ( - "context" - "fmt" - "testing" - - "github.com/stackrox/rox/migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/schema" - pghelper "github.com/stackrox/rox/migrator/migrations/postgreshelper" - "github.com/stackrox/rox/migrator/types" - "github.com/stackrox/rox/pkg/fixtures" - "github.com/stackrox/rox/pkg/postgres" - "github.com/stackrox/rox/pkg/postgres/pgutils" - "github.com/stackrox/rox/pkg/sac" - "github.com/stackrox/rox/pkg/uuid" - "github.com/stretchr/testify/suite" -) - -type migrationTestSuite struct { - suite.Suite - - db *pghelper.TestPostgres - ctx context.Context - existingDB bool -} - -func TestMigration(t *testing.T) { - suite.Run(t, new(migrationTestSuite)) -} - -func (s *migrationTestSuite) SetupSuite() { - s.ctx = sac.WithAllAccess(context.Background()) - s.db = pghelper.ForT(s.T(), false) - // Use the below lines to use a large existing database for testing. - // This is beneficial to test large batches at once. - // s.db = pghelper.ForTExistingDB(s.T(), false, "7593dc135f89446b_oIIuR") - // s.existingDB = true - if !s.existingDB { - pgutils.CreateTableFromModel(s.ctx, s.db.GetGormDB(), schema.CreateTableDeploymentsStmt) - } -} - -func (s *migrationTestSuite) TestMigration() { - // Test with multiple batch sizes to catch edge cases where the number of rows in the DB is on the threshold of the - // batch size - for _, i := range []int{3, 4, 5} { - s.Run(fmt.Sprintf("Batch size of %d", i), func() { - batchSize = i - if !s.existingDB { - deployments := map[string]*schema.DeploymentsContainers{ - "08b69e6e-a96e-5b9a-b814-93d004d01cd8": { - ImageNameFullName: "us-central1-artifactregistry.gcr.io/gke-release/gke-release/gke-metrics-collector:20250508_2300_RC0@sha256:d074c77bdc0ee1c4245113e62d93ef1ed6f1a51960ea854a972861a6a0c774ce", - ImageID: "sha256:d074c77bdc0ee1c4245113e62d93ef1ed6f1a51960ea854a972861a6a0c774ce", - ImageIDV2: "08b69e6e-a96e-5b9a-b814-93d004d01cd8", - DeploymentsID: fixtures.GetDeployment().GetId(), - Idx: 0, - }, - "f5e05ef2-f2a8-50b2-90ac-c1a445767e94": { - ImageNameFullName: "us-central1-artifactregistry.gcr.io/gke-release/gke-release/gke-metrics-agent:1.15.6-gke.0@sha256:8d3f6c749a8589ac729c66564b41e8babb35c5f181e774cd586c9d2761beeb96", - ImageID: "sha256:8d3f6c749a8589ac729c66564b41e8babb35c5f181e774cd586c9d2761beeb96", - ImageIDV2: "f5e05ef2-f2a8-50b2-90ac-c1a445767e94", - DeploymentsID: fixtures.GetDeployment().GetId(), - Idx: 1, - }, - "e338a8ed-8b3e-5294-8b54-fb907774e2e8": { - ImageNameFullName: "us-central1-artifactregistry.gcr.io/gke-release/gke-release/cpvpa:v0.8.9-gke.11@sha256:ac9bb16bbfeefd9947ceb049c30fe3e6f2c18cbafc2fb213ef3ef88f940d4a29", - ImageID: "sha256:ac9bb16bbfeefd9947ceb049c30fe3e6f2c18cbafc2fb213ef3ef88f940d4a29", - ImageIDV2: "e338a8ed-8b3e-5294-8b54-fb907774e2e8", - DeploymentsID: fixtures.GetDeployment().GetId(), 
- Idx: 2, - }, - "6933b4da-6607-517a-aa4d-9f2ac5caac78": { - ImageNameFullName: "us-central1-artifactregistry.gcr.io/gke-release/gke-release/fluent-bit:v1.8.1200-gke.14@sha256:fe028dfcf00bdaded6770720de8df8f3d24e841f41a968138ae00d699003aa0f", - ImageID: "sha256:fe028dfcf00bdaded6770720de8df8f3d24e841f41a968138ae00d699003aa0f", - ImageIDV2: "6933b4da-6607-517a-aa4d-9f2ac5caac78", - DeploymentsID: fixtures.GetDeployment().GetId(), - Idx: 3, - }, - } - - err := insertIntoDeployments(s.ctx, s.db, &schema.Deployments{ - ID: fixtures.GetDeployment().GetId(), - Name: fixtures.GetDeployment().GetName(), - Type: fixtures.GetDeployment().GetType(), - Namespace: fixtures.GetDeployment().GetNamespace(), - NamespaceID: fixtures.GetDeployment().GetNamespaceId(), - OrchestratorComponent: fixtures.GetDeployment().GetOrchestratorComponent(), - Labels: fixtures.GetDeployment().GetLabels(), - PodLabels: fixtures.GetDeployment().GetPodLabels(), - ClusterID: fixtures.GetDeployment().GetClusterId(), - ClusterName: fixtures.GetDeployment().GetClusterName(), - Annotations: fixtures.GetDeployment().GetAnnotations(), - Priority: fixtures.GetDeployment().GetPriority(), - ServiceAccount: fixtures.GetDeployment().GetServiceAccount(), - ServiceAccountPermissionLevel: fixtures.GetDeployment().GetServiceAccountPermissionLevel(), - RiskScore: fixtures.GetDeployment().GetRiskScore(), - PlatformComponent: fixtures.GetDeployment().GetPlatformComponent(), - }) - s.Require().NoError(err) - - for _, deployment := range deployments { - sql := "INSERT INTO deployments_containers (image_name_fullname, image_id, deployments_id, idx) VALUES ($1, $2, $3, $4)" - _, err := s.db.Exec(s.ctx, sql, deployment.ImageNameFullName, deployment.ImageID, deployment.DeploymentsID, deployment.Idx) - s.Require().NoError(err) - } - - dbs := &types.Databases{ - GormDB: s.db.GetGormDB(), - PostgresDB: s.db.DB, - DBCtx: s.ctx, - } - - s.Require().NoError(migration.Run(dbs)) - - sql := "SELECT image_name_fullname, image_id, image_idv2, deployments_id, idx FROM deployments_containers" - rows, err := s.db.Query(s.ctx, sql) - s.Require().NoError(err) - defer rows.Close() - containers, err := readRowsWithIDV2(rows) - s.Require().NoError(err) - s.Require().Len(containers, 4) - for _, container := range containers { - expectedDeployment, found := deployments[container.ImageIDV2] - s.Require().True(found) - s.Equal(expectedDeployment, container) - } - } else { - limit := 10000 - page := 0 - for { - sql := "SELECT image_name_fullname, image_id, image_idv2, deployments_id, idx FROM deployments_containers LIMIT $1 OFFSET $2" - rows, err := s.db.Query(s.ctx, sql, limit, page*limit) - s.Require().NoError(err) - containers, err := readRowsWithIDV2(rows) - s.Require().NoError(err) - for _, container := range containers { - s.Equal(uuid.NewV5FromNonUUIDs(container.ImageNameFullName, container.ImageID).String(), container.ImageIDV2) - } - rows.Close() - if len(containers) != limit { - break - } - page++ - } - } - _, err := s.db.Exec(s.ctx, "DELETE FROM deployments_containers WHERE true") - s.Require().NoError(err) - _, err = s.db.Exec(s.ctx, "DELETE FROM deployments WHERE true") - s.Require().NoError(err) - }) - } -} - -func readRowsWithIDV2(rows *postgres.Rows) ([]*schema.DeploymentsContainers, error) { - var containers []*schema.DeploymentsContainers - - for rows.Next() { - var imageName string - var imageId string - var imageIdV2 string - var deploymentsID string - var idx int - - if err := rows.Scan(&imageName, &imageId, &imageIdV2, &deploymentsID, &idx); err != nil 
{ - return nil, pgutils.ErrNilIfNoRows(err) - } - - container := &schema.DeploymentsContainers{ - ImageID: imageId, - ImageNameFullName: imageName, - ImageIDV2: imageIdV2, - DeploymentsID: deploymentsID, - Idx: idx, - } - containers = append(containers, container) - } - - return containers, rows.Err() -} - -func insertIntoDeployments(ctx context.Context, db postgres.DB, obj *schema.Deployments) error { - values := []interface{}{ - pgutils.NilOrUUID(obj.ID), - obj.Name, - obj.Type, - obj.Namespace, - pgutils.NilOrUUID(obj.NamespaceID), - obj.OrchestratorComponent, - pgutils.EmptyOrMap(obj.Labels), - pgutils.EmptyOrMap(obj.PodLabels), - pgutils.NilOrUUID(obj.ClusterID), - obj.ClusterName, - pgutils.EmptyOrMap(obj.Annotations), - obj.Priority, - obj.ServiceAccount, - obj.ServiceAccountPermissionLevel, - obj.RiskScore, - obj.PlatformComponent, - } - - finalStr := "INSERT INTO deployments (Id, Name, Type, Namespace, NamespaceId, OrchestratorComponent, Labels, PodLabels, ClusterId, ClusterName, Annotations, Priority, ServiceAccount, ServiceAccountPermissionLevel, RiskScore, PlatformComponent) VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16) ON CONFLICT(Id) DO UPDATE SET Id = EXCLUDED.Id, Name = EXCLUDED.Name, Type = EXCLUDED.Type, Namespace = EXCLUDED.Namespace, NamespaceId = EXCLUDED.NamespaceId, OrchestratorComponent = EXCLUDED.OrchestratorComponent, Labels = EXCLUDED.Labels, PodLabels = EXCLUDED.PodLabels, ClusterId = EXCLUDED.ClusterId, ClusterName = EXCLUDED.ClusterName, Annotations = EXCLUDED.Annotations, Priority = EXCLUDED.Priority, ServiceAccount = EXCLUDED.ServiceAccount, ServiceAccountPermissionLevel = EXCLUDED.ServiceAccountPermissionLevel, RiskScore = EXCLUDED.RiskScore, PlatformComponent = EXCLUDED.PlatformComponent" - _, err := db.Exec(ctx, finalStr, values...) - - return err -} diff --git a/migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/schema/convert_deployments.go b/migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/schema/convert_deployments.go deleted file mode 100644 index f4e45a33637c1..0000000000000 --- a/migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/schema/convert_deployments.go +++ /dev/null @@ -1,138 +0,0 @@ -// Code generated by pg-bindings generator. DO NOT EDIT. 
-package schema - -import ( - "github.com/lib/pq" - "github.com/stackrox/rox/generated/storage" - "github.com/stackrox/rox/pkg/protocompat" -) - -// ConvertDeploymentFromProto converts a `*storage.Deployment` to Gorm model -func ConvertDeploymentFromProto(obj *storage.Deployment) (*Deployments, error) { - serialized, err := obj.MarshalVT() - if err != nil { - return nil, err - } - model := &Deployments{ - ID: obj.GetId(), - Name: obj.GetName(), - Type: obj.GetType(), - Namespace: obj.GetNamespace(), - NamespaceID: obj.GetNamespaceId(), - OrchestratorComponent: obj.GetOrchestratorComponent(), - Labels: obj.GetLabels(), - PodLabels: obj.GetPodLabels(), - Created: protocompat.NilOrTime(obj.GetCreated()), - ClusterID: obj.GetClusterId(), - ClusterName: obj.GetClusterName(), - Annotations: obj.GetAnnotations(), - Priority: obj.GetPriority(), - ImagePullSecrets: pq.Array(obj.GetImagePullSecrets()).(*pq.StringArray), - ServiceAccount: obj.GetServiceAccount(), - ServiceAccountPermissionLevel: obj.GetServiceAccountPermissionLevel(), - RiskScore: obj.GetRiskScore(), - PlatformComponent: obj.GetPlatformComponent(), - Serialized: serialized, - } - return model, nil -} - -// ConvertContainerFromProto converts a `*storage.Container` to Gorm model -func ConvertContainerFromProto(obj *storage.Container, idx int, deploymentID string) (*DeploymentsContainers, error) { - model := &DeploymentsContainers{ - DeploymentsID: deploymentID, - Idx: idx, - ImageID: obj.GetImage().GetId(), - ImageNameRegistry: obj.GetImage().GetName().GetRegistry(), - ImageNameRemote: obj.GetImage().GetName().GetRemote(), - ImageNameTag: obj.GetImage().GetName().GetTag(), - ImageNameFullName: obj.GetImage().GetName().GetFullName(), - ImageIDV2: obj.GetImage().GetIdV2(), - SecurityContextPrivileged: obj.GetSecurityContext().GetPrivileged(), - SecurityContextDropCapabilities: pq.Array(obj.GetSecurityContext().GetDropCapabilities()).(*pq.StringArray), - SecurityContextAddCapabilities: pq.Array(obj.GetSecurityContext().GetAddCapabilities()).(*pq.StringArray), - SecurityContextReadOnlyRootFilesystem: obj.GetSecurityContext().GetReadOnlyRootFilesystem(), - ResourcesCPUCoresRequest: obj.GetResources().GetCpuCoresRequest(), - ResourcesCPUCoresLimit: obj.GetResources().GetCpuCoresLimit(), - ResourcesMemoryMbRequest: obj.GetResources().GetMemoryMbRequest(), - ResourcesMemoryMbLimit: obj.GetResources().GetMemoryMbLimit(), - } - return model, nil -} - -// ConvertContainerConfig_EnvironmentConfigFromProto converts a `*storage.ContainerConfig_EnvironmentConfig` to Gorm model -func ConvertContainerConfig_EnvironmentConfigFromProto(obj *storage.ContainerConfig_EnvironmentConfig, idx int, deploymentID string, deploymentContainerIdx int) (*DeploymentsContainersEnvs, error) { - model := &DeploymentsContainersEnvs{ - DeploymentsID: deploymentID, - DeploymentsContainersIdx: deploymentContainerIdx, - Idx: idx, - Key: obj.GetKey(), - Value: obj.GetValue(), - EnvVarSource: obj.GetEnvVarSource(), - } - return model, nil -} - -// ConvertVolumeFromProto converts a `*storage.Volume` to Gorm model -func ConvertVolumeFromProto(obj *storage.Volume, idx int, deploymentID string, deploymentContainerIdx int) (*DeploymentsContainersVolumes, error) { - model := &DeploymentsContainersVolumes{ - DeploymentsID: deploymentID, - DeploymentsContainersIdx: deploymentContainerIdx, - Idx: idx, - Name: obj.GetName(), - Source: obj.GetSource(), - Destination: obj.GetDestination(), - ReadOnly: obj.GetReadOnly(), - Type: obj.GetType(), - } - return model, nil -} - -// 
ConvertEmbeddedSecretFromProto converts a `*storage.EmbeddedSecret` to Gorm model -func ConvertEmbeddedSecretFromProto(obj *storage.EmbeddedSecret, idx int, deploymentID string, deploymentContainerIdx int) (*DeploymentsContainersSecrets, error) { - model := &DeploymentsContainersSecrets{ - DeploymentsID: deploymentID, - DeploymentsContainersIdx: deploymentContainerIdx, - Idx: idx, - Name: obj.GetName(), - Path: obj.GetPath(), - } - return model, nil -} - -// ConvertPortConfigFromProto converts a `*storage.PortConfig` to Gorm model -func ConvertPortConfigFromProto(obj *storage.PortConfig, idx int, deploymentID string) (*DeploymentsPorts, error) { - model := &DeploymentsPorts{ - DeploymentsID: deploymentID, - Idx: idx, - ContainerPort: obj.GetContainerPort(), - Protocol: obj.GetProtocol(), - Exposure: obj.GetExposure(), - } - return model, nil -} - -// ConvertPortConfig_ExposureInfoFromProto converts a `*storage.PortConfig_ExposureInfo` to Gorm model -func ConvertPortConfig_ExposureInfoFromProto(obj *storage.PortConfig_ExposureInfo, idx int, deploymentID string, deploymentPortIdx int) (*DeploymentsPortsExposureInfos, error) { - model := &DeploymentsPortsExposureInfos{ - DeploymentsID: deploymentID, - DeploymentsPortsIdx: deploymentPortIdx, - Idx: idx, - Level: obj.GetLevel(), - ServiceName: obj.GetServiceName(), - ServicePort: obj.GetServicePort(), - NodePort: obj.GetNodePort(), - ExternalIps: pq.Array(obj.GetExternalIps()).(*pq.StringArray), - ExternalHostnames: pq.Array(obj.GetExternalHostnames()).(*pq.StringArray), - } - return model, nil -} - -// ConvertDeploymentToProto converts Gorm model `Deployments` to its protobuf type object -func ConvertDeploymentToProto(m *Deployments) (*storage.Deployment, error) { - var msg storage.Deployment - if err := msg.UnmarshalVTUnsafe(m.Serialized); err != nil { - return nil, err - } - return &msg, nil -} diff --git a/migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/schema/convert_deployments_test.go b/migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/schema/convert_deployments_test.go deleted file mode 100644 index da902ab7c408e..0000000000000 --- a/migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/schema/convert_deployments_test.go +++ /dev/null @@ -1,21 +0,0 @@ -// Code generated by pg-bindings generator. DO NOT EDIT. -package schema - -import ( - "testing" - - "github.com/stackrox/rox/generated/storage" - "github.com/stackrox/rox/pkg/protoassert" - "github.com/stackrox/rox/pkg/testutils" - "github.com/stretchr/testify/assert" -) - -func TestDeploymentSerialization(t *testing.T) { - obj := &storage.Deployment{} - assert.NoError(t, testutils.FullInit(obj, testutils.UniqueInitializer(), testutils.JSONFieldsFilter)) - m, err := ConvertDeploymentFromProto(obj) - assert.NoError(t, err) - conv, err := ConvertDeploymentToProto(m) - assert.NoError(t, err) - protoassert.Equal(t, obj, conv) -} diff --git a/migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/schema/deployments.go b/migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/schema/deployments.go deleted file mode 100644 index e1bab0bd6b8aa..0000000000000 --- a/migrator/migrations/m_213_to_m_214_populate_deployment_containers_imageidv2/schema/deployments.go +++ /dev/null @@ -1,195 +0,0 @@ -// Code generated by pg-bindings generator. DO NOT EDIT. 
- -package schema - -import ( - "reflect" - "time" - - "github.com/lib/pq" - v1 "github.com/stackrox/rox/generated/api/v1" - "github.com/stackrox/rox/generated/storage" - "github.com/stackrox/rox/pkg/postgres" - "github.com/stackrox/rox/pkg/postgres/walker" - "github.com/stackrox/rox/pkg/sac/resources" - "github.com/stackrox/rox/pkg/search" -) - -var ( - // CreateTableDeploymentsStmt holds the create statement for table `deployments`. - CreateTableDeploymentsStmt = &postgres.CreateStmts{ - GormModel: (*Deployments)(nil), - Children: []*postgres.CreateStmts{ - &postgres.CreateStmts{ - GormModel: (*DeploymentsContainers)(nil), - Children: []*postgres.CreateStmts{ - &postgres.CreateStmts{ - GormModel: (*DeploymentsContainersEnvs)(nil), - Children: []*postgres.CreateStmts{}, - }, - &postgres.CreateStmts{ - GormModel: (*DeploymentsContainersVolumes)(nil), - Children: []*postgres.CreateStmts{}, - }, - &postgres.CreateStmts{ - GormModel: (*DeploymentsContainersSecrets)(nil), - Children: []*postgres.CreateStmts{}, - }, - }, - }, - &postgres.CreateStmts{ - GormModel: (*DeploymentsPorts)(nil), - Children: []*postgres.CreateStmts{ - &postgres.CreateStmts{ - GormModel: (*DeploymentsPortsExposureInfos)(nil), - Children: []*postgres.CreateStmts{}, - }, - }, - }, - }, - } - - // DeploymentsSchema is the go schema for table `deployments`. - DeploymentsSchema = func() *walker.Schema { - schema := walker.Walk(reflect.TypeOf((*storage.Deployment)(nil)), "deployments") - - schema.SetOptionsMap(search.Walk(v1.SearchCategory_DEPLOYMENTS, "deployment", (*storage.Deployment)(nil))) - schema.SetSearchScope([]v1.SearchCategory{ - v1.SearchCategory_IMAGE_VULNERABILITIES_V2, - v1.SearchCategory_IMAGE_COMPONENTS_V2, - v1.SearchCategory_IMAGE_VULNERABILITIES, - v1.SearchCategory_COMPONENT_VULN_EDGE, - v1.SearchCategory_IMAGE_COMPONENTS, - v1.SearchCategory_IMAGE_COMPONENT_EDGE, - v1.SearchCategory_IMAGE_VULN_EDGE, - v1.SearchCategory_IMAGES, - v1.SearchCategory_IMAGES_V2, - v1.SearchCategory_DEPLOYMENTS, - v1.SearchCategory_NAMESPACES, - v1.SearchCategory_CLUSTERS, - v1.SearchCategory_PROCESS_INDICATORS, - v1.SearchCategory_PODS, - }...) - schema.ScopingResource = resources.Deployment - return schema - }() -) - -const ( - // DeploymentsTableName specifies the name of the table in postgres. - DeploymentsTableName = "deployments" - // DeploymentsContainersTableName specifies the name of the table in postgres. - DeploymentsContainersTableName = "deployments_containers" - // DeploymentsContainersEnvsTableName specifies the name of the table in postgres. - DeploymentsContainersEnvsTableName = "deployments_containers_envs" - // DeploymentsContainersVolumesTableName specifies the name of the table in postgres. - DeploymentsContainersVolumesTableName = "deployments_containers_volumes" - // DeploymentsContainersSecretsTableName specifies the name of the table in postgres. - DeploymentsContainersSecretsTableName = "deployments_containers_secrets" - // DeploymentsPortsTableName specifies the name of the table in postgres. - DeploymentsPortsTableName = "deployments_ports" - // DeploymentsPortsExposureInfosTableName specifies the name of the table in postgres. - DeploymentsPortsExposureInfosTableName = "deployments_ports_exposure_infos" -) - -// Deployments holds the Gorm model for Postgres table `deployments`. 
-type Deployments struct { - ID string `gorm:"column:id;type:uuid;primaryKey"` - Name string `gorm:"column:name;type:varchar"` - Type string `gorm:"column:type;type:varchar"` - Namespace string `gorm:"column:namespace;type:varchar;index:deployments_sac_filter,type:btree"` - NamespaceID string `gorm:"column:namespaceid;type:uuid"` - OrchestratorComponent bool `gorm:"column:orchestratorcomponent;type:bool"` - Labels map[string]string `gorm:"column:labels;type:jsonb"` - PodLabels map[string]string `gorm:"column:podlabels;type:jsonb"` - Created *time.Time `gorm:"column:created;type:timestamp"` - ClusterID string `gorm:"column:clusterid;type:uuid;index:deployments_sac_filter,type:btree"` - ClusterName string `gorm:"column:clustername;type:varchar"` - Annotations map[string]string `gorm:"column:annotations;type:jsonb"` - Priority int64 `gorm:"column:priority;type:bigint"` - ImagePullSecrets *pq.StringArray `gorm:"column:imagepullsecrets;type:text[]"` - ServiceAccount string `gorm:"column:serviceaccount;type:varchar"` - ServiceAccountPermissionLevel storage.PermissionLevel `gorm:"column:serviceaccountpermissionlevel;type:integer"` - RiskScore float32 `gorm:"column:riskscore;type:numeric;index:deployments_riskscore,type:btree"` - PlatformComponent bool `gorm:"column:platformcomponent;type:bool"` - Serialized []byte `gorm:"column:serialized;type:bytea"` -} - -// DeploymentsContainers holds the Gorm model for Postgres table `deployments_containers`. -type DeploymentsContainers struct { - DeploymentsID string `gorm:"column:deployments_id;type:uuid;primaryKey"` - Idx int `gorm:"column:idx;type:integer;primaryKey;index:deploymentscontainers_idx,type:btree"` - ImageID string `gorm:"column:image_id;type:varchar;index:deploymentscontainers_image_id,type:hash"` - ImageNameRegistry string `gorm:"column:image_name_registry;type:varchar"` - ImageNameRemote string `gorm:"column:image_name_remote;type:varchar"` - ImageNameTag string `gorm:"column:image_name_tag;type:varchar"` - ImageNameFullName string `gorm:"column:image_name_fullname;type:varchar"` - ImageIDV2 string `gorm:"column:image_idv2;type:varchar;index:deploymentscontainers_image_idv2,type:btree"` - SecurityContextPrivileged bool `gorm:"column:securitycontext_privileged;type:bool"` - SecurityContextDropCapabilities *pq.StringArray `gorm:"column:securitycontext_dropcapabilities;type:text[]"` - SecurityContextAddCapabilities *pq.StringArray `gorm:"column:securitycontext_addcapabilities;type:text[]"` - SecurityContextReadOnlyRootFilesystem bool `gorm:"column:securitycontext_readonlyrootfilesystem;type:bool"` - ResourcesCPUCoresRequest float32 `gorm:"column:resources_cpucoresrequest;type:numeric"` - ResourcesCPUCoresLimit float32 `gorm:"column:resources_cpucoreslimit;type:numeric"` - ResourcesMemoryMbRequest float32 `gorm:"column:resources_memorymbrequest;type:numeric"` - ResourcesMemoryMbLimit float32 `gorm:"column:resources_memorymblimit;type:numeric"` - DeploymentsRef Deployments `gorm:"foreignKey:deployments_id;references:id;belongsTo;constraint:OnDelete:CASCADE"` -} - -// DeploymentsContainersEnvs holds the Gorm model for Postgres table `deployments_containers_envs`. 
-type DeploymentsContainersEnvs struct { - DeploymentsID string `gorm:"column:deployments_id;type:uuid;primaryKey"` - DeploymentsContainersIdx int `gorm:"column:deployments_containers_idx;type:integer;primaryKey"` - Idx int `gorm:"column:idx;type:integer;primaryKey;index:deploymentscontainersenvs_idx,type:btree"` - Key string `gorm:"column:key;type:varchar"` - Value string `gorm:"column:value;type:varchar"` - EnvVarSource storage.ContainerConfig_EnvironmentConfig_EnvVarSource `gorm:"column:envvarsource;type:integer"` - DeploymentsContainersRef DeploymentsContainers `gorm:"foreignKey:deployments_id,deployments_containers_idx;references:deployments_id,idx;belongsTo;constraint:OnDelete:CASCADE"` -} - -// DeploymentsContainersVolumes holds the Gorm model for Postgres table `deployments_containers_volumes`. -type DeploymentsContainersVolumes struct { - DeploymentsID string `gorm:"column:deployments_id;type:uuid;primaryKey"` - DeploymentsContainersIdx int `gorm:"column:deployments_containers_idx;type:integer;primaryKey"` - Idx int `gorm:"column:idx;type:integer;primaryKey;index:deploymentscontainersvolumes_idx,type:btree"` - Name string `gorm:"column:name;type:varchar"` - Source string `gorm:"column:source;type:varchar"` - Destination string `gorm:"column:destination;type:varchar"` - ReadOnly bool `gorm:"column:readonly;type:bool"` - Type string `gorm:"column:type;type:varchar"` - DeploymentsContainersRef DeploymentsContainers `gorm:"foreignKey:deployments_id,deployments_containers_idx;references:deployments_id,idx;belongsTo;constraint:OnDelete:CASCADE"` -} - -// DeploymentsContainersSecrets holds the Gorm model for Postgres table `deployments_containers_secrets`. -type DeploymentsContainersSecrets struct { - DeploymentsID string `gorm:"column:deployments_id;type:uuid;primaryKey"` - DeploymentsContainersIdx int `gorm:"column:deployments_containers_idx;type:integer;primaryKey"` - Idx int `gorm:"column:idx;type:integer;primaryKey;index:deploymentscontainerssecrets_idx,type:btree"` - Name string `gorm:"column:name;type:varchar"` - Path string `gorm:"column:path;type:varchar"` - DeploymentsContainersRef DeploymentsContainers `gorm:"foreignKey:deployments_id,deployments_containers_idx;references:deployments_id,idx;belongsTo;constraint:OnDelete:CASCADE"` -} - -// DeploymentsPorts holds the Gorm model for Postgres table `deployments_ports`. -type DeploymentsPorts struct { - DeploymentsID string `gorm:"column:deployments_id;type:uuid;primaryKey"` - Idx int `gorm:"column:idx;type:integer;primaryKey;index:deploymentsports_idx,type:btree"` - ContainerPort int32 `gorm:"column:containerport;type:integer"` - Protocol string `gorm:"column:protocol;type:varchar"` - Exposure storage.PortConfig_ExposureLevel `gorm:"column:exposure;type:integer"` - DeploymentsRef Deployments `gorm:"foreignKey:deployments_id;references:id;belongsTo;constraint:OnDelete:CASCADE"` -} - -// DeploymentsPortsExposureInfos holds the Gorm model for Postgres table `deployments_ports_exposure_infos`. 
-type DeploymentsPortsExposureInfos struct { - DeploymentsID string `gorm:"column:deployments_id;type:uuid;primaryKey"` - DeploymentsPortsIdx int `gorm:"column:deployments_ports_idx;type:integer;primaryKey"` - Idx int `gorm:"column:idx;type:integer;primaryKey;index:deploymentsportsexposureinfos_idx,type:btree"` - Level storage.PortConfig_ExposureLevel `gorm:"column:level;type:integer"` - ServiceName string `gorm:"column:servicename;type:varchar"` - ServicePort int32 `gorm:"column:serviceport;type:integer"` - NodePort int32 `gorm:"column:nodeport;type:integer"` - ExternalIps *pq.StringArray `gorm:"column:externalips;type:text[]"` - ExternalHostnames *pq.StringArray `gorm:"column:externalhostnames;type:text[]"` - DeploymentsPortsRef DeploymentsPorts `gorm:"foreignKey:deployments_id,deployments_ports_idx;references:deployments_id,idx;belongsTo;constraint:OnDelete:CASCADE"` -} From 3caae190744d94e052a0160e786aff580ac6172c Mon Sep 17 00:00:00 2001 From: David Shrewsberry <99685630+dashrews78@users.noreply.github.com> Date: Tue, 27 Jan 2026 16:13:41 -0500 Subject: [PATCH 043/232] ROX-32844: policy connection leak (#18701) --- central/policy/datastore/datastore_impl.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/central/policy/datastore/datastore_impl.go b/central/policy/datastore/datastore_impl.go index ff23dca458514..8d99da3298ab7 100644 --- a/central/policy/datastore/datastore_impl.go +++ b/central/policy/datastore/datastore_impl.go @@ -268,7 +268,8 @@ func (ds *datastoreImpl) AddPolicy(ctx context.Context, policy *storage.Policy) } if findPolicyWithSameName(policyNameToPolicyMap, policy.GetName()) != nil { - return "", fmt.Errorf("Could not add policy due to name validation, policy with name %s already exists", policy.GetName()) + nameError := fmt.Errorf("Could not add policy due to name validation, policy with name %s already exists", policy.GetName()) + return "", ds.wrapWithRollback(ctx, tx, nameError) } policyutils.FillSortHelperFields(policy) // Any policy added after startup must be marked custom policy. 
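A minimal, hypothetical Go sketch of the rollback-on-early-return pattern that the ROX-32844 fix above applies: validation failures must roll the open transaction back before returning, otherwise the connection is leaked. The tx interface and the wrapWithRollback signature below are assumptions inferred from the diff context, not the actual central/policy datastore code.

// Package example is an illustrative sketch only; names are assumed, not taken from the real datastore.
package example

import (
	"context"
	"errors"
	"fmt"
)

// tx models the subset of a database transaction this sketch needs (assumed interface).
type tx interface {
	Rollback(ctx context.Context) error
}

// wrapWithRollback rolls the transaction back and, if the rollback itself fails,
// attaches that failure to the original error. Returning through this helper on
// every early-exit path ensures a validation error never leaves a transaction
// (and its underlying connection) open.
func wrapWithRollback(ctx context.Context, t tx, err error) error {
	if rbErr := t.Rollback(ctx); rbErr != nil {
		return errors.Join(err, fmt.Errorf("rolling back transaction: %w", rbErr))
	}
	return err
}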
From c660846332580743646abf6984b42f77d7daf308 Mon Sep 17 00:00:00 2001 From: Cong Du Date: Tue, 27 Jan 2026 13:20:22 -0800 Subject: [PATCH 044/232] ROX-32105: Replace baseImageInfo with baseImage graphql resolver (#18530) --- central/graphql/resolvers/gen/main.go | 9 +++ central/graphql/resolvers/generated.go | 82 ------------------------ central/graphql/resolvers/images.go | 72 ++++++++++++++++++++- central/graphql/resolvers/images_test.go | 42 +++++++++--- central/graphql/resolvers/images_v2.go | 7 ++ central/graphql/resolvers/test_utils.go | 8 ++- 6 files changed, 126 insertions(+), 94 deletions(-) diff --git a/central/graphql/resolvers/gen/main.go b/central/graphql/resolvers/gen/main.go index 29ac8258bac0b..3ca94ad62b263 100644 --- a/central/graphql/resolvers/gen/main.go +++ b/central/graphql/resolvers/gen/main.go @@ -69,6 +69,7 @@ var ( reflect.TypeOf((*v1.SearchResult)(nil)), }, SkipResolvers: []reflect.Type{ + reflect.TypeOf(storage.BaseImageInfo{}), reflect.TypeOf(storage.EmbeddedVulnerability{}), reflect.TypeOf(storage.EmbeddedImageScanComponent{}), reflect.TypeOf(storage.EmbeddedNodeScanComponent{}), @@ -82,6 +83,10 @@ var ( ParentType: reflect.TypeOf(storage.Image{}), FieldName: "Scan", }, + { + ParentType: reflect.TypeOf(storage.Image{}), + FieldName: "BaseImageInfo", + }, { ParentType: reflect.TypeOf(storage.ImageV2{}), FieldName: "Scan", @@ -90,6 +95,10 @@ var ( ParentType: reflect.TypeOf(storage.ImageV2{}), FieldName: "ScanStats", }, + { + ParentType: reflect.TypeOf(storage.ImageV2{}), + FieldName: "BaseImageInfo", + }, { ParentType: reflect.TypeOf(storage.ImageScan{}), FieldName: "Components", diff --git a/central/graphql/resolvers/generated.go b/central/graphql/resolvers/generated.go index b5e51cf34a994..253ca2ce23841 100644 --- a/central/graphql/resolvers/generated.go +++ b/central/graphql/resolvers/generated.go @@ -158,12 +158,6 @@ func registerGeneratedTypes(builder generator.SchemaBuilder) { utils.Must(builder.AddType("AzureProviderMetadata", []string{ "subscriptionId: String!", })) - utils.Must(builder.AddType("BaseImageInfo", []string{ - "baseImageDigest: String!", - "baseImageFullName: String!", - "baseImageId: String!", - "created: Time", - })) generator.RegisterProtoEnum(builder, reflect.TypeOf(storage.BooleanOperator(0))) utils.Must(builder.AddType("CSCC", []string{ "serviceAccount: String!", @@ -721,7 +715,6 @@ func registerGeneratedTypes(builder generator.SchemaBuilder) { "value: String!", })) utils.Must(builder.AddType("Image", []string{ - "baseImageInfo: [BaseImageInfo]!", "id: ID!", "isClusterLocal: Boolean!", "lastUpdated: Time", @@ -785,7 +778,6 @@ func registerGeneratedTypes(builder generator.SchemaBuilder) { })) generator.RegisterProtoEnum(builder, reflect.TypeOf(storage.ImageSignatureVerificationResult_Status(0))) utils.Must(builder.AddType("ImageV2", []string{ - "baseImageInfo: [BaseImageInfo]!", "digest: String!", "id: ID!", "isClusterLocal: Boolean!", @@ -3021,68 +3013,6 @@ func (resolver *azureProviderMetadataResolver) SubscriptionId(ctx context.Contex return value } -type baseImageInfoResolver struct { - ctx context.Context - root *Resolver - data *storage.BaseImageInfo -} - -func (resolver *Resolver) wrapBaseImageInfo(value *storage.BaseImageInfo, ok bool, err error) (*baseImageInfoResolver, error) { - if !ok || err != nil || value == nil { - return nil, err - } - return &baseImageInfoResolver{root: resolver, data: value}, nil -} - -func (resolver *Resolver) wrapBaseImageInfos(values []*storage.BaseImageInfo, err error) ([]*baseImageInfoResolver, 
error) { - if err != nil || len(values) == 0 { - return nil, err - } - output := make([]*baseImageInfoResolver, len(values)) - for i, v := range values { - output[i] = &baseImageInfoResolver{root: resolver, data: v} - } - return output, nil -} - -func (resolver *Resolver) wrapBaseImageInfoWithContext(ctx context.Context, value *storage.BaseImageInfo, ok bool, err error) (*baseImageInfoResolver, error) { - if !ok || err != nil || value == nil { - return nil, err - } - return &baseImageInfoResolver{ctx: ctx, root: resolver, data: value}, nil -} - -func (resolver *Resolver) wrapBaseImageInfosWithContext(ctx context.Context, values []*storage.BaseImageInfo, err error) ([]*baseImageInfoResolver, error) { - if err != nil || len(values) == 0 { - return nil, err - } - output := make([]*baseImageInfoResolver, len(values)) - for i, v := range values { - output[i] = &baseImageInfoResolver{ctx: ctx, root: resolver, data: v} - } - return output, nil -} - -func (resolver *baseImageInfoResolver) BaseImageDigest(ctx context.Context) string { - value := resolver.data.GetBaseImageDigest() - return value -} - -func (resolver *baseImageInfoResolver) BaseImageFullName(ctx context.Context) string { - value := resolver.data.GetBaseImageFullName() - return value -} - -func (resolver *baseImageInfoResolver) BaseImageId(ctx context.Context) string { - value := resolver.data.GetBaseImageId() - return value -} - -func (resolver *baseImageInfoResolver) Created(ctx context.Context) (*graphql.Time, error) { - value := resolver.data.GetCreated() - return protocompat.ConvertTimestampToGraphqlTimeOrError(value) -} - func toBooleanOperator(value *string) storage.BooleanOperator { if value != nil { return storage.BooleanOperator(storage.BooleanOperator_value[*value]) @@ -8649,12 +8579,6 @@ func (resolver *imageResolver) ensureData(ctx context.Context) { } } -func (resolver *imageResolver) BaseImageInfo(ctx context.Context) ([]*baseImageInfoResolver, error) { - resolver.ensureData(ctx) - value := resolver.data.GetBaseImageInfo() - return resolver.root.wrapBaseImageInfos(value, nil) -} - func (resolver *imageResolver) Id(ctx context.Context) graphql.ID { value := resolver.data.GetId() if resolver.data == nil { @@ -9352,12 +9276,6 @@ func (resolver *imageV2Resolver) ensureData(ctx context.Context) { } } -func (resolver *imageV2Resolver) BaseImageInfo(ctx context.Context) ([]*baseImageInfoResolver, error) { - resolver.ensureData(ctx) - value := resolver.data.GetBaseImageInfo() - return resolver.root.wrapBaseImageInfos(value, nil) -} - func (resolver *imageV2Resolver) Digest(ctx context.Context) string { resolver.ensureData(ctx) value := resolver.data.GetDigest() diff --git a/central/graphql/resolvers/images.go b/central/graphql/resolvers/images.go index afdb3a04b9659..ce9de9967635a 100644 --- a/central/graphql/resolvers/images.go +++ b/central/graphql/resolvers/images.go @@ -2,6 +2,7 @@ package resolvers import ( "context" + "slices" "time" "github.com/graph-gophers/graphql-go" @@ -48,9 +49,9 @@ type ImageResolver interface { Signature(ctx context.Context) (*imageSignatureResolver, error) SignatureVerificationData(ctx context.Context) (*imageSignatureVerificationDataResolver, error) TopCvss(ctx context.Context) float64 - BaseImageInfo(ctx context.Context) ([]*baseImageInfoResolver, error) UnknownCveCount(ctx context.Context) int32 + BaseImage(ctx context.Context) (*baseImageResolver, error) Deployments(ctx context.Context, args PaginatedQuery) ([]*deploymentResolver, error) DeploymentCount(ctx context.Context, args RawQuery) 
(int32, error) TopImageVulnerability(ctx context.Context, args RawQuery) (ImageVulnerabilityResolver, error) @@ -88,8 +89,14 @@ func registerImageWatchStatus(s string) string { func init() { schema := getBuilder() utils.Must( + schema.AddType("BaseImage", []string{ + "imageSha: String!", + "names: [String!]!", + "created: Time", + }), // NOTE: This list is and should remain alphabetically ordered schema.AddExtraResolvers("Image", []string{ + "baseImage: BaseImage", "deploymentCount(query: String): Int!", "deployments(query: String, pagination: Pagination): [Deployment!]!", "imageComponentCount(query: String): Int!", @@ -515,3 +522,66 @@ func (resolver *imageResolver) TopCvss(_ context.Context) float64 { } return float64(value) } + +func (resolver *imageResolver) BaseImage(ctx context.Context) (*baseImageResolver, error) { + resolver.ensureData(ctx) + baseImageInfos := resolver.data.GetBaseImageInfo() + return resolver.root.wrapBaseImage(baseImageInfos) +} + +// baseImageResolver resolves base image information +type baseImageResolver struct { + root *Resolver + data *baseImageData +} + +type baseImageData struct { + imageSha string + names []string + created *graphql.Time +} + +func (resolver *Resolver) wrapBaseImage(baseImageInfos []*storage.BaseImageInfo) (*baseImageResolver, error) { + if len(baseImageInfos) == 0 { + return nil, nil + } + + // All entries should have the same digest and create time, take the first one + imageSha := baseImageInfos[0].GetBaseImageDigest() + createTimestamp := baseImageInfos[0].GetCreated() + created, err := protocompat.ConvertTimestampToGraphqlTimeOrError(createTimestamp) + if err != nil { + return nil, err + } + + // Collect all full names + names := make([]string, 0, len(baseImageInfos)) + for _, info := range baseImageInfos { + names = append(names, info.GetBaseImageFullName()) + } + + // Stablize the names + slices.Sort(names) + data := &baseImageData{ + imageSha: imageSha, + names: names, + created: created, + } + + return &baseImageResolver{ + root: resolver, + data: data, + }, nil +} + +func (resolver *baseImageResolver) ImageSha(_ context.Context) string { + return resolver.data.imageSha +} + +func (resolver *baseImageResolver) Names(_ context.Context) []string { + return resolver.data.names +} + +func (resolver *baseImageResolver) Created(_ context.Context) (*graphql.Time, error) { + return resolver.data.created, nil +} diff --git a/central/graphql/resolvers/images_test.go b/central/graphql/resolvers/images_test.go index 66d4d68689c9d..661ddb5e34f1b 100644 --- a/central/graphql/resolvers/images_test.go +++ b/central/graphql/resolvers/images_test.go @@ -6,6 +6,7 @@ import ( "context" "strings" "testing" + "time" "github.com/graph-gophers/graphql-go" "github.com/stackrox/rox/central/graphql/resolvers/loaders" @@ -18,6 +19,7 @@ import ( "github.com/stackrox/rox/pkg/grpc/authz/allow" "github.com/stackrox/rox/pkg/pointers" "github.com/stackrox/rox/pkg/postgres/pgtest" + "github.com/stackrox/rox/pkg/protocompat" "github.com/stackrox/rox/pkg/sac" "github.com/stackrox/rox/pkg/sac/resources" "github.com/stretchr/testify/assert" @@ -267,16 +269,38 @@ func (s *ImageResolversTestSuite) TestDeployments() { assert.Equal(t, int32(expectedCVESevCount.moderate), moderate.Total(testCtx)) assert.Equal(t, int32(expectedCVESevCount.low), low.Total(testCtx)) - // Test BaseImageInfo field for each image resolver. 
- expectedImage := expectedImages[imageID] - actualBaseImageInfo, err := image.BaseImageInfo(testCtx) + // Test BaseImage field + actualBaseImage, err := image.BaseImage(testCtx) assert.NoError(t, err) - assert.Len(t, actualBaseImageInfo, len(expectedImage.GetBaseImageInfo())) - for i, baseInfo := range actualBaseImageInfo { - expectedBaseInfo := expectedImage.GetBaseImageInfo()[i] - assert.Equal(t, expectedBaseInfo.GetBaseImageId(), baseInfo.BaseImageId(testCtx)) - assert.Equal(t, expectedBaseInfo.GetBaseImageFullName(), baseInfo.BaseImageFullName(testCtx)) - assert.Equal(t, expectedBaseInfo.GetBaseImageDigest(), baseInfo.BaseImageDigest(testCtx)) + + expectedImage, exists := expectedImages[imageID] + require.True(t, exists, "Expected image %s not found in expectedImages map", imageID) + baseImageInfos := expectedImage.GetBaseImageInfo() + if len(baseImageInfos) == 0 { + assert.Nil(t, actualBaseImage) + } else { + require.NotNil(t, actualBaseImage) + + // Test imageSha (should be the digest from the first base image info) + expectedSha := baseImageInfos[0].GetBaseImageDigest() + assert.Equal(t, expectedSha, actualBaseImage.ImageSha(testCtx)) + + // Test name array + expectedNames := []string{ + baseImageInfos[1].GetBaseImageFullName(), + baseImageInfos[0].GetBaseImageFullName(), + } + assert.Equal(t, expectedNames, actualBaseImage.Names(testCtx)) + + // Test created timestamp + actualCreated, err := actualBaseImage.Created(testCtx) + assert.NoError(t, err) + assert.NotNil(t, actualCreated) + expectedTimestamp, err := protocompat.ConvertTimeToTimestampOrError(time.Unix(0, 3000)) + assert.NoError(t, err) + expectedCreated, err := protocompat.ConvertTimestampToGraphqlTimeOrError(expectedTimestamp) + assert.NoError(t, err) + assert.Equal(t, expectedCreated, actualCreated) } // Test image -> deployments -> images diff --git a/central/graphql/resolvers/images_v2.go b/central/graphql/resolvers/images_v2.go index 94dbe92ecda64..9ea15e44e716a 100644 --- a/central/graphql/resolvers/images_v2.go +++ b/central/graphql/resolvers/images_v2.go @@ -23,6 +23,7 @@ func init() { schema := getBuilder() utils.Must( schema.AddExtraResolvers("ImageV2", []string{ + "baseImage: BaseImage", "deploymentCount(query: String): Int!", "deployments(query: String, pagination: Pagination): [Deployment!]!", "imageComponentCount(query: String): Int!", @@ -393,3 +394,9 @@ func (resolver *imageV2Resolver) FixableLowCveCount(ctx context.Context) int32 { resolver.ensureData(ctx) return resolver.data.GetScanStats().GetFixableLowCveCount() } + +func (resolver *imageV2Resolver) BaseImage(ctx context.Context) (*baseImageResolver, error) { + resolver.ensureData(ctx) + baseImageInfos := resolver.data.GetBaseImageInfo() + return resolver.root.wrapBaseImage(baseImageInfos) +} diff --git a/central/graphql/resolvers/test_utils.go b/central/graphql/resolvers/test_utils.go index bb113742a1e0a..d8ffb0e60db62 100644 --- a/central/graphql/resolvers/test_utils.go +++ b/central/graphql/resolvers/test_utils.go @@ -96,6 +96,8 @@ func testImages() []*storage.Image { utils.CrashOnError(err) t2, err := protocompat.ConvertTimeToTimestampOrError(time.Unix(0, 2000)) utils.CrashOnError(err) + t3, err := protocompat.ConvertTimeToTimestampOrError(time.Unix(0, 3000)) + utils.CrashOnError(err) return []*storage.Image{ { Id: "sha1", @@ -233,13 +235,15 @@ func testImages() []*storage.Image { BaseImageInfo: []*storage.BaseImageInfo{ { BaseImageId: "base-sha2", - BaseImageFullName: "alpine:3.12", + BaseImageFullName: "busybox:latest", BaseImageDigest: 
"sha256:alpine312", + Created: t3, }, { BaseImageId: "base-sha3", - BaseImageFullName: "busybox:latest", + BaseImageFullName: "alpine:3.12", BaseImageDigest: "sha256:busybox1", + Created: t3, }, }, }, From 8e04226c819b0f11438f0100676e6f9b7d854478 Mon Sep 17 00:00:00 2001 From: Khushboo Sancheti <42253461+clickboo@users.noreply.github.com> Date: Wed, 28 Jan 2026 03:19:54 +0530 Subject: [PATCH 045/232] =?UTF-8?q?fix(be):=20Remove=20"hash:ignore"=20fro?= =?UTF-8?q?m=20EmbeddedVulnerability=20proto=20for=20up=E2=80=A6=20(#18697?= =?UTF-8?q?)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- generated/storage/vulnerability.pb.go | 2 +- proto/storage/vulnerability.proto | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/generated/storage/vulnerability.pb.go b/generated/storage/vulnerability.pb.go index d8d51d6e4a843..51f0e5f516239 100644 --- a/generated/storage/vulnerability.pb.go +++ b/generated/storage/vulnerability.pb.go @@ -158,7 +158,7 @@ type EmbeddedVulnerability struct { // Timestamp when the fix for this CVE was made available according to the sources // or the timestamp of the first scan after this field was introduced // which discovered this CVE as fixable, if it is. - FixAvailableTimestamp *timestamppb.Timestamp `protobuf:"bytes,25,opt,name=fix_available_timestamp,json=fixAvailableTimestamp,proto3" json:"fix_available_timestamp,omitempty" policy:"CVE Fix Available Timestamp" hash:"ignore"` // @gotags: policy:"CVE Fix Available Timestamp" hash:"ignore" + FixAvailableTimestamp *timestamppb.Timestamp `protobuf:"bytes,25,opt,name=fix_available_timestamp,json=fixAvailableTimestamp,proto3" json:"fix_available_timestamp,omitempty" policy:"CVE Fix Available Timestamp"` // @gotags: policy:"CVE Fix Available Timestamp" Severity VulnerabilitySeverity `protobuf:"varint,19,opt,name=severity,proto3,enum=storage.VulnerabilitySeverity" json:"severity,omitempty" policy:"Severity"` // @gotags: policy:"Severity" State VulnerabilityState `protobuf:"varint,20,opt,name=state,proto3,enum=storage.VulnerabilityState" json:"state,omitempty" search:"Vulnerability State"` // @gotags: search:"Vulnerability State" // cvss_metrics stores list of cvss scores from different sources like nvd, Redhat etc diff --git a/proto/storage/vulnerability.proto b/proto/storage/vulnerability.proto index ae19adc080c21..2a7a4fd2365e0 100644 --- a/proto/storage/vulnerability.proto +++ b/proto/storage/vulnerability.proto @@ -54,7 +54,7 @@ message EmbeddedVulnerability { // Timestamp when the fix for this CVE was made available according to the sources // or the timestamp of the first scan after this field was introduced // which discovered this CVE as fixable, if it is. 
- google.protobuf.Timestamp fix_available_timestamp = 25; // @gotags: policy:"CVE Fix Available Timestamp" hash:"ignore" + google.protobuf.Timestamp fix_available_timestamp = 25; // @gotags: policy:"CVE Fix Available Timestamp" VulnerabilitySeverity severity = 19; // @gotags: policy:"Severity" VulnerabilityState state = 20; // @gotags: search:"Vulnerability State" From 73082136f09f0d218052b0d0559dcd2d559b90f3 Mon Sep 17 00:00:00 2001 From: Saif Chaudhry Date: Tue, 27 Jan 2026 16:20:16 -0800 Subject: [PATCH 046/232] ROX-32647: Add base image assessment card to Image Details page (#18483) Co-authored-by: cdu --- .../WorkloadCves/Image/ImagePage.tsx | 1 + .../Image/ImagePageVulnerabilities.tsx | 10 ++ .../ImageComponentVulnerabilitiesTable.tsx | 2 +- .../components/BaseImageAssessmentCard.tsx | 100 ++++++++++++++++++ .../components/ImageDetailBadges.tsx | 17 +++ .../src/services/BaseImagesService.ts | 11 +- 6 files changed, 138 insertions(+), 3 deletions(-) create mode 100644 ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/components/BaseImageAssessmentCard.tsx diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/Image/ImagePage.tsx b/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/Image/ImagePage.tsx index d1df3cfe47907..d120bf4a71c7a 100644 --- a/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/Image/ImagePage.tsx +++ b/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/Image/ImagePage.tsx @@ -322,6 +322,7 @@ function ImagePage({ tag: '', } } + baseImage={imageData?.baseImage ?? null} refetchAll={refetchAll} pagination={pagination} vulnerabilityState={vulnerabilityState} diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/Image/ImagePageVulnerabilities.tsx b/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/Image/ImagePageVulnerabilities.tsx index 7527ec1804ee9..a91943188a39d 100644 --- a/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/Image/ImagePageVulnerabilities.tsx +++ b/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/Image/ImagePageVulnerabilities.tsx @@ -68,6 +68,8 @@ import { imageCVESearchFilterConfig, imageComponentSearchFilterConfig, } from '../../searchFilterConfig'; +import BaseImageAssessmentCard from '../components/BaseImageAssessmentCard'; +import type { BaseImage } from '../components/ImageDetailBadges'; export const imageVulnerabilitiesQuery = gql` ${imageMetadataContextFragment} @@ -127,6 +129,7 @@ export type ImagePageVulnerabilitiesProps = { remote: string; tag: string; }; + baseImage: BaseImage | null; refetchAll: () => void; pagination: UseURLPaginationResult; vulnerabilityState: VulnerabilityState; @@ -139,6 +142,7 @@ export type ImagePageVulnerabilitiesProps = { function ImagePageVulnerabilities({ imageId, imageName, + baseImage, refetchAll, pagination, vulnerabilityState, @@ -148,6 +152,7 @@ function ImagePageVulnerabilities({ setSearchFilter, }: ImagePageVulnerabilitiesProps) { const { isFeatureFlagEnabled } = useFeatureFlags(); + const isBaseImageDetectionEnabled = isFeatureFlagEnabled('ROX_BASE_IMAGE_DETECTION'); const isNewImageDataModelEnabled = isFeatureFlagEnabled('ROX_FLATTEN_IMAGE_DATA'); const { analyticsTrack } = useAnalytics(); @@ -291,6 +296,11 @@ function ImagePageVulnerabilities({ Review and triage vulnerability data scanned on this image + {isBaseImageDetectionEnabled && baseImage && ( + + + + )} { + setIsExpanded(expanded); + }; + + // Use the digest (imageSha) as the image ID for the detail link + const imageDetailPath = 
urlBuilder.imageDetails(baseImage.imageSha, 'OBSERVED'); + + return ( + + + + + + + {baseImage.names.length > 1 ? 'Image names' : 'Image name'} + + + + {baseImage.names.map((name) => ( + + ))} + + + + + Image digest + + + {baseImage.imageSha} + + + + {baseImage.created && ( + + Image age + + {getDistanceStrict(baseImage.created, new Date())} + + + )} + + + + + ); +} + +export default BaseImageAssessmentCard; diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/components/ImageDetailBadges.tsx b/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/components/ImageDetailBadges.tsx index be8a92bd3a764..3e9af5f586ddc 100644 --- a/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/components/ImageDetailBadges.tsx +++ b/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/components/ImageDetailBadges.tsx @@ -6,6 +6,12 @@ import type { SignatureVerificationResult } from '../../types'; import SignatureCountLabel from './SignatureCountLabel'; import VerifiedSignatureLabel, { getVerifiedSignatureInResults } from './VerifiedSignatureLabel'; +export type BaseImage = { + imageSha: string; + names: string[]; + created?: string; +}; + export type ImageDetails = { deploymentCount: number; operatingSystem: string; @@ -22,6 +28,7 @@ export type ImageDetails = { signatureVerificationData: { results: SignatureVerificationResult[]; } | null; + baseImage: BaseImage | null; }; export const imageDetailsFragment = gql` @@ -49,6 +56,11 @@ export const imageDetailsFragment = gql` verifierId } } + baseImage { + imageSha + names + created + } } `; @@ -77,6 +89,11 @@ export const imageV2DetailsFragment = gql` verifierId } } + baseImage { + imageSha + names + created + } } `; diff --git a/ui/apps/platform/src/services/BaseImagesService.ts b/ui/apps/platform/src/services/BaseImagesService.ts index 3a87b473d42d0..84d2d58ac38c2 100644 --- a/ui/apps/platform/src/services/BaseImagesService.ts +++ b/ui/apps/platform/src/services/BaseImagesService.ts @@ -15,6 +15,10 @@ export type BaseImagesResponse = { baseImageReferences: BaseImageReference[]; }; +export type CreateBaseImageReferenceResponse = { + baseImageReference: BaseImageReference; +}; + /** * Fetch the list of configured base images. 
*/ @@ -32,8 +36,11 @@ export function addBaseImage( baseImageTagPattern: string ): Promise { return axios - .post(baseImagesUrl, { baseImageRepoPath, baseImageTagPattern }) - .then((response) => response.data); + .post(baseImagesUrl, { + baseImageRepoPath, + baseImageTagPattern, + }) + .then((response) => response.data.baseImageReference); } /** From 93212ec334c3c6899864da0e1d9ebad28b4dd4a5 Mon Sep 17 00:00:00 2001 From: Guzman Date: Wed, 28 Jan 2026 09:42:15 +0100 Subject: [PATCH 047/232] ROX-32316: Rate limit VM index reports before being queued (#18692) Co-authored-by: Piotr Rygielski <114479+vikin91@users.noreply.github.com> --- .../service/connection/connection_impl.go | 52 +++ .../sensor/service/connection/manager_impl.go | 31 ++ .../pipeline/virtualmachineindex/pipeline.go | 134 +----- .../virtualmachineindex/pipeline_test.go | 392 +----------------- pkg/env/virtualmachine.go | 2 +- pkg/rate/limiter.go | 82 +++- pkg/rate/limiter_test.go | 167 ++++++-- 7 files changed, 289 insertions(+), 571 deletions(-) diff --git a/central/sensor/service/connection/connection_impl.go b/central/sensor/service/connection/connection_impl.go index 3b57b49686008..f5103d60566a2 100644 --- a/central/sensor/service/connection/connection_impl.go +++ b/central/sensor/service/connection/connection_impl.go @@ -23,6 +23,8 @@ import ( v1 "github.com/stackrox/rox/generated/api/v1" "github.com/stackrox/rox/generated/internalapi/central" "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/administration/events" + adminResources "github.com/stackrox/rox/pkg/administration/events/resources" "github.com/stackrox/rox/pkg/booleanpolicy/policyversion" "github.com/stackrox/rox/pkg/centralsensor" "github.com/stackrox/rox/pkg/concurrency" @@ -32,6 +34,7 @@ import ( "github.com/stackrox/rox/pkg/logging" "github.com/stackrox/rox/pkg/postgres/pgutils" "github.com/stackrox/rox/pkg/protoconv/schedule" + "github.com/stackrox/rox/pkg/rate" "github.com/stackrox/rox/pkg/reflectutils" "github.com/stackrox/rox/pkg/sac" "github.com/stackrox/rox/pkg/safe" @@ -81,6 +84,13 @@ type sensorConnection struct { capabilities set.Set[centralsensor.SensorCapability] hashDeduper hashManager.Deduper + + rl rateLimiter + adminEventsStream events.Stream +} + +type rateLimiter interface { + TryConsume(clientID string, msg *central.MsgFromSensor) (allowed bool, reason string) } func newConnection(ctx context.Context, @@ -97,6 +107,8 @@ func newConnection(ctx context.Context, hashMgr hashManager.Manager, complianceOperatorMgr common.ComplianceOperatorManager, initSyncMgr *initSyncManager, + rl rateLimiter, + adminEventsStream events.Stream, ) *sensorConnection { conn := &sensorConnection{ @@ -120,6 +132,8 @@ func newConnection(ctx context.Context, sensorHello: sensorHello, capabilities: set.NewSet(sliceutils. FromStringSlice[centralsensor.SensorCapability](sensorHello.GetCapabilities()...)...), + rl: rl, + adminEventsStream: adminEventsStream, } // Need a reference to conn for injector @@ -161,6 +175,19 @@ func (c *sensorConnection) multiplexedPush(ctx context.Context, msg *central.Msg return } + allowed, reason := c.rl.TryConsume(c.clusterID, msg) + if !allowed { + logging.GetRateLimitedLogger().WarnL( + "vm_index_reports_rate_limiter", + "Request is rate-limited for cluster %s and event type %s. 
Reason: %s", + c.clusterID, + event.GetEventTypeWithoutPrefix(msg.GetEvent().GetResource()), + reason, + ) + c.emitRateLimitedAdminEvent(c.clusterID, reason) + return + } + typ := reflectutils.Type(msg.GetMsg()) queue := queues[typ] if queue == nil { @@ -181,6 +208,31 @@ func (c *sensorConnection) multiplexedPush(ctx context.Context, msg *central.Msg queue.Push(msg) } +func (c *sensorConnection) emitRateLimitedAdminEvent(clusterID, reason string) { + if c.adminEventsStream == nil { + return + } + // The texts are tuned for the rate.ReasonRateLimitExceeded, so skip adding log entry if the reason is different. + if reason != rate.ReasonRateLimitExceeded { + return + } + + c.adminEventsStream.Produce(&events.AdministrationEvent{ + Type: storage.AdministrationEventType_ADMINISTRATION_EVENT_TYPE_GENERIC, + Level: storage.AdministrationEventLevel_ADMINISTRATION_EVENT_LEVEL_WARNING, + Domain: events.DefaultDomain, + Message: fmt.Sprintf("VM index reports from cluster %s are being rate limited: %s", clusterID, reason), + ResourceType: adminResources.Cluster, + ResourceID: clusterID, + Hint: fmt.Sprintf("VM index reports are being rate limited to avoid overwhelming the system. "+ + "Consider either: (1) scaling up the Scanner V4 deployments and increasing values of %s or %s, "+ + "or (2) reducing the index-report frequency in roxagents running in the Virtual Machines.", + env.VMIndexReportRateLimit.EnvVar(), + env.VMIndexReportBucketCapacity.EnvVar(), + ), + }) +} + func getSensorMessageTypeString(msg *central.MsgFromSensor) string { messageType := reflectutils.Type(msg.GetMsg()) var eventType string diff --git a/central/sensor/service/connection/manager_impl.go b/central/sensor/service/connection/manager_impl.go index 6d3e7426f758a..61762872fc408 100644 --- a/central/sensor/service/connection/manager_impl.go +++ b/central/sensor/service/connection/manager_impl.go @@ -11,13 +11,17 @@ import ( "github.com/stackrox/rox/central/sensor/service/pipeline" "github.com/stackrox/rox/generated/internalapi/central" "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/administration/events" + adminEventStream "github.com/stackrox/rox/pkg/administration/events/stream" "github.com/stackrox/rox/pkg/centralsensor" "github.com/stackrox/rox/pkg/clusterhealth" "github.com/stackrox/rox/pkg/concurrency" + "github.com/stackrox/rox/pkg/env" "github.com/stackrox/rox/pkg/errorhelpers" "github.com/stackrox/rox/pkg/errox" "github.com/stackrox/rox/pkg/protocompat" "github.com/stackrox/rox/pkg/protoconv" + "github.com/stackrox/rox/pkg/rate" "github.com/stackrox/rox/pkg/sac" "github.com/stackrox/rox/pkg/sac/resources" "github.com/stackrox/rox/pkg/sync" @@ -73,6 +77,8 @@ type manager struct { complianceOperatorMgr common.ComplianceOperatorManager initSyncMgr *initSyncManager autoTriggerUpgrades *concurrency.Flag + rateLimiter *rate.Limiter + adminEventsStream events.Stream } // NewManager returns a new connection manager @@ -81,9 +87,27 @@ func NewManager(mgr hashManager.Manager) Manager { connectionsByClusterID: make(map[string]connectionAndUpgradeController), manager: mgr, initSyncMgr: NewInitSyncManager(), + rateLimiter: newVMIndexReportRateLimiter(), + adminEventsStream: adminEventStream.Singleton(), } } +func newVMIndexReportRateLimiter() *rate.Limiter { + rl, err := rate.NewLimiter( + "vm_index_reports", + env.VMIndexReportRateLimit.FloatSetting(), + env.VMIndexReportBucketCapacity.IntegerSetting()). 
+ ForWorkload(func(msg *central.MsgFromSensor) bool { + return msg.GetEvent().GetVirtualMachineIndexReport() != nil + }) + + if err != nil { + utils.Should(errors.Wrap(err, "Creating rate-limiter for VM index reports")) + } + + return rl +} + func (m *manager) initializeUpgradeControllers() error { clusters, err := m.clusters.GetClusters(managerCtx) if err != nil { @@ -252,6 +276,8 @@ func (m *manager) CloseConnection(clusterID string) { if err := m.manager.Delete(ctx, clusterID); err != nil { log.Errorf("deleting cluster id %q from hash manager: %v", clusterID, err) } + + m.rateLimiter.OnClientDisconnect(clusterID) } func (m *manager) HandleConnection(ctx context.Context, sensorHello *central.SensorHello, cluster *storage.Cluster, eventPipeline pipeline.ClusterPipeline, server central.SensorService_CommunicateServer) error { @@ -278,7 +304,12 @@ func (m *manager) HandleConnection(ctx context.Context, sensorHello *central.Sen m.manager, m.complianceOperatorMgr, m.initSyncMgr, + m.rateLimiter, + m.adminEventsStream, ) + + defer m.rateLimiter.OnClientDisconnect(clusterID) + ctx = WithConnection(ctx, conn) oldConnection, err := m.replaceConnection(ctx, cluster, conn) diff --git a/central/sensor/service/pipeline/virtualmachineindex/pipeline.go b/central/sensor/service/pipeline/virtualmachineindex/pipeline.go index f3c898d045c72..7c91848821402 100644 --- a/central/sensor/service/pipeline/virtualmachineindex/pipeline.go +++ b/central/sensor/service/pipeline/virtualmachineindex/pipeline.go @@ -2,42 +2,22 @@ package virtualmachineindex import ( "context" - "fmt" - "strconv" "github.com/pkg/errors" countMetrics "github.com/stackrox/rox/central/metrics" "github.com/stackrox/rox/central/sensor/service/common" - "github.com/stackrox/rox/central/sensor/service/connection" "github.com/stackrox/rox/central/sensor/service/pipeline" "github.com/stackrox/rox/central/sensor/service/pipeline/reconciliation" vmDatastore "github.com/stackrox/rox/central/virtualmachine/datastore" "github.com/stackrox/rox/generated/internalapi/central" "github.com/stackrox/rox/generated/storage" - "github.com/stackrox/rox/pkg/administration/events" - adminResources "github.com/stackrox/rox/pkg/administration/events/resources" - adminEventStream "github.com/stackrox/rox/pkg/administration/events/stream" "github.com/stackrox/rox/pkg/centralsensor" - "github.com/stackrox/rox/pkg/env" "github.com/stackrox/rox/pkg/features" "github.com/stackrox/rox/pkg/logging" "github.com/stackrox/rox/pkg/metrics" - "github.com/stackrox/rox/pkg/rate" vmEnricher "github.com/stackrox/rox/pkg/virtualmachine/enricher" ) -const ( - // rateLimiterWorkload is the workload name used for rate limiting VM index reports. - rateLimiterWorkload = "vm_index_report" -) - -// rateLimiter defines the interface for rate limiting operations used by this pipeline. -// This interface is satisfied by *rate.Limiter and allows for easier testing. -type rateLimiter interface { - TryConsume(clientID string) (allowed bool, reason string) - OnClientDisconnect(clientID string) -} - var ( log = logging.LoggerForModule() @@ -46,47 +26,27 @@ var ( // GetPipeline returns an instantiation of this particular pipeline func GetPipeline() pipeline.Fragment { - rateLimit, err := strconv.ParseFloat(env.VMIndexReportRateLimit.Setting(), 64) - if err != nil { - log.Warnf("Invalid %s value: %v. Using fallback value of 0.3", env.VMIndexReportRateLimit.EnvVar(), err) - rateLimit = 0.3 // Keep in sync with the default value in env.VMIndexReportRateLimit. 
- } - bucketCapacity := env.VMIndexReportBucketCapacity.IntegerSetting() - rateLimiter, err := rate.NewLimiter(rateLimiterWorkload, rateLimit, bucketCapacity) - if err != nil { - log.Errorf("Failed to create rate limiter for %s: %v", rateLimiterWorkload, err) - } return newPipeline( vmDatastore.Singleton(), vmEnricher.Singleton(), - rateLimiter, - adminEventStream.Singleton(), ) } // newPipeline returns a new instance of Pipeline. -func newPipeline(vms vmDatastore.DataStore, enricher vmEnricher.VirtualMachineEnricher, rl rateLimiter, adminEventsStream events.Stream) pipeline.Fragment { + +func newPipeline(vms vmDatastore.DataStore, enricher vmEnricher.VirtualMachineEnricher) pipeline.Fragment { return &pipelineImpl{ - vmDatastore: vms, - enricher: enricher, - rateLimiter: rl, - adminEventsStream: adminEventsStream, + vmDatastore: vms, + enricher: enricher, } } type pipelineImpl struct { - vmDatastore vmDatastore.DataStore - enricher vmEnricher.VirtualMachineEnricher - rateLimiter rateLimiter - adminEventsStream events.Stream + vmDatastore vmDatastore.DataStore + enricher vmEnricher.VirtualMachineEnricher } -func (p *pipelineImpl) OnFinish(clusterID string) { - // Notify rate limiter that this client (Sensor) has disconnected so it can rebalance the limiters. - if p.rateLimiter != nil { - p.rateLimiter.OnClientDisconnect(clusterID) - } -} +func (p *pipelineImpl) OnFinish(string) {} func (p *pipelineImpl) Capabilities() []centralsensor.CentralCapability { return []centralsensor.CentralCapability{centralsensor.VirtualMachinesSupported} @@ -126,34 +86,6 @@ func (p *pipelineImpl) Run(ctx context.Context, clusterID string, msg *central.M return errors.New("missing cluster ID in pipeline context") } - // Extract connection for capability checks; cluster ID is taken from the pipeline argument. - conn := connection.FromContext(ctx) - - // Rate limit check. Drop message if rate limiter is misconfigured (defensive behavior against misconfiguration) - // or rate limit exceeded. Afterwards, send NACK to Sensor if Sensor supports it. - if p.rateLimiter == nil { - logging.GetRateLimitedLogger().ErrorL( - "vm_index_report_nil_rate_limiter", - "No rate limiter found for workload %q. 
Dropping VM index report from cluster %s", - rateLimiterWorkload, - clusterID, - ) - if conn != nil && conn.HasCapability(centralsensor.SensorACKSupport) { - sendVMIndexReportResponse(ctx, clusterID, index.GetId(), central.SensorACK_NACK, "rate limiter not configured", injector) - } - return nil // Don't return error - would cause pipeline retry - } - - allowed, reason := p.rateLimiter.TryConsume(clusterID) - if !allowed { - p.emitRateLimitedAdminEvent(clusterID, reason) - log.Infof("Dropping VM index report %s from cluster %s: %s", index.GetId(), clusterID, reason) - if conn != nil && conn.HasCapability(centralsensor.SensorACKSupport) { - sendVMIndexReportResponse(ctx, clusterID, index.GetId(), central.SensorACK_NACK, reason, injector) - } - return nil // Don't return error - would cause pipeline retry - } - // Get or create VM vm := &storage.VirtualMachine{Id: index.GetId()} @@ -177,57 +109,5 @@ func (p *pipelineImpl) Run(ctx context.Context, clusterID string, msg *central.M log.Debugf("Successfully enriched and stored VM %s with %d components", vm.GetId(), len(vm.GetScan().GetComponents())) - // Send ACK to Sensor if Sensor supports it - if conn != nil && conn.HasCapability(centralsensor.SensorACKSupport) { - sendVMIndexReportResponse(ctx, clusterID, index.GetId(), central.SensorACK_ACK, "", injector) - } return nil } - -func (p *pipelineImpl) emitRateLimitedAdminEvent(clusterID, reason string) { - if p.adminEventsStream == nil { - return - } - // The texts are tuned for the rate.ReasonRateLimitExceeded, so skip adding log entry if the reason is different. - if reason != rate.ReasonRateLimitExceeded { - return - } - - p.adminEventsStream.Produce(&events.AdministrationEvent{ - Type: storage.AdministrationEventType_ADMINISTRATION_EVENT_TYPE_GENERIC, - Level: storage.AdministrationEventLevel_ADMINISTRATION_EVENT_LEVEL_WARNING, - Domain: events.DefaultDomain, - Message: fmt.Sprintf("VM index reports from cluster %s are being rate limited: %s", clusterID, reason), - ResourceType: adminResources.Cluster, - ResourceID: clusterID, - Hint: fmt.Sprintf("VM index reports are being rate limited to avoid overwhelming the system. "+ - "Consider either: (1) scaling up the Scanner V4 deployments and increasing values of %s or %s, "+ - "or (2) reducing the index-report frequency in roxagents running in the Virtual Machines.", - env.VMIndexReportRateLimit.EnvVar(), - env.VMIndexReportBucketCapacity.EnvVar(), - ), - }) -} - -// sendVMIndexReportResponse sends an ACK or NACK for a VM index report. 
-func sendVMIndexReportResponse(ctx context.Context, clusterID, vmID string, action central.SensorACK_Action, reason string, injector common.MessageInjector) { - if injector == nil { - log.Debugf("Cannot send %s to Sensor for cluster %s - no injector", action.String(), clusterID) - return - } - msg := ¢ral.MsgToSensor{ - Msg: ¢ral.MsgToSensor_SensorAck{ - SensorAck: ¢ral.SensorACK{ - Action: action, - MessageType: central.SensorACK_VM_INDEX_REPORT, - ResourceId: vmID, - Reason: reason, - }, - }, - } - if err := injector.InjectMessage(ctx, msg); err != nil { - log.Warnf("Failed sending VM index report %s for VM %s in cluster %s: %v", action.String(), vmID, clusterID, err) - } else { - log.Debugf("Sent VM index report %s for VM %s in cluster %s (reason=%q)", action.String(), vmID, clusterID, reason) - } -} diff --git a/central/sensor/service/pipeline/virtualmachineindex/pipeline_test.go b/central/sensor/service/pipeline/virtualmachineindex/pipeline_test.go index 60f6a152656f2..1635e2b45ec11 100644 --- a/central/sensor/service/pipeline/virtualmachineindex/pipeline_test.go +++ b/central/sensor/service/pipeline/virtualmachineindex/pipeline_test.go @@ -3,29 +3,18 @@ package virtualmachineindex import ( "context" "testing" - "time" "github.com/pkg/errors" - "github.com/stackrox/rox/central/sensor/service/common" - "github.com/stackrox/rox/central/sensor/service/connection" - connMocks "github.com/stackrox/rox/central/sensor/service/connection/mocks" "github.com/stackrox/rox/central/sensor/service/pipeline/reconciliation" vmDatastoreMocks "github.com/stackrox/rox/central/virtualmachine/datastore/mocks" "github.com/stackrox/rox/generated/internalapi/central" v4 "github.com/stackrox/rox/generated/internalapi/scanner/v4" v1 "github.com/stackrox/rox/generated/internalapi/virtualmachine/v1" "github.com/stackrox/rox/generated/storage" - "github.com/stackrox/rox/pkg/administration/events" - adminResources "github.com/stackrox/rox/pkg/administration/events/resources" - adminEventStream "github.com/stackrox/rox/pkg/administration/events/stream" "github.com/stackrox/rox/pkg/centralsensor" - "github.com/stackrox/rox/pkg/concurrency" "github.com/stackrox/rox/pkg/features" - "github.com/stackrox/rox/pkg/rate" - "github.com/stackrox/rox/pkg/sync" vmEnricherMocks "github.com/stackrox/rox/pkg/virtualmachine/enricher/mocks" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "go.uber.org/mock/gomock" ) @@ -36,13 +25,6 @@ const ( var ctx = context.Background() -// mustNewLimiter creates a rate limiter or fails the test. 
-func mustNewLimiter(t require.TestingT, workloadName string, globalRate float64, bucketCapacity int) *rate.Limiter { - limiter, err := rate.NewLimiter(workloadName, globalRate, bucketCapacity) - require.NoError(t, err) - return limiter -} - func TestPipeline(t *testing.T) { suite.Run(t, new(PipelineTestSuite)) } @@ -61,12 +43,9 @@ func (suite *PipelineTestSuite) SetupTest() { suite.mockCtrl = gomock.NewController(suite.T()) suite.vmDatastore = vmDatastoreMocks.NewMockDataStore(suite.mockCtrl) suite.enricher = vmEnricherMocks.NewMockVirtualMachineEnricher(suite.mockCtrl) - // Use unlimited rate limiter for tests (rate=0) - rateLimiter := mustNewLimiter(suite.T(), "test", 0, 50) suite.pipeline = &pipelineImpl{ vmDatastore: suite.vmDatastore, enricher: suite.enricher, - rateLimiter: rateLimiter, } } @@ -197,17 +176,13 @@ func (suite *PipelineTestSuite) TestGetPipeline() { func (suite *PipelineTestSuite) TestNewPipeline() { mockDatastore := vmDatastoreMocks.NewMockDataStore(suite.mockCtrl) mockEnricher := vmEnricherMocks.NewMockVirtualMachineEnricher(suite.mockCtrl) - rateLimiter := mustNewLimiter(suite.T(), "test", 0, 50) - adminEventsStream := adminEventStream.GetStreamForTesting(suite.T()) - pipeline := newPipeline(mockDatastore, mockEnricher, rateLimiter, adminEventsStream) + pipeline := newPipeline(mockDatastore, mockEnricher) suite.NotNil(pipeline) impl, ok := pipeline.(*pipelineImpl) suite.True(ok, "Should return pipelineImpl instance") suite.Equal(mockDatastore, impl.vmDatastore) suite.Equal(mockEnricher, impl.enricher) - suite.Equal(rateLimiter, impl.rateLimiter) - suite.Equal(adminEventsStream, impl.adminEventsStream) } // Test table-driven approach for different actions @@ -254,11 +229,9 @@ func TestPipelineRun_DifferentActions(t *testing.T) { vmDatastore := vmDatastoreMocks.NewMockDataStore(ctrl) enricher := vmEnricherMocks.NewMockVirtualMachineEnricher(ctrl) - rateLimiter := mustNewLimiter(t, "test", 0, 50) pipeline := &pipelineImpl{ vmDatastore: vmDatastore, enricher: enricher, - rateLimiter: rateLimiter, } vmID := "vm-1" @@ -299,10 +272,8 @@ func TestPipelineEdgeCases(t *testing.T) { defer ctrl.Finish() vmDatastore := vmDatastoreMocks.NewMockDataStore(ctrl) - rateLimiter := mustNewLimiter(t, "test", 0, 50) pipeline := &pipelineImpl{ vmDatastore: vmDatastore, - rateLimiter: rateLimiter, } t.Run("nil message", func(t *testing.T) { @@ -359,11 +330,9 @@ func TestPipelineRun_DisabledFeature(t *testing.T) { vmDatastore := vmDatastoreMocks.NewMockDataStore(ctrl) enricher := vmEnricherMocks.NewMockVirtualMachineEnricher(ctrl) - rateLimiter := mustNewLimiter(t, "test", 0, 50) pipeline := &pipelineImpl{ vmDatastore: vmDatastore, enricher: enricher, - rateLimiter: rateLimiter, } vmID := "vm-1" @@ -373,362 +342,3 @@ func TestPipelineRun_DisabledFeature(t *testing.T) { assert.NoError(t, err) } - -// TestPipelineRun_RateLimitDisabled tests that rate limiting is disabled when configured with 0 -func TestPipelineRun_RateLimitDisabled(t *testing.T) { - t.Setenv(features.VirtualMachines.EnvVar(), "true") - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - vmDatastore := vmDatastoreMocks.NewMockDataStore(ctrl) - enricher := vmEnricherMocks.NewMockVirtualMachineEnricher(ctrl) - rateLimiter := mustNewLimiter(t, "test", 0, 50) // Disabled - - pipeline := &pipelineImpl{ - vmDatastore: vmDatastore, - enricher: enricher, - rateLimiter: rateLimiter, - } - - vmID := "vm-1" - msg := createVMIndexMessage(vmID, central.ResourceAction_SYNC_RESOURCE) - - // Should process all 100 requests without rate 
limiting - for i := range 100 { - enricher.EXPECT(). - EnrichVirtualMachineWithVulnerabilities(gomock.Any(), gomock.Any()). - Return(nil) - vmDatastore.EXPECT(). - UpdateVirtualMachineScan(ctx, vmID, gomock.Any()). - Return(nil) - - err := pipeline.Run(ctx, testClusterID, msg, nil) - assert.NoError(t, err, "request %d should succeed with rate limiting disabled", i) - } -} - -// TestPipelineRun_RateLimitEnabled tests that rate limiting rejects requests when enabled. -// This test verifies that: -// 1. First N requests (within burst) succeed and perform enrichment/datastore writes -// 2. Rate-limited request does NOT perform enrichment or datastore writes -// 3. A NACK is sent for rate-limited requests when ACK support is enabled -func TestPipelineRun_RateLimitEnabled(t *testing.T) { - t.Setenv(features.VirtualMachines.EnvVar(), "true") - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - vmDatastore := vmDatastoreMocks.NewMockDataStore(ctrl) - enricher := vmEnricherMocks.NewMockVirtualMachineEnricher(ctrl) - rateLimiter := mustNewLimiter(t, "test", 5, 5) // 5 req/s, bucket capacity=5 - - // Recording injector to capture sent messages - injector := &recordingInjector{} - - // Mock connection with SensorACKSupport capability - mockConn := connMocks.NewMockSensorConnection(ctrl) - mockConn.EXPECT().HasCapability(centralsensor.SensorACKSupport).Return(true).AnyTimes() - - pipeline := &pipelineImpl{ - vmDatastore: vmDatastore, - enricher: enricher, - rateLimiter: rateLimiter, - } - - vmID := "vm-1" - msg := createVMIndexMessage(vmID, central.ResourceAction_SYNC_RESOURCE) - - // Build a context with the mocked connection that has SensorACKSupport - ctxWithConn := connection.WithConnection(context.Background(), mockConn) - - // Expect enrichment and datastore writes ONLY for the first 5 (non-rate-limited) requests. - // The 6th request should be rate-limited and these methods should NOT be called. - enricher.EXPECT(). - EnrichVirtualMachineWithVulnerabilities(gomock.Any(), gomock.Any()). - Return(nil). - Times(5) - - vmDatastore.EXPECT(). - UpdateVirtualMachineScan(gomock.Any(), vmID, gomock.Any()). - Return(nil). - Times(5) - - // Send 6 requests - the first 5 should be processed successfully, - // the 6th should be rate-limited. - for i := range 6 { - err := pipeline.Run(ctxWithConn, testClusterID, msg, injector) - assert.NoError(t, err, "Run should not return an error even when rate-limited (request %d)", i+1) - } - - // Verify ACKs were sent for successful requests and NACK for rate-limited request - acks := injector.getSentACKs() - require.Len(t, acks, 6, "expected 6 ACK/NACK messages (5 ACKs + 1 NACK)") - - // First 5 should be ACKs - for i := range 5 { - assert.Equal(t, central.SensorACK_ACK, acks[i].GetAction(), "request %d should be ACKed", i+1) - assert.Equal(t, central.SensorACK_VM_INDEX_REPORT, acks[i].GetMessageType()) - } - - // 6th should be NACK - assert.Equal(t, central.SensorACK_NACK, acks[5].GetAction(), "request 6 should be NACKed (rate limited)") - assert.Equal(t, central.SensorACK_VM_INDEX_REPORT, acks[5].GetMessageType()) - assert.Contains(t, acks[5].GetReason(), "rate limit exceeded") -} - -// TestPipelineRun_RateLimitEnabled_NoACKSupport tests that when the connection -// does not support SensorACKSupport, rate limiting still applies but no ACK/NACK -// messages are sent. 
-func TestPipelineRun_RateLimitEnabled_NoACKSupport(t *testing.T) { - t.Setenv(features.VirtualMachines.EnvVar(), "true") - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - vmDatastore := vmDatastoreMocks.NewMockDataStore(ctrl) - enricher := vmEnricherMocks.NewMockVirtualMachineEnricher(ctrl) - rateLimiter := mustNewLimiter(t, "test-no-ack", 5, 5) // 5 req/s, bucket capacity=5 - - // Recording injector to capture any ACK/NACK attempts - should remain empty. - injector := &recordingInjector{} - - // Mock connection WITHOUT SensorACKSupport capability - mockConn := connMocks.NewMockSensorConnection(ctrl) - mockConn.EXPECT().HasCapability(centralsensor.SensorACKSupport).Return(false).AnyTimes() - - pipeline := &pipelineImpl{ - vmDatastore: vmDatastore, - enricher: enricher, - rateLimiter: rateLimiter, - } - - vmID := "vm-1" - msg := createVMIndexMessage(vmID, central.ResourceAction_SYNC_RESOURCE) - - // Build a context with the mocked connection that does NOT have SensorACKSupport - ctxWithConn := connection.WithConnection(context.Background(), mockConn) - - // Expect enrichment and datastore writes ONLY for the first 5 (non-rate-limited) requests. - enricher.EXPECT(). - EnrichVirtualMachineWithVulnerabilities(gomock.Any(), gomock.Any()). - Return(nil). - Times(5) - - vmDatastore.EXPECT(). - UpdateVirtualMachineScan(gomock.Any(), vmID, gomock.Any()). - Return(nil). - Times(5) - - // Send 6 requests - the first 5 should be processed successfully, - // the 6th should be rate-limited. - for i := range 6 { - err := pipeline.Run(ctxWithConn, testClusterID, msg, injector) - assert.NoError(t, err, "Run should not return an error even when rate-limited (request %d)", i+1) - } - - // Verify NO ACK/NACK messages were sent (SensorACKSupport is not available) - acks := injector.getSentACKs() - assert.Empty(t, acks, "no ACK/NACKs should be sent when SensorACKSupport is not available") -} - -func TestPipelineRun_RateLimitEmitsAdminEvent(t *testing.T) { - tests := map[string]struct { - reason string - }{ - "should emit admin event when rate limited": { - reason: rate.ReasonRateLimitExceeded, - }, - } - - for name, tt := range tests { - t.Run(name, func(t *testing.T) { - t.Setenv(features.VirtualMachines.EnvVar(), "true") - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - vmDatastore := vmDatastoreMocks.NewMockDataStore(ctrl) - enricher := vmEnricherMocks.NewMockVirtualMachineEnricher(ctrl) - adminEventsStream := adminEventStream.GetStreamForTesting(t) - - pipeline := &pipelineImpl{ - vmDatastore: vmDatastore, - enricher: enricher, - rateLimiter: &denyingRateLimiter{reason: tt.reason}, - adminEventsStream: adminEventsStream, - } - - msg := createVMIndexMessage("vm-1", central.ResourceAction_SYNC_RESOURCE) - - err := pipeline.Run(ctx, testClusterID, msg, nil) - assert.NoError(t, err) - - receivedEvent := consumeAdminEvent(t, adminEventsStream) - require.NotNil(t, receivedEvent, "expected an administration event to be emitted") - assert.Equal(t, storage.AdministrationEventType_ADMINISTRATION_EVENT_TYPE_GENERIC, receivedEvent.GetType()) - assert.Equal(t, storage.AdministrationEventLevel_ADMINISTRATION_EVENT_LEVEL_WARNING, receivedEvent.GetLevel()) - assert.Equal(t, events.DefaultDomain, receivedEvent.GetDomain()) - assert.Equal(t, adminResources.Cluster, receivedEvent.GetResourceType()) - assert.Equal(t, testClusterID, receivedEvent.GetResourceID()) - assert.Contains(t, receivedEvent.GetMessage(), "VM index reports from cluster "+testClusterID) - assert.Contains(t, receivedEvent.GetMessage(), 
tt.reason) - }) - } -} - -// TestPipelineRun_NilRateLimiter_WithACKSupport tests behavior when the rateLimiter is nil and ACKs are supported. -// This covers the nil-limiter branch and verifies that: -// 1. No enrichment/datastore calls occur -// 2. A NACK with MessageType=VM_INDEX_REPORT is sent -func TestPipelineRun_NilRateLimiter_WithACKSupport(t *testing.T) { - t.Setenv(features.VirtualMachines.EnvVar(), "true") - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - // Mocks for datastore and enricher - no expectations should be set on these, - // because the pipeline must short-circuit before doing any work. - vmDatastore := vmDatastoreMocks.NewMockDataStore(ctrl) - enricher := vmEnricherMocks.NewMockVirtualMachineEnricher(ctrl) - - // Recording injector to capture sent messages - injector := &recordingInjector{} - - // Mock connection with SensorACKSupport capability - mockConn := connMocks.NewMockSensorConnection(ctrl) - mockConn.EXPECT().HasCapability(centralsensor.SensorACKSupport).Return(true).AnyTimes() - - pipeline := &pipelineImpl{ - vmDatastore: vmDatastore, - enricher: enricher, - rateLimiter: nil, // nil rate limiter to cover the nil-limiter branch - } - - vmID := "vm-1" - msg := createVMIndexMessage(vmID, central.ResourceAction_SYNC_RESOURCE) - - // Build a context with the mocked connection that has SensorACKSupport - ctxWithConn := connection.WithConnection(context.Background(), mockConn) - - // Run the pipeline - it should short-circuit due to nil rateLimiter, - // emit a NACK, and not call any datastore/enricher methods. - err := pipeline.Run(ctxWithConn, testClusterID, msg, injector) - assert.NoError(t, err, "pipeline Run should not error when rateLimiter is nil") - - // Verify exactly one NACK was sent - acks := injector.getSentACKs() - require.Len(t, acks, 1, "expected exactly one ACK/NACK to be sent") - - ack := acks[0] - assert.Equal(t, central.SensorACK_NACK, ack.GetAction(), "expected NACK action") - assert.Equal(t, central.SensorACK_VM_INDEX_REPORT, ack.GetMessageType(), "expected VM_INDEX_REPORT message type") - assert.Equal(t, vmID, ack.GetResourceId(), "expected resource ID to match VM ID") - assert.Equal(t, "rate limiter not configured", ack.GetReason(), "expected reason to indicate nil rate limiter") -} - -// recordingInjector is a test double that records all SensorACK messages sent via InjectMessage. -var _ common.MessageInjector = (*recordingInjector)(nil) - -type recordingInjector struct { - lock sync.Mutex - messages []*central.SensorACK -} - -func (r *recordingInjector) InjectMessage(_ concurrency.Waitable, msg *central.MsgToSensor) error { - r.lock.Lock() - defer r.lock.Unlock() - if ack := msg.GetSensorAck(); ack != nil { - r.messages = append(r.messages, ack.CloneVT()) - } - return nil -} - -func (r *recordingInjector) InjectMessageIntoQueue(_ *central.MsgFromSensor) {} - -func (r *recordingInjector) getSentACKs() []*central.SensorACK { - r.lock.Lock() - defer r.lock.Unlock() - copied := make([]*central.SensorACK, 0, len(r.messages)) - copied = append(copied, r.messages...) - return copied -} - -// TestOnFinishPropagatesClusterDisconnect verifies that OnFinish propagates the cluster ID -// to the rate limiter's OnClientDisconnect method. 
-func TestOnFinishPropagatesClusterDisconnect(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - vmDatastore := vmDatastoreMocks.NewMockDataStore(ctrl) - enricher := vmEnricherMocks.NewMockVirtualMachineEnricher(ctrl) - - // Use a fake limiter so we can observe calls to OnClientDisconnect. - fakeLimiter := &fakeRateLimiter{} - - p := &pipelineImpl{ - vmDatastore: vmDatastore, - enricher: enricher, - rateLimiter: fakeLimiter, - } - - const clusterID = "cluster-1" - - p.OnFinish(clusterID) - - assert.Equal(t, clusterID, fakeLimiter.lastDisconnectedClientID, "OnFinish should propagate cluster disconnect to the rate limiter") -} - -// TestOnFinishWithNilRateLimiter verifies that OnFinish doesn't panic when rateLimiter is nil. -func TestOnFinishWithNilRateLimiter(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - vmDatastore := vmDatastoreMocks.NewMockDataStore(ctrl) - enricher := vmEnricherMocks.NewMockVirtualMachineEnricher(ctrl) - - p := &pipelineImpl{ - vmDatastore: vmDatastore, - enricher: enricher, - rateLimiter: nil, - } - - // Should not panic - assert.NotPanics(t, func() { - p.OnFinish("cluster-1") - }) -} - -// fakeRateLimiter is a test double that records the last client ID passed to OnClientDisconnect. -// It satisfies the interface used by pipelineImpl.rateLimiter. -type fakeRateLimiter struct { - lastDisconnectedClientID string -} - -func (f *fakeRateLimiter) TryConsume(_ string) (bool, string) { - return true, "" -} - -func (f *fakeRateLimiter) OnClientDisconnect(clientID string) { - f.lastDisconnectedClientID = clientID -} - -type denyingRateLimiter struct { - reason string -} - -func (d *denyingRateLimiter) TryConsume(_ string) (bool, string) { - return false, d.reason -} - -func (d *denyingRateLimiter) OnClientDisconnect(string) {} - -func consumeAdminEvent(t *testing.T, stream events.Stream) *events.AdministrationEvent { - t.Helper() - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - var receivedEvent *events.AdministrationEvent - for event := range stream.Consume(ctx) { - receivedEvent = event - break - } - - return receivedEvent -} diff --git a/pkg/env/virtualmachine.go b/pkg/env/virtualmachine.go index 9acf6a9df5ca2..2b521bf10926f 100644 --- a/pkg/env/virtualmachine.go +++ b/pkg/env/virtualmachine.go @@ -37,7 +37,7 @@ var ( // As VM scanning is only one of potentially many other workloads, we set the default to 0.3 requests per second. // For larger clusters, the rate limit could be increased to up to 3.0 requests per second only if the // scanner-v4-matcher and the scanner-v4-db are able to handle the load! - VMIndexReportRateLimit = RegisterSetting("ROX_VM_INDEX_REPORT_RATE_LIMIT", WithDefault("0.3")) + VMIndexReportRateLimit = RegisterFloatSetting("ROX_VM_INDEX_REPORT_RATE_LIMIT", 0.3) // VMIndexReportBucketCapacity defines the token bucket capacity for VM index report rate limiting. // This is the maximum number of requests that can be accepted in a burst before rate limiting kicks in. 
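A minimal usage sketch of the limiter API this series moves into the connection layer, mirroring the wiring added to manager_impl.go above. The standalone main wrapper and the empty placeholder message are assumptions made for illustration; the constructor chain, the predicate, and the env settings are the ones introduced by this patch.

package main

import (
	"fmt"

	"github.com/stackrox/rox/generated/internalapi/central"
	"github.com/stackrox/rox/pkg/env"
	"github.com/stackrox/rox/pkg/rate"
)

func main() {
	// Only VM index reports are metered; ForWorkload makes the limiter
	// ignore (always allow) any message the predicate rejects.
	limiter, err := rate.NewLimiter(
		"vm_index_reports",
		env.VMIndexReportRateLimit.FloatSetting(),
		env.VMIndexReportBucketCapacity.IntegerSetting(),
	).ForWorkload(func(msg *central.MsgFromSensor) bool {
		return msg.GetEvent().GetVirtualMachineIndexReport() != nil
	})
	if err != nil {
		panic(err)
	}

	// A non-VM message (placeholder, assumption) passes straight through.
	allowed, reason := limiter.TryConsume("cluster-1", &central.MsgFromSensor{})
	fmt.Println(allowed, reason) // true, ""

	// Dropping a client's bucket rebalances the remaining per-client limits.
	limiter.OnClientDisconnect("cluster-1")
}

Pushing the workload check into the limiter keeps multiplexedPush free of message-type switches: messages the predicate rejects are allowed to pass without touching the token bucket.
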
diff --git a/pkg/rate/limiter.go b/pkg/rate/limiter.go index cb98e3f9fcf3e..ab4cc8b276161 100644 --- a/pkg/rate/limiter.go +++ b/pkg/rate/limiter.go @@ -9,6 +9,7 @@ import ( "time" "github.com/pkg/errors" + "github.com/stackrox/rox/generated/internalapi/central" "github.com/stackrox/rox/pkg/concurrency" "github.com/stackrox/rox/pkg/logging" "github.com/stackrox/rox/pkg/sync" @@ -48,6 +49,8 @@ type Limiter struct { buckets map[string]*gorate.Limiter numClients int + acceptsFn func(msg *central.MsgFromSensor) bool + clock Clock // time source (injectable for testing) } @@ -59,6 +62,38 @@ func (RealClock) Now() time.Time { return time.Now() } +// limiterOption is an intermediary type for creating a rate limiter. +// It forces the caller to define the workload type to which the rate limiter would react. +type limiterOption struct { + l *Limiter + err error +} + +// ForAllWorkloads configures the rate limiter to analyze all types of messages (including nil). +func (lo *limiterOption) ForAllWorkloads() (*Limiter, error) { + if lo.l == nil { + return nil, lo.err + } + lo.l.acceptsFn = func(msg *central.MsgFromSensor) bool { + return true + } + return lo.l, lo.err +} + +// ForWorkload allows specifying a function that should return `true` if the given MsgFromSensor is to be evaluated by +// the rate-limiter (and later accepted or rejected) and `false` if the rate-limiter should ignore this message +// (allowing it to pass). This function must handle `nil` arguments and execute quickly. +func (lo *limiterOption) ForWorkload(acceptsFn func(msg *central.MsgFromSensor) bool) (*Limiter, error) { + if lo.l == nil { + return nil, lo.err + } + if acceptsFn == nil { + return nil, errors.New("acceptsFn must not be nil") + } + lo.l.acceptsFn = acceptsFn + return lo.l, lo.err +} + // NewLimiter creates a new per-client rate limiter for the given workload. // globalRate of 0 disables rate limiting (unlimited). // bucketCapacity is the max tokens per client bucket (allows temporary bursts above sustained rate). @@ -68,22 +103,22 @@ func (RealClock) Now() time.Time { // - workloadName is empty // - globalRate is negative // - bucketCapacity is less than 1 -func NewLimiter(workloadName string, globalRate float64, bucketCapacity int) (*Limiter, error) { +func NewLimiter(workloadName string, globalRate float64, bucketCapacity int) *limiterOption { return NewLimiterWithClock(workloadName, globalRate, bucketCapacity, RealClock{}) } // NewLimiterWithClock creates a new per-client rate limiter with an injectable clock. // This is primarily useful for testing to control time and avoid flaky tests. // For production use, prefer NewLimiter which uses the real system clock. -func NewLimiterWithClock(workloadName string, globalRate float64, bucketCapacity int, clock Clock) (*Limiter, error) { +func NewLimiterWithClock(workloadName string, globalRate float64, bucketCapacity int, clock Clock) *limiterOption { if workloadName == "" { - return nil, ErrEmptyWorkloadName + return &limiterOption{nil, ErrEmptyWorkloadName} } if globalRate < 0 { - return nil, ErrNegativeRate + return &limiterOption{nil, ErrNegativeRate} } if bucketCapacity < 1 { - return nil, ErrInvalidBucketCapacity + return &limiterOption{nil, ErrInvalidBucketCapacity} } // Initialize metrics for this workload so they're visible in Prometheus immediately. 
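The option type introduced in the hunk above defers configuration errors until a workload filter is chosen, and the next hunk makes the limiter itself nil-safe. A short sketch of the resulting failure mode, under the assumption that a caller logs the error but may still hold the typed-nil limiter (the same behavior TestOnClientDisconnect_Nil exercises later in this patch):

package main

import (
	"fmt"

	"github.com/stackrox/rox/pkg/rate"
)

func main() {
	// An empty workload name is rejected; the error is carried through the
	// limiterOption and only surfaced once a workload filter is selected.
	limiter, err := rate.NewLimiter("", 10.0, 50).ForAllWorkloads()
	fmt.Println(err != nil, limiter == nil) // true, true

	// The nil *Limiter fails open: requests are allowed rather than dropped,
	// and disconnect notifications become a no-op.
	allowed, reason := limiter.TryConsume("cluster-1", nil)
	fmt.Println(allowed, reason) // true, "nil rate limiter"
	limiter.OnClientDisconnect("cluster-1")
}

Central still treats a construction failure as a programming error (utils.Should in newVMIndexReportRateLimiter), but the nil checks keep a misconfiguration from turning into dropped sensor traffic.
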
@@ -94,19 +129,30 @@ func NewLimiterWithClock(workloadName string, globalRate float64, bucketCapacity PerClientRate.WithLabelValues(workloadName).Set(globalRate) PerClientBucketCapacity.WithLabelValues(workloadName).Set(float64(bucketCapacity)) - return &Limiter{ - workloadName: workloadName, - globalRate: globalRate, - bucketCapacity: bucketCapacity, - buckets: make(map[string]*gorate.Limiter), - clock: clock, - }, nil + return &limiterOption{ + l: &Limiter{ + workloadName: workloadName, + globalRate: globalRate, + bucketCapacity: bucketCapacity, + buckets: make(map[string]*gorate.Limiter), + clock: clock, + }, + err: nil, + } } // TryConsume attempts to consume one token for the given client. // Returns true if allowed, false if rate limit exceeded. // Metrics are automatically recorded. -func (l *Limiter) TryConsume(clientID string) (allowed bool, reason string) { +func (l *Limiter) TryConsume(clientID string, msg *central.MsgFromSensor) (allowed bool, reason string) { + if l == nil { + return true, "nil rate limiter" + } + if !l.accepts(msg) { + // This is not the correct rateLimiter to evaluate this request - allow request to pass. + return true, "" + } + if l.globalRate <= 0 { // Rate limiting disabled, but still record metrics for visibility into request volume. RequestsTotal.WithLabelValues(l.workloadName, OutcomeAccepted).Inc() @@ -125,6 +171,13 @@ func (l *Limiter) TryConsume(clientID string) (allowed bool, reason string) { return false, ReasonRateLimitExceeded } +func (l *Limiter) accepts(msg *central.MsgFromSensor) bool { + if l.acceptsFn == nil { + return true + } + return l.acceptsFn(msg) +} + // getOrCreateLimiter returns the rate limiter for a given client, creating one if needed. // When a new client is added, all limiters are rebalanced to maintain fairness. func (l *Limiter) getOrCreateLimiter(clientID string) *gorate.Limiter { @@ -185,6 +238,9 @@ func (l *Limiter) perClientBucketCapacity(numClients int) int { // OnClientDisconnect removes a client from rate limiting and rebalances remaining limiters. // This should be called when a client connection is terminated. func (l *Limiter) OnClientDisconnect(clientID string) { + if l == nil { + return + } if l.globalRate <= 0 { // Rate limiting disabled return diff --git a/pkg/rate/limiter_test.go b/pkg/rate/limiter_test.go index 97d04d9161e02..579145acb961f 100644 --- a/pkg/rate/limiter_test.go +++ b/pkg/rate/limiter_test.go @@ -4,6 +4,7 @@ import ( "testing" "time" + "github.com/stackrox/rox/generated/internalapi/central" "github.com/stackrox/rox/pkg/concurrency" "github.com/stackrox/rox/pkg/sync" "github.com/stretchr/testify/assert" @@ -40,7 +41,7 @@ func (c *TestClock) Advance(d time.Duration) { // mustNewLimiter creates a limiter or fails the test. func mustNewLimiter(t *testing.T, workloadName string, globalRate float64, bucketCapacity int) *Limiter { t.Helper() - limiter, err := NewLimiter(workloadName, globalRate, bucketCapacity) + limiter, err := NewLimiter(workloadName, globalRate, bucketCapacity).ForAllWorkloads() require.NoError(t, err) return limiter } @@ -48,51 +49,127 @@ func mustNewLimiter(t *testing.T, workloadName string, globalRate float64, bucke // mustNewLimiterWithClock creates a limiter with an injectable clock or fails the test. 
func mustNewLimiterWithClock(t *testing.T, workloadName string, globalRate float64, bucketCapacity int, clock Clock) *Limiter { t.Helper() - limiter, err := NewLimiterWithClock(workloadName, globalRate, bucketCapacity, clock) + limiter, err := NewLimiterWithClock(workloadName, globalRate, bucketCapacity, clock).ForAllWorkloads() require.NoError(t, err) return limiter } func TestNewLimiter(t *testing.T) { t.Run("should create a new limiter", func(t *testing.T) { - limiter, err := NewLimiter(workloadName, 10.0, 50) + limiter, err := NewLimiter(workloadName, 10.0, 50).ForAllWorkloads() require.NoError(t, err) assert.Equal(t, 10.0, limiter.GlobalRate()) assert.Equal(t, 50, limiter.BucketCapacity()) assert.Equal(t, workloadName, limiter.WorkloadName()) }) t.Run("should create a new limiter with rate limiting disabled", func(t *testing.T) { - limiter, err := NewLimiter(workloadName, 0.0, 1) // rate=0 disables limiting, bucketCapacity still must be >= 1 + limiter, err := NewLimiter(workloadName, 0.0, 1).ForAllWorkloads() // rate=0 disables limiting, bucketCapacity still must be >= 1 require.NoError(t, err) assert.Equal(t, 0.0, limiter.GlobalRate()) assert.Equal(t, 1, limiter.BucketCapacity()) assert.Equal(t, workloadName, limiter.WorkloadName()) }) t.Run("should create a new limiter with rate higher than bucket capacity", func(t *testing.T) { - limiter, err := NewLimiter(workloadName, 50.0, 2) + limiter, err := NewLimiter(workloadName, 50.0, 2).ForAllWorkloads() require.NoError(t, err) assert.Equal(t, 50.0, limiter.GlobalRate()) assert.Equal(t, 2, limiter.BucketCapacity()) assert.Equal(t, workloadName, limiter.WorkloadName()) }) t.Run("should error on empty workload name", func(t *testing.T) { - _, err := NewLimiter("", 10.0, 50) + _, err := NewLimiter("", 10.0, 50).ForAllWorkloads() assert.ErrorIs(t, err, ErrEmptyWorkloadName) }) t.Run("should error on negative rate", func(t *testing.T) { - _, err := NewLimiter(workloadName, -1.0, 50) + _, err := NewLimiter(workloadName, -1.0, 50).ForAllWorkloads() assert.ErrorIs(t, err, ErrNegativeRate) }) t.Run("should error on zero bucket capacity", func(t *testing.T) { - _, err := NewLimiter(workloadName, 10.0, 0) + _, err := NewLimiter(workloadName, 10.0, 0).ForAllWorkloads() assert.ErrorIs(t, err, ErrInvalidBucketCapacity) }) } +func TestLimiterOption_ForWorkload(t *testing.T) { + tests := map[string]struct { + workloadName string + globalRate float64 + bucketCapacity int + acceptsFn func(msg *central.MsgFromSensor) bool + expectedErr error + expectedErrText string + expectLimiter bool + shouldSkipCheck bool + }{ + "should return error when acceptsFn is nil": { + workloadName: workloadName, + globalRate: 1, + bucketCapacity: 1, + acceptsFn: nil, + expectedErrText: "acceptsFn must not be nil", + }, + "should return error when limiter creation fails": { + workloadName: "", + globalRate: 1, + bucketCapacity: 1, + acceptsFn: func(msg *central.MsgFromSensor) bool { + return msg != nil + }, + expectedErr: ErrEmptyWorkloadName, + }, + "should allow configuring a workload filter": { + workloadName: workloadName, + globalRate: 1, + bucketCapacity: 1, + acceptsFn: func(msg *central.MsgFromSensor) bool { + return msg != nil + }, + expectLimiter: true, + shouldSkipCheck: true, + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + limiter, err := NewLimiter(tt.workloadName, tt.globalRate, tt.bucketCapacity).ForWorkload(tt.acceptsFn) + if tt.expectedErr != nil || tt.expectedErrText != "" { + require.Error(t, err) + if tt.expectedErr != nil { + 
assert.ErrorIs(t, err, tt.expectedErr) + } + if tt.expectedErrText != "" { + assert.EqualError(t, err, tt.expectedErrText) + } + assert.Nil(t, limiter) + return + } + require.NoError(t, err) + if tt.expectLimiter { + assert.NotNil(t, limiter) + } + if !tt.shouldSkipCheck { + return + } + + allowed, reason := limiter.TryConsume("client-1", ¢ral.MsgFromSensor{}) + require.True(t, allowed) + assert.Empty(t, reason) + + allowed, reason = limiter.TryConsume("client-1", ¢ral.MsgFromSensor{}) + assert.False(t, allowed) + assert.Equal(t, ReasonRateLimitExceeded, reason) + + allowed, reason = limiter.TryConsume("client-1", nil) + assert.True(t, allowed) + assert.Empty(t, reason) + }) + } +} + func TestTryConsume_Disabled(t *testing.T) { limiter := mustNewLimiter(t, "test", 0, 5) for i := range 100 { - allowed, reason := limiter.TryConsume("test-cluster") + allowed, reason := limiter.TryConsume("test-cluster", nil) assert.True(t, allowed, "request %d should be allowed when rate limiting is disabled", i) assert.Empty(t, reason) } @@ -104,24 +181,24 @@ func TestTryConsume_SingleClient(t *testing.T) { // With 1 client, per-client burst = 50/1 = 50 requests for i := range 50 { - allowed, reason := limiter.TryConsume("client-1") + allowed, reason := limiter.TryConsume("client-1", nil) assert.True(t, allowed, "request %d should be allowed within burst", i) assert.Empty(t, reason) } // Time is frozen - no new tokens can be added between requests. // 51st request should be rejected (burst exhausted) - allowed, reason := limiter.TryConsume("client-1") + allowed, reason := limiter.TryConsume("client-1", nil) assert.False(t, allowed, "request should be rejected after burst exhausted") assert.Equal(t, "rate limit exceeded", reason) // Advance time and verify tokens refill correctly clock.Advance(500 * time.Millisecond) // At 10 req/s, expect ~5 tokens for i := range 5 { - allowed, _ := limiter.TryConsume("client-1") + allowed, _ := limiter.TryConsume("client-1", nil) assert.True(t, allowed, "request %d should be allowed after time advance", i) } - allowed, _ = limiter.TryConsume("client-1") + allowed, _ = limiter.TryConsume("client-1", nil) assert.False(t, allowed, "should be rejected after consuming refilled tokens") } @@ -141,26 +218,26 @@ func TestTryConsume_MultipleClients_Fairness(t *testing.T) { // Exhaust burst for client-1 (20 requests) for i := range 20 { - allowed, _ := limiter.TryConsume(client1) + allowed, _ := limiter.TryConsume(client1, nil) assert.True(t, allowed, "client-1 request %d should be allowed", i) } - allowed, _ := limiter.TryConsume(client1) + allowed, _ := limiter.TryConsume(client1, nil) assert.False(t, allowed, "client-1 should be rate limited after burst") // client-2 and client-3 should still have their full burst capacity for i := range 20 { - allowed, _ := limiter.TryConsume(client2) + allowed, _ := limiter.TryConsume(client2, nil) assert.True(t, allowed, "client-2 request %d should be allowed", i) } for i := range 20 { - allowed, _ := limiter.TryConsume(client3) + allowed, _ := limiter.TryConsume(client3, nil) assert.True(t, allowed, "client-3 request %d should be allowed", i) } // All clients exhausted - time is frozen so no tokens refilled - allowed, _ = limiter.TryConsume(client2) + allowed, _ = limiter.TryConsume(client2, nil) assert.False(t, allowed, "client-2 should be rate limited") - allowed, _ = limiter.TryConsume(client3) + allowed, _ = limiter.TryConsume(client3, nil) assert.False(t, allowed, "client-3 should be rate limited") } @@ -171,19 +248,19 @@ func 
TestTryConsume_Rebalancing(t *testing.T) { // Start with client-1: per-client burst = 100/1 = 100 for i := range 100 { - allowed, _ := limiter.TryConsume("client-1") + allowed, _ := limiter.TryConsume("client-1", nil) assert.True(t, allowed, "client-1 initial request %d should be allowed", i) } - allowed, _ := limiter.TryConsume("client-1") + allowed, _ := limiter.TryConsume("client-1", nil) assert.False(t, allowed, "client-1 should be rate limited after initial burst") // Add client-2: rebalances to per-client burst = 100/2 = 50 // client-2 gets fresh bucket with capacity 50 for i := range 50 { - allowed, _ := limiter.TryConsume("client-2") + allowed, _ := limiter.TryConsume("client-2", nil) assert.True(t, allowed, "client-2 request %d should be allowed after rebalancing", i) } - allowed, _ = limiter.TryConsume("client-2") + allowed, _ = limiter.TryConsume("client-2", nil) assert.False(t, allowed, "client-2 should be rate limited after burst") // Advance time for token refill (at 5 req/s per client, 7 tokens refill in 1.4 seconds) @@ -192,9 +269,9 @@ func TestTryConsume_Rebalancing(t *testing.T) { // Both clients should get ~7 tokens back (5 req/s * 1.5s) // Verify we can make a few requests for range 5 { - allowed, _ := limiter.TryConsume("client-1") + allowed, _ := limiter.TryConsume("client-1", nil) assert.True(t, allowed, "client-1 should have refilled tokens") - allowed, _ = limiter.TryConsume("client-2") + allowed, _ = limiter.TryConsume("client-2", nil) assert.True(t, allowed, "client-2 should have refilled tokens") } } @@ -206,12 +283,12 @@ func TestTryConsume_BurstWindow(t *testing.T) { // With 1 client, per-client burst = 100/1 = 100 for i := range 100 { - allowed, _ := limiter.TryConsume("client-1") + allowed, _ := limiter.TryConsume("client-1", nil) assert.True(t, allowed, "request %d should be allowed within burst", i) } // 101st request rejected - allowed, _ := limiter.TryConsume("client-1") + allowed, _ := limiter.TryConsume("client-1", nil) assert.False(t, allowed, "request should be rejected after burst exhausted") // Advance time for refill (at 10 req/s, 15 tokens refill in 1.5 seconds) @@ -219,7 +296,7 @@ func TestTryConsume_BurstWindow(t *testing.T) { // Should get ~15 tokens back - verify we can make at least 10 requests for range 10 { - allowed, _ = limiter.TryConsume("client-1") + allowed, _ = limiter.TryConsume("client-1", nil) assert.True(t, allowed, "should have refilled tokens") } } @@ -263,28 +340,28 @@ func TestRebalancing_DynamicClientCount(t *testing.T) { // Client 1: gets 30/1 = 30 req/s, burst = 30*10s = 300 for range 300 { - allowed, _ := limiter.TryConsume("client-1") + allowed, _ := limiter.TryConsume("client-1", nil) assert.True(t, allowed) } // Add client 2: rebalances to 30/2 = 15 req/s each, burst = 15*10s = 150 for range 150 { - allowed, _ := limiter.TryConsume("client-2") + allowed, _ := limiter.TryConsume("client-2", nil) assert.True(t, allowed) } // Add client 3: rebalances to 30/3 = 10 req/s each, burst = 10*10s = 100 for range 100 { - allowed, _ := limiter.TryConsume("client-3") + allowed, _ := limiter.TryConsume("client-3", nil) assert.True(t, allowed) } // All clients should be limited now - time is frozen - allowed, _ := limiter.TryConsume("client-1") + allowed, _ := limiter.TryConsume("client-1", nil) assert.False(t, allowed) - allowed, _ = limiter.TryConsume("client-2") + allowed, _ = limiter.TryConsume("client-2", nil) assert.False(t, allowed) - allowed, _ = limiter.TryConsume("client-3") + allowed, _ = limiter.TryConsume("client-3", 
nil) assert.False(t, allowed) // Advance time for token refill (at 10 req/s per client, 15 tokens refill in 1.5s) @@ -292,15 +369,27 @@ func TestRebalancing_DynamicClientCount(t *testing.T) { // Each client should get ~15 tokens back - verify at least 10 work for range 10 { - allowed, _ := limiter.TryConsume("client-1") + allowed, _ := limiter.TryConsume("client-1", nil) assert.True(t, allowed, "client-1 should get tokens after refill") - allowed, _ = limiter.TryConsume("client-2") + allowed, _ = limiter.TryConsume("client-2", nil) assert.True(t, allowed, "client-2 should get tokens after refill") - allowed, _ = limiter.TryConsume("client-3") + allowed, _ = limiter.TryConsume("client-3", nil) assert.True(t, allowed, "client-3 should get tokens after refill") } } +func TestOnClientDisconnect_Nil(t *testing.T) { + clock := NewTestClock(time.Now()) + // intentionally broken rate-limiter + limiter, err := NewLimiterWithClock(workloadName, -1.0, -2.0, clock).ForAllWorkloads() + assert.Error(t, err) + assert.Nil(t, limiter) + assert.NotPanics(t, func() { + limiter.OnClientDisconnect("client-2") + limiter.TryConsume("client-2", nil) + }) +} + func TestOnClientDisconnect(t *testing.T) { clock := NewTestClock(time.Now()) limiter := mustNewLimiterWithClock(t, "test", 20, 100, clock) // rate=20 req/s, bucket capacity=100 @@ -311,10 +400,10 @@ func TestOnClientDisconnect(t *testing.T) { // Exhaust client-1's burst for i := 0; i < 50; i++ { - allowed, _ := limiter.TryConsume("client-1") + allowed, _ := limiter.TryConsume("client-1", nil) assert.True(t, allowed) } - allowed, _ := limiter.TryConsume("client-1") + allowed, _ := limiter.TryConsume("client-1", nil) assert.False(t, allowed, "client-1 should be limited after burst") // Disconnect client-2 From 8df9ea7debc445b0312ff902ed8c77cd7ff65536 Mon Sep 17 00:00:00 2001 From: Mauro Ezequiel Moltrasio Date: Wed, 28 Jan 2026 11:04:38 +0100 Subject: [PATCH 048/232] chore(fact): Bump version and limit monitored paths (#18677) --- FACT_VERSION | 2 +- .../helm/stackrox-secured-cluster/templates/collector.yaml.htpl | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/FACT_VERSION b/FACT_VERSION index 6454293b3009a..2ac86b8890de7 100644 --- a/FACT_VERSION +++ b/FACT_VERSION @@ -1 +1 @@ -0.1.x-103-gacf65d8889 +0.2.x diff --git a/image/templates/helm/stackrox-secured-cluster/templates/collector.yaml.htpl b/image/templates/helm/stackrox-secured-cluster/templates/collector.yaml.htpl index f69e20656052d..27f318e8ccc9b 100644 --- a/image/templates/helm/stackrox-secured-cluster/templates/collector.yaml.htpl +++ b/image/templates/helm/stackrox-secured-cluster/templates/collector.yaml.htpl @@ -129,6 +129,8 @@ spec: value: "/var/run/secrets/stackrox.io/certs/" - name: FACT_HOST_MOUNT value: "/host" + - name: FACT_PATHS + value: "/etc/ssh/sshd_config:/etc/sudoers:/etc/passwd:/etc/shadow" {{- include "srox.envVars" (list . 
"daemonset" "collector" "fact") | nindent 8 }} resources: {{- ._rox.collector._sfaResources | nindent 10 }} From b7db2fabacd4c38c6e21ef586d13b9f57282ec44 Mon Sep 17 00:00:00 2001 From: David Shrewsberry <99685630+dashrews78@users.noreply.github.com> Date: Wed, 28 Jan 2026 05:32:49 -0500 Subject: [PATCH 049/232] ROX-32845: cluster CVE connection leak (#18702) --- central/cve/cluster/datastore/store/postgres/full_store.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/central/cve/cluster/datastore/store/postgres/full_store.go b/central/cve/cluster/datastore/store/postgres/full_store.go index 987ba71cb6ba8..d2e59163d339f 100644 --- a/central/cve/cluster/datastore/store/postgres/full_store.go +++ b/central/cve/cluster/datastore/store/postgres/full_store.go @@ -62,10 +62,16 @@ func (s *fullStoreImpl) DeleteClusterCVEsForCluster(ctx context.Context, cluster _, err = tx.Exec(ctx, "DELETE FROM "+clusterCVEEdgeTable+" WHERE clusterid = $1", uuid.FromStringOrNil(clusterID)) if err != nil { + if err := tx.Rollback(ctx); err != nil { + return err + } return err } _, err = tx.Exec(ctx, "DELETE FROM "+clusterCVEsTable+" WHERE not exists (select "+clusterCVEEdgeTable+".cveid from "+clusterCVEEdgeTable+" where "+clusterCVEEdgeTable+".cveid = "+clusterCVEsTable+".id)") if err != nil { + if err := tx.Rollback(ctx); err != nil { + return err + } return err } return tx.Commit(ctx) From f60fa24dd3738a4d7eaa0da7a200380c06d250a5 Mon Sep 17 00:00:00 2001 From: Guzman Date: Wed, 28 Jan 2026 13:10:37 +0100 Subject: [PATCH 050/232] ROX-32731: Increase default VM index report bucket capacity to 200 (#18719) Co-authored-by: Claude Sonnet 4.5 --- pkg/env/virtualmachine.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/env/virtualmachine.go b/pkg/env/virtualmachine.go index 2b521bf10926f..b12ce039e86f9 100644 --- a/pkg/env/virtualmachine.go +++ b/pkg/env/virtualmachine.go @@ -43,6 +43,6 @@ var ( // This is the maximum number of requests that can be accepted in a burst before rate limiting kicks in. // For example, with capacity=15 and rate=3 req/sec, a sensor can send up to 15 requests instantly, // then must wait for 5 seconds for tokens to refill at the rate limit. 
- // Default: 5 tokens - VMIndexReportBucketCapacity = RegisterIntegerSetting("ROX_VM_INDEX_REPORT_BUCKET_CAPACITY", 5).WithMinimum(1) + // Default: 200 tokens + VMIndexReportBucketCapacity = RegisterIntegerSetting("ROX_VM_INDEX_REPORT_BUCKET_CAPACITY", 200).WithMinimum(1) ) From 2859177364ded4172e29505e3511e2b7732ff120 Mon Sep 17 00:00:00 2001 From: Stephan Hesselmann Date: Wed, 28 Jan 2026 13:32:25 +0100 Subject: [PATCH 051/232] ROX-32810: prune dynamic rbac objects (#18650) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Michaël Petrov --- .../internaltokens/service/role_manager.go | 49 +-- .../service/role_manager_test.go | 20 +- .../internaltokens/service/service_impl.go | 21 +- .../service/service_impl_test.go | 23 +- .../internaltokens/service/timeutils_test.go | 29 ++ .../storagetov1/auth_m2m_config_test.go | 2 + central/convert/storagetov1/traits_test.go | 2 + central/graphql/resolvers/generated.go | 6 + central/pruning/pruning.go | 59 ++++ central/pruning/pruning_test.go | 294 +++++++++++++++++- central/pruning/singleton.go | 2 + central/role/datastore/datastore.go | 3 + central/role/datastore/datastore_impl.go | 173 ++++++++++- central/role/datastore/mocks/datastore.go | 45 +++ generated/api/v1/auth_service.swagger.json | 5 + .../api/v1/authprovider_service.swagger.json | 5 + generated/api/v1/group_service.swagger.json | 13 + .../api/v1/notifier_service.swagger.json | 5 + generated/api/v1/role_service.swagger.json | 5 + ...signature_integration_service.swagger.json | 5 + generated/storage/traits.pb.go | 43 ++- generated/storage/traits_vtproto.pb.go | 92 ++++++ proto/storage/proto.lock | 10 + proto/storage/traits.proto | 8 + 24 files changed, 838 insertions(+), 81 deletions(-) create mode 100644 central/auth/internaltokens/service/timeutils_test.go diff --git a/central/auth/internaltokens/service/role_manager.go b/central/auth/internaltokens/service/role_manager.go index b024a78bfc944..75440a8c033e4 100644 --- a/central/auth/internaltokens/service/role_manager.go +++ b/central/auth/internaltokens/service/role_manager.go @@ -5,6 +5,7 @@ import ( "fmt" "slices" "strings" + "time" "github.com/pkg/errors" clusterDatastore "github.com/stackrox/rox/central/cluster/datastore" @@ -12,6 +13,7 @@ import ( v1 "github.com/stackrox/rox/generated/internalapi/central/v1" "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/declarativeconfig" + "github.com/stackrox/rox/pkg/protocompat" ) const ( @@ -34,22 +36,29 @@ type roleManager struct { roleStore roleDatastore.DataStore } -var ( - generatedObjectTraits = &storage.Traits{Origin: storage.Traits_DYNAMIC} -) +// generateTraitsWithExpiry creates traits for dynamically generated RBAC +// objects with an expiry time. The expiry time determines when these objects +// are eligible for pruning by the garbage collector. +func generateTraitsWithExpiry(expiresAt time.Time) (*storage.Traits, error) { + ts, err := protocompat.ConvertTimeToTimestampOrError(expiresAt) + return &storage.Traits{ + Origin: storage.Traits_DYNAMIC, + ExpiresAt: ts, + }, err +} -// createPermissionSet creates a dynamic permission set, granting the requested permissions. -// The returned information is the ID of the created permission set, or an error if any occurred -// in the creation process. +// createPermissionSet creates a dynamic permission set, granting the requested +// permissions. 
The returned information is the ID of the created permission +// set, or an error if any occurred in the creation process. func (rm *roleManager) createPermissionSet( ctx context.Context, req *v1.GenerateTokenForPermissionsAndScopeRequest, + traits *storage.Traits, ) (string, error) { - // TODO: Consider pruning the generated permission sets after some idle time. permissionSet := &storage.PermissionSet{ Description: permissionSetDescription, ResourceToAccess: make(map[string]storage.Access), - Traits: generatedObjectTraits.CloneVT(), + Traits: traits, } var b strings.Builder permissions := req.GetPermissions() @@ -90,18 +99,18 @@ func convertAccess(in v1.Access) storage.Access { } } -// createAccessScope creates a dynamic access scope, granting the requested scope. -// The returned information is the identifier of the created access scope, -// or an error if any occurred in the creation process. +// createAccessScope creates a dynamic access scope, granting the requested +// scope. The returned information is the identifier of the created access +// scope, or an error if any occurred in the creation process. func (rm *roleManager) createAccessScope( ctx context.Context, req *v1.GenerateTokenForPermissionsAndScopeRequest, + traits *storage.Traits, ) (string, error) { - // TODO: Consider pruning the generated access scopes after some idle time. accessScope := &storage.SimpleAccessScope{ Description: accessScopeDescription, Rules: &storage.SimpleAccessScope_Rules{}, - Traits: generatedObjectTraits.CloneVT(), + Traits: traits, } var b strings.Builder fullAccessClusters := make([]string, 0) @@ -150,19 +159,19 @@ func (rm *roleManager) createAccessScope( return accessScope.GetId(), nil } -// createRole creates a dynamic role, granting the requested permissions and scope. -// The returned information is the name of the created role, or an error if any occurred -// in the creation process. +// createRole creates a dynamic role, granting the requested permissions and +// scope. The returned information is the name of the created role, or an error +// if any occurred in the creation process. func (rm *roleManager) createRole( ctx context.Context, req *v1.GenerateTokenForPermissionsAndScopeRequest, + traits *storage.Traits, ) (string, error) { - // TODO: Consider pruning the generated roles after some idle time. - permissionSetID, err := rm.createPermissionSet(ctx, req) + permissionSetID, err := rm.createPermissionSet(ctx, req, traits) if err != nil { return "", errors.Wrap(err, "creating permission set for role") } - accessScopeID, err := rm.createAccessScope(ctx, req) + accessScopeID, err := rm.createAccessScope(ctx, req, traits) if err != nil { return "", errors.Wrap(err, "creating access scope for role") } @@ -171,7 +180,7 @@ func (rm *roleManager) createRole( Description: roleDescription, PermissionSetId: permissionSetID, AccessScopeId: accessScopeID, - Traits: generatedObjectTraits.CloneVT(), + Traits: traits, } err = rm.roleStore.UpsertRole(ctx, resultRole) if err != nil { diff --git a/central/auth/internaltokens/service/role_manager_test.go b/central/auth/internaltokens/service/role_manager_test.go index 2c1bc8df5aa16..a8f0d9f409e9d 100644 --- a/central/auth/internaltokens/service/role_manager_test.go +++ b/central/auth/internaltokens/service/role_manager_test.go @@ -109,7 +109,7 @@ func TestCreatePermissionSet(t *testing.T) { Times(1). 
Return(tc.expectedStoreError) - psID, err := roleMgr.createPermissionSet(ctx, tc.input) + psID, err := roleMgr.createPermissionSet(ctx, tc.input, testExpiredTraits) if tc.expectedStoreError != nil { assert.Empty(it, psID) @@ -296,7 +296,7 @@ func TestCreateAccessScope(t *testing.T) { Times(1). Return(tc.expectedStoreError) - asID, err := roleMgr.createAccessScope(ctx, tc.input) + asID, err := roleMgr.createAccessScope(ctx, tc.input, testExpiredTraits) if tc.expectedStoreError != nil { assert.Empty(it, asID) @@ -324,7 +324,7 @@ func TestCreateAccessScope(t *testing.T) { Times(1). Return("", false, errDummy) - accessScopeId, err := roleMgr.createAccessScope(ctx, input) + accessScopeId, err := roleMgr.createAccessScope(ctx, input, testExpiredTraits) assert.Empty(it, accessScopeId) assert.ErrorIs(it, err, errDummy) }) @@ -355,7 +355,7 @@ func TestCreateAccessScope(t *testing.T) { Times(1). Return(nil) - accessScopeId, err := roleMgr.createAccessScope(ctx, input) + accessScopeId, err := roleMgr.createAccessScope(ctx, input, testExpiredTraits) assert.Equal(it, expectedAccessScope.GetId(), accessScopeId) assert.NoError(it, err) }) @@ -458,7 +458,7 @@ func TestCreateRole(t *testing.T) { mockRoleStore, ) - roleName, err := roleMgr.createRole(ctx, tc.input) + roleName, err := roleMgr.createRole(ctx, tc.input, testExpiredTraits) if tc.expectedRoleStoreError != nil { assert.Empty(it, roleName) @@ -498,7 +498,7 @@ func TestCreateRole(t *testing.T) { setClusterStoreExpectations(input, mockClusterStore) - roleName, err := roleMgr.createRole(ctx, input) + roleName, err := roleMgr.createRole(ctx, input, testExpiredTraits) assert.Empty(it, roleName) assert.ErrorIs(it, err, accessScopeCreationErr) @@ -523,7 +523,7 @@ func TestCreateRole(t *testing.T) { ClusterScopes: []*v1.ClusterScope{requestSingleNamespace}, } - roleName, err := roleMgr.createRole(ctx, input) + roleName, err := roleMgr.createRole(ctx, input, testExpiredTraits) assert.Empty(it, roleName) assert.ErrorIs(it, err, permissionSetCreationErr) @@ -616,7 +616,7 @@ func testPermissionSet(permissions map[string]v1.Access) *storage.PermissionSet Name: fmt.Sprintf(permissionSetNameFormat, permissionSetID), Description: permissionSetDescription, ResourceToAccess: make(map[string]storage.Access), - Traits: generatedObjectTraits.CloneVT(), + Traits: testExpiredTraits, } for _, resource := range resources { permissionSet.ResourceToAccess[resource] = convertAccess(permissions[resource]) @@ -634,7 +634,7 @@ func testAccessScope(targetScopes []*v1.ClusterScope) *storage.SimpleAccessScope IncludedClusters: make([]string, 0), IncludedNamespaces: make([]*storage.SimpleAccessScope_Rules_Namespace, 0), }, - Traits: generatedObjectTraits.CloneVT(), + Traits: testExpiredTraits, } for _, targetScope := range targetScopes { if targetScope == nil { @@ -668,7 +668,7 @@ func testRole(permissions map[string]v1.Access, targetScopes []*v1.ClusterScope) Description: roleDescription, PermissionSetId: permissionSetID, AccessScopeId: accessScopeID, - Traits: generatedObjectTraits.CloneVT(), + Traits: testExpiredTraits, } return role } diff --git a/central/auth/internaltokens/service/service_impl.go b/central/auth/internaltokens/service/service_impl.go index 4d8da5c693755..f8057a44bd625 100644 --- a/central/auth/internaltokens/service/service_impl.go +++ b/central/auth/internaltokens/service/service_impl.go @@ -23,6 +23,10 @@ import ( const ( claimNameFormat = "Generated claims for role %s expiring at %s" + + // rbacObjectsGraceExpiration expands expired RBAC objects lifetime to 
allow + // requests complete even if the token expires during requests handling. + rbacObjectsGraceExpiration = 2 * time.Minute ) var ( @@ -40,6 +44,8 @@ var ( sac.ResourceScopeKeys(resources.Cluster), ), ) + + errBadExpirationValue = errox.InvalidArgs.New("bad expiration timestamp") ) type serviceImpl struct { @@ -71,14 +77,21 @@ func (s *serviceImpl) GenerateTokenForPermissionsAndScope( ctx context.Context, req *v1.GenerateTokenForPermissionsAndScopeRequest, ) (*v1.GenerateTokenForPermissionsAndScopeResponse, error) { - roleName, err := s.roleManager.createRole(ctx, req) - if err != nil { - return nil, errors.Wrap(err, "creating and storing target role") - } + // Calculate expiry first so we can set it on the RBAC objects. expiresAt, err := s.getExpiresAt(ctx, req) if err != nil { return nil, errors.Wrap(err, "getting expiration time") } + traits, err := generateTraitsWithExpiry(expiresAt.Add(rbacObjectsGraceExpiration)) + if err != nil { + return nil, errBadExpirationValue.CausedBy(err) + } + // Create the role with the same expiry as the token, so pruning can clean + // it up. + roleName, err := s.roleManager.createRole(ctx, req, traits) + if err != nil { + return nil, errors.Wrap(err, "creating and storing target role") + } claimName := fmt.Sprintf(claimNameFormat, roleName, expiresAt.Format(time.RFC3339Nano)) roxClaims := tokens.RoxClaims{ RoleNames: []string{roleName}, diff --git a/central/auth/internaltokens/service/service_impl_test.go b/central/auth/internaltokens/service/service_impl_test.go index 882060218b0ec..733d4e3874406 100644 --- a/central/auth/internaltokens/service/service_impl_test.go +++ b/central/auth/internaltokens/service/service_impl_test.go @@ -22,14 +22,8 @@ import ( var ( errDummy = errors.New("test error") - - expectedExpiration = time.Date(1989, time.November, 9, 18, 10, 35, 987654321, time.UTC) ) -func testClock() time.Time { - return time.Date(1989, time.November, 9, 18, 05, 35, 987654321, time.UTC) -} - func TestGetExpiresAt(t *testing.T) { for name, tc := range map[string]struct { input *v1.GenerateTokenForPermissionsAndScopeRequest @@ -71,7 +65,7 @@ func TestGetExpiresAt(t *testing.T) { }, }, expectsErr: false, - expectedExpiration: expectedExpiration, + expectedExpiration: testTokenExpiry, }, } { t.Run(name, func(it *testing.T) { @@ -133,8 +127,9 @@ func TestGenerateTokenForPermissionsAndScope(t *testing.T) { mockClusterStore := clusterDataStoreMocks.NewMockDataStore(mockCtrl) mockRoleStore := roleDataStoreMocks.NewMockDataStore(mockCtrl) svc := createService(nil, mockClusterStore, mockRoleStore) - setClusterStoreExpectations(input, mockClusterStore) - setNormalRoleStoreExpectations(deploymentPS, singleNSScope, expectedRole, nil, mockRoleStore) + // Note: With the new code structure, getExpiresAt is called first. + // Since Lifetime is nil, getExpiresAt returns an error before createRole is called, + // so we don't set up any role store expectations. 
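
Stepping back from the test changes for a moment: the effect of the rbacObjectsGraceExpiration constant introduced in service_impl.go above can be made concrete with a small self-contained sketch (the timestamps and the 5-minute lifetime are invented for illustration):

package main

import (
	"fmt"
	"time"
)

func main() {
	const grace = 2 * time.Minute // mirrors rbacObjectsGraceExpiration

	issued := time.Date(2026, time.January, 28, 12, 0, 0, 0, time.UTC)
	tokenExpiry := issued.Add(5 * time.Minute) // lifetime requested by the caller
	rbacExpiry := tokenExpiry.Add(grace)       // ExpiresAt stored in the generated traits

	// The token stops being accepted at 12:05:00, but the dynamically created
	// role, permission set and access scope only become eligible for pruning
	// at 12:07:00, so a request authorized just before token expiry can still
	// resolve the role it references.
	fmt.Println("token expires:           ", tokenExpiry.Format(time.RFC3339))
	fmt.Println("RBAC objects prunable at:", rbacExpiry.Format(time.RFC3339))
}
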
rsp, err := svc.GenerateTokenForPermissionsAndScope(t.Context(), input) assert.Nil(it, rsp) @@ -144,7 +139,7 @@ func TestGenerateTokenForPermissionsAndScope(t *testing.T) { input := &v1.GenerateTokenForPermissionsAndScopeRequest{ Permissions: deploymentPermission, ClusterScopes: []*v1.ClusterScope{requestSingleNamespace}, - Lifetime: nil, + Lifetime: testExpirationDuration, } mockCtrl := gomock.NewController(it) @@ -165,7 +160,7 @@ func TestGenerateTokenForPermissionsAndScope(t *testing.T) { input := &v1.GenerateTokenForPermissionsAndScopeRequest{ Permissions: deploymentPermission, ClusterScopes: []*v1.ClusterScope{requestSingleNamespace}, - Lifetime: &durationpb.Duration{Seconds: 300}, + Lifetime: testExpirationDuration, } mockCtrl := gomock.NewController(it) @@ -180,7 +175,7 @@ func TestGenerateTokenForPermissionsAndScope(t *testing.T) { Name: fmt.Sprintf( claimNameFormat, expectedRole.GetName(), - expectedExpiration.Format(time.RFC3339Nano), + testTokenExpiry.Format(time.RFC3339Nano), ), } mockIssuer.EXPECT(). @@ -195,7 +190,7 @@ func TestGenerateTokenForPermissionsAndScope(t *testing.T) { input := &v1.GenerateTokenForPermissionsAndScopeRequest{ Permissions: deploymentPermission, ClusterScopes: []*v1.ClusterScope{requestSingleNamespace}, - Lifetime: &durationpb.Duration{Seconds: 300}, + Lifetime: testExpirationDuration, } mockCtrl := gomock.NewController(it) @@ -210,7 +205,7 @@ func TestGenerateTokenForPermissionsAndScope(t *testing.T) { Name: fmt.Sprintf( "Generated claims for role %s expiring at %s", expectedRole.GetName(), - expectedExpiration.Format(time.RFC3339Nano), + testTokenExpiry.Format(time.RFC3339Nano), ), } mockIssuer.EXPECT(). diff --git a/central/auth/internaltokens/service/timeutils_test.go b/central/auth/internaltokens/service/timeutils_test.go new file mode 100644 index 0000000000000..840681504bfbd --- /dev/null +++ b/central/auth/internaltokens/service/timeutils_test.go @@ -0,0 +1,29 @@ +package service + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/types/known/durationpb" +) + +var ( + // testExpirationDuration is the constant token expiration duration. + testExpirationDuration = &durationpb.Duration{Seconds: 300} + // testTokenExpiry is the timestamp of the token expiration. + testTokenExpiry = testClock().Add(testExpirationDuration.AsDuration()) + // testExpiredTraits include the expired trait. + testExpiredTraits, _ = generateTraitsWithExpiry(testTokenExpiry.Add(rbacObjectsGraceExpiration)) +) + +// testClock is the clock function injection for testing purposes. +func testClock() time.Time { + return time.Date(1989, time.November, 9, 18, 05, 35, 987654321, time.UTC) +} + +func Test_clock(t *testing.T) { + now := testClock() + assert.Equal(t, testExpirationDuration.GetSeconds(), int64(testTokenExpiry.Sub(now).Seconds())) + assert.Equal(t, rbacObjectsGraceExpiration, testExpiredTraits.GetExpiresAt().AsTime().Sub(testTokenExpiry)) +} diff --git a/central/convert/storagetov1/auth_m2m_config_test.go b/central/convert/storagetov1/auth_m2m_config_test.go index 43bb0b5a3347b..ffea2ded2e5e6 100644 --- a/central/convert/storagetov1/auth_m2m_config_test.go +++ b/central/convert/storagetov1/auth_m2m_config_test.go @@ -17,6 +17,8 @@ func TestAuthM2MConfig(t *testing.T) { v1Config := AuthM2MConfig(config) + // Clear ExpiresAt since v1.Traits doesn't have this field (internal storage field). 
+ config.GetTraits().ExpiresAt = nil convertTestUtils.AssertProtoMessageEqual(t, config, v1Config) assert.IsType(t, &v1.AuthMachineToMachineConfig{}, v1Config) } diff --git a/central/convert/storagetov1/traits_test.go b/central/convert/storagetov1/traits_test.go index db6a55c7122d5..8a6e1cb47f4f4 100644 --- a/central/convert/storagetov1/traits_test.go +++ b/central/convert/storagetov1/traits_test.go @@ -15,5 +15,7 @@ func TestTraits(t *testing.T) { v1Traits := Traits(traits) + // Clear ExpiresAt since v1.Traits doesn't have this field (internal storage field). + traits.ExpiresAt = nil convertTestUtils.AssertProtoMessageEqual(t, traits, v1Traits) } diff --git a/central/graphql/resolvers/generated.go b/central/graphql/resolvers/generated.go index 253ca2ce23841..1c073d5911daa 100644 --- a/central/graphql/resolvers/generated.go +++ b/central/graphql/resolvers/generated.go @@ -1492,6 +1492,7 @@ func registerGeneratedTypes(builder generator.SchemaBuilder) { "disabled: Boolean!", })) utils.Must(builder.AddType("Traits", []string{ + "expiresAt: Time", "mutabilityMode: Traits_MutabilityMode!", "origin: Traits_Origin!", "visibility: Traits_Visibility!", @@ -16118,6 +16119,11 @@ func (resolver *Resolver) wrapTraitsesWithContext(ctx context.Context, values [] return output, nil } +func (resolver *traitsResolver) ExpiresAt(ctx context.Context) (*graphql.Time, error) { + value := resolver.data.GetExpiresAt() + return protocompat.ConvertTimestampToGraphqlTimeOrError(value) +} + func (resolver *traitsResolver) MutabilityMode(ctx context.Context) string { value := resolver.data.GetMutabilityMode() return value.String() diff --git a/central/pruning/pruning.go b/central/pruning/pruning.go index 9e72ec93029ef..02f9db3ae6054 100644 --- a/central/pruning/pruning.go +++ b/central/pruning/pruning.go @@ -29,6 +29,7 @@ import ( "github.com/stackrox/rox/central/reports/common" snapshotDS "github.com/stackrox/rox/central/reports/snapshot/datastore" riskDataStore "github.com/stackrox/rox/central/risk/datastore" + roleDataStore "github.com/stackrox/rox/central/role/datastore" serviceAccountDataStore "github.com/stackrox/rox/central/serviceaccount/datastore" vulnReqDataStore "github.com/stackrox/rox/central/vulnmgmt/vulnerabilityrequest/datastore" v1 "github.com/stackrox/rox/generated/api/v1" @@ -108,6 +109,7 @@ func newGarbageCollector(alerts alertDatastore.DataStore, plops plopDataStore.DataStore, blobStore blobDatastore.Datastore, nodeCVEStore nodeCVEDS.DataStore, + roleStore roleDataStore.DataStore, ) GarbageCollector { return &garbageCollectorImpl{ alerts: alerts, @@ -134,6 +136,7 @@ func newGarbageCollector(alerts alertDatastore.DataStore, plops: plops, blobStore: blobStore, nodeCVEStore: nodeCVEStore, + roleStore: roleStore, } } @@ -163,6 +166,7 @@ type garbageCollectorImpl struct { plops plopDataStore.DataStore blobStore blobDatastore.Datastore nodeCVEStore nodeCVEDS.DataStore + roleStore roleDataStore.DataStore } func (g *garbageCollectorImpl) Start() { @@ -192,6 +196,7 @@ func (g *garbageCollectorImpl) pruneBasedOnConfig() { g.removeExpiredAdministrationEvents(pvtConfig) g.removeExpiredDiscoveredClusters() g.removeInvalidAPITokens() + g.removeExpiredDynamicRBACObjects() postgres.PruneClusterHealthStatuses(pruningCtx, g.postgres) g.pruneLogImbues() @@ -1149,6 +1154,60 @@ func (g *garbageCollectorImpl) pruneOrphanedNodeCVEs() { } } +// traitsHolder is an interface for objects that have traits. 
+type traitsHolder interface { + GetTraits() *storage.Traits +} + +// withTraitsFilter creates a filter function that applies a traits-based predicate to objects. +func withTraitsFilter[T traitsHolder](traitsPredicate func(*storage.Traits) bool) func(T) bool { + return func(obj T) bool { + return traitsPredicate(obj.GetTraits()) + } +} + +// isExpired returns true if the traits have a non-nil expires_at timestamp in the past. +func isExpired(traits *storage.Traits, now time.Time) bool { + expiresAt := traits.GetExpiresAt() + return expiresAt != nil && now.After(expiresAt.AsTime()) +} + +// removeExpiredDynamicRBACObjects removes roles, permission sets, and access scopes +// that have an expiry timestamp in the past and have IMPERATIVE origin. +// These objects are created dynamically by the internal token API for sensors. +func (g *garbageCollectorImpl) removeExpiredDynamicRBACObjects() { + defer metrics.SetPruningDuration(time.Now(), "DynamicRBACObjects") + + now := time.Now() + expiredFilter := func(traits *storage.Traits) bool { + return isExpired(traits, now) + } + + // First, remove expired roles (must be done before permission sets/access scopes + // because roles reference them and deletion will fail if still referenced). + expiredRoleCount, err := g.roleStore.RemoveFilteredRoles(pruningCtx, withTraitsFilter[*storage.Role](expiredFilter)) + if err != nil { + log.Error("Failed to remove expired roles: ", err) + } + + // Then remove expired permission sets that are no longer referenced. + expiredPSCount, err := g.roleStore.RemoveFilteredPermissionSets(pruningCtx, withTraitsFilter[*storage.PermissionSet](expiredFilter)) + if err != nil { + log.Error("Failed to remove expired permission sets: ", err) + } + + // Finally remove expired access scopes that are no longer referenced. 
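
One detail of isExpired above that is easy to miss: a nil expires_at means the object is never considered expired, so pre-existing roles, permission sets and access scopes that carry no expiry are never pruned on this path. A tiny hypothetical snippet spelling that out (it would have to live in the pruning package next to isExpired, and needs fmt, time, the storage package and google.golang.org/protobuf/types/known/timestamppb; it is not part of this patch):

func demoIsExpired() {
	now := time.Now()

	// No expires_at set: never expired, never pruned based on expiry.
	fmt.Println(isExpired(&storage.Traits{}, now)) // false

	// expires_at in the past: expired and eligible for pruning.
	past := &storage.Traits{ExpiresAt: timestamppb.New(now.Add(-time.Hour))}
	fmt.Println(isExpired(past, now)) // true

	// expires_at in the future: not yet expired.
	future := &storage.Traits{ExpiresAt: timestamppb.New(now.Add(time.Hour))}
	fmt.Println(isExpired(future, now)) // false
}
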
+ expiredASCount, err := g.roleStore.RemoveFilteredAccessScopes(pruningCtx, withTraitsFilter[*storage.SimpleAccessScope](expiredFilter)) + if err != nil { + log.Error("Failed to remove expired access scopes: ", err) + } + + if expiredRoleCount+expiredPSCount+expiredASCount > 0 { + log.Infof("[Expired objects pruning] Removed %d roles, %d permission sets, %d access scopes", + expiredRoleCount, expiredPSCount, expiredASCount) + } +} + func (g *garbageCollectorImpl) Stop() { g.stopper.Client().Stop() _ = g.stopper.Client().Stopped().Wait() diff --git a/central/pruning/pruning_test.go b/central/pruning/pruning_test.go index 0bbd7141fd5aa..76ad57f4944b1 100644 --- a/central/pruning/pruning_test.go +++ b/central/pruning/pruning_test.go @@ -51,6 +51,7 @@ import ( roleBindingMocks "github.com/stackrox/rox/central/rbac/k8srolebinding/datastore/mocks" riskDatastore "github.com/stackrox/rox/central/risk/datastore" riskDatastoreMocks "github.com/stackrox/rox/central/risk/datastore/mocks" + roleDataStore "github.com/stackrox/rox/central/role/datastore" secretMocks "github.com/stackrox/rox/central/secret/datastore/mocks" connectionMocks "github.com/stackrox/rox/central/sensor/service/connection/mocks" serviceAccountDataStore "github.com/stackrox/rox/central/serviceaccount/datastore" @@ -575,7 +576,7 @@ func (s *PruningTestSuite) TestImagePruning() { gc := newGarbageCollector(alerts, nodes, images, imagesV2, nil, deployments, pods, nil, nil, nil, config, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil).(*garbageCollectorImpl) + nil, nil, nil).(*garbageCollectorImpl) // Add images, deployments, and pods into the datastores if c.deployment != nil { @@ -868,7 +869,7 @@ func (s *PruningTestSuite) TestClusterPruning() { gc := newGarbageCollector(nil, nil, nil, nil, clusterDS, deploymentsDS, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil).(*garbageCollectorImpl) + nil, nil, nil, nil).(*garbageCollectorImpl) gc.collectClusters(c.config) // Now get all clusters and compare the names to ensure only the expected ones exist @@ -995,7 +996,7 @@ func (s *PruningTestSuite) TestClusterPruningCentralCheck() { gc := newGarbageCollector(nil, nil, nil, nil, clusterDS, deploymentsDS, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil).(*garbageCollectorImpl) + nil, nil, nil, nil).(*garbageCollectorImpl) gc.collectClusters(getCluserRetentionConfig(60, 90, 72)) // Now get all clusters and compare the names to ensure only the expected ones exist @@ -1173,7 +1174,7 @@ func (s *PruningTestSuite) TestAlertPruning() { gc := newGarbageCollector(alerts, nodes, images, imagesV2, nil, deployments, nil, nil, nil, nil, config, nil, nil, nil, nil, nil, nil, nil, nil, - nil, nil, nil).(*garbageCollectorImpl) + nil, nil, nil, nil).(*garbageCollectorImpl) // Add alerts into the datastores for _, alert := range c.alerts { @@ -2329,6 +2330,291 @@ func (s *PruningTestSuite) TestPruneOrphanedNodeCVEs() { } } +func (s *PruningTestSuite) TestRemoveExpiredDynamicRBACObjects() { + now := time.Now() + yesterday := now.Add(-24 * time.Hour) + tomorrow := now.Add(24 * time.Hour) + twoDaysAgo := now.Add(-48 * time.Hour) + + // Create classic (non-expiring) access scope and permission set for roles to reference. 
+ classicPS := &storage.PermissionSet{ + Id: uuid.NewV4().String(), + Name: "classic-ps", + ResourceToAccess: map[string]storage.Access{"Cluster": storage.Access_READ_ACCESS}, + Traits: nil, + } + classicAS := &storage.SimpleAccessScope{ + Id: uuid.NewV4().String(), + Name: "classic-as", + Rules: &storage.SimpleAccessScope_Rules{}, + Traits: nil, + } + + // Pre-generate UUIDs for test cases where we need to reference them in expected deletions. + ps1ID := uuid.NewV4().String() + ps2ID := uuid.NewV4().String() + ps3ID := uuid.NewV4().String() + as1ID := uuid.NewV4().String() + as2ID := uuid.NewV4().String() + as3ID := uuid.NewV4().String() + psExpiredID := uuid.NewV4().String() + psNoExpiryID := uuid.NewV4().String() + asExpiredID := uuid.NewV4().String() + asActiveID := uuid.NewV4().String() + + cases := []struct { + name string + roles []*storage.Role + permissionSets []*storage.PermissionSet + accessScopes []*storage.SimpleAccessScope + expectedRoleDeletions set.FrozenStringSet + expectedPSDeletions set.FrozenStringSet + expectedASDeletions set.FrozenStringSet + }{ + { + name: "remove expired roles only", + roles: []*storage.Role{ + { + Name: "role-1", + PermissionSetId: classicPS.GetId(), + AccessScopeId: classicAS.GetId(), + Traits: &storage.Traits{ + ExpiresAt: timestamppb.New(yesterday), + }, + }, + { + Name: "role-2", + PermissionSetId: classicPS.GetId(), + AccessScopeId: classicAS.GetId(), + Traits: &storage.Traits{ + ExpiresAt: timestamppb.New(tomorrow), + }, + }, + { + Name: "role-3", + PermissionSetId: classicPS.GetId(), + AccessScopeId: classicAS.GetId(), + Traits: &storage.Traits{}, + }, + }, + expectedRoleDeletions: set.NewFrozenStringSet("role-1"), + expectedPSDeletions: set.NewFrozenStringSet(), + expectedASDeletions: set.NewFrozenStringSet(), + }, + { + name: "remove expired permission sets only", + permissionSets: []*storage.PermissionSet{ + { + Id: ps1ID, + Name: "ps-1", + Traits: &storage.Traits{ + ExpiresAt: timestamppb.New(twoDaysAgo), + }, + }, + { + Id: ps2ID, + Name: "ps-2", + Traits: &storage.Traits{ + ExpiresAt: timestamppb.New(tomorrow), + }, + }, + { + Id: ps3ID, + Name: "ps-3", + Traits: &storage.Traits{}, + }, + }, + expectedRoleDeletions: set.NewFrozenStringSet(), + expectedPSDeletions: set.NewFrozenStringSet(ps1ID), + expectedASDeletions: set.NewFrozenStringSet(), + }, + { + name: "remove expired access scopes only", + accessScopes: []*storage.SimpleAccessScope{ + { + Id: as1ID, + Name: "as-1", + Rules: &storage.SimpleAccessScope_Rules{}, + Traits: &storage.Traits{ + ExpiresAt: timestamppb.New(yesterday), + }, + }, + { + Id: as2ID, + Name: "as-2", + Rules: &storage.SimpleAccessScope_Rules{}, + Traits: &storage.Traits{ + ExpiresAt: timestamppb.New(tomorrow), + }, + }, + { + Id: as3ID, + Name: "as-3", + Rules: &storage.SimpleAccessScope_Rules{}, + Traits: &storage.Traits{}, + }, + }, + expectedRoleDeletions: set.NewFrozenStringSet(), + expectedPSDeletions: set.NewFrozenStringSet(), + expectedASDeletions: set.NewFrozenStringSet(as1ID), + }, + { + name: "remove expired objects of all types", + roles: []*storage.Role{ + { + Name: "role-expired", + PermissionSetId: classicPS.GetId(), + AccessScopeId: classicAS.GetId(), + Traits: &storage.Traits{ + ExpiresAt: timestamppb.New(yesterday), + }, + }, + { + Name: "role-active", + PermissionSetId: classicPS.GetId(), + AccessScopeId: classicAS.GetId(), + Traits: &storage.Traits{ + ExpiresAt: timestamppb.New(tomorrow), + }, + }, + }, + permissionSets: []*storage.PermissionSet{ + { + Id: psExpiredID, + Name: "ps-expired", 
+ Traits: &storage.Traits{ + ExpiresAt: timestamppb.New(twoDaysAgo), + }, + }, + { + Id: psNoExpiryID, + Name: "ps-no-expiry", + Traits: &storage.Traits{}, + }, + }, + accessScopes: []*storage.SimpleAccessScope{ + { + Id: asExpiredID, + Name: "as-expired", + Rules: &storage.SimpleAccessScope_Rules{}, + Traits: &storage.Traits{ + ExpiresAt: timestamppb.New(yesterday), + }, + }, + { + Id: asActiveID, + Name: "as-active", + Rules: &storage.SimpleAccessScope_Rules{}, + Traits: &storage.Traits{ + ExpiresAt: timestamppb.New(tomorrow), + }, + }, + }, + expectedRoleDeletions: set.NewFrozenStringSet("role-expired"), + expectedPSDeletions: set.NewFrozenStringSet(psExpiredID), + expectedASDeletions: set.NewFrozenStringSet(asExpiredID), + }, + { + name: "nothing to remove when all unexpired", + roles: []*storage.Role{ + { + Name: "role-1", + PermissionSetId: classicPS.GetId(), + AccessScopeId: classicAS.GetId(), + Traits: &storage.Traits{ + ExpiresAt: timestamppb.New(tomorrow), + }, + }, + }, + permissionSets: []*storage.PermissionSet{ + { + Id: uuid.NewV4().String(), + Name: "ps-1", + Traits: &storage.Traits{}, + }, + }, + accessScopes: []*storage.SimpleAccessScope{ + { + Id: uuid.NewV4().String(), + Name: "as-1", + Rules: &storage.SimpleAccessScope_Rules{}, + Traits: &storage.Traits{ + ExpiresAt: timestamppb.New(tomorrow), + }, + }, + }, + expectedRoleDeletions: set.NewFrozenStringSet(), + expectedPSDeletions: set.NewFrozenStringSet(), + expectedASDeletions: set.NewFrozenStringSet(), + }, + { + name: "nothing to remove when no objects exist", + expectedRoleDeletions: set.NewFrozenStringSet(), + expectedPSDeletions: set.NewFrozenStringSet(), + expectedASDeletions: set.NewFrozenStringSet(), + }, + } + + for _, c := range cases { + s.T().Run(c.name, func(t *testing.T) { + roleStore := roleDataStore.GetTestPostgresDataStore(t, s.pool) + + // Add classic permission set and access scope for roles to reference. 
+ assert.NoError(t, roleStore.AddPermissionSet(pruningCtx, classicPS)) + assert.NoError(t, roleStore.AddAccessScope(pruningCtx, classicAS)) + t.Cleanup(func() { + _ = roleStore.RemovePermissionSet(pruningCtx, classicPS.GetId()) + _ = roleStore.RemoveAccessScope(pruningCtx, classicAS.GetId()) + }) + + for _, role := range c.roles { + assert.NoError(t, roleStore.AddRole(pruningCtx, role)) + t.Cleanup(func() { + _ = roleStore.RemoveRole(pruningCtx, role.GetName()) + }) + } + + for _, ps := range c.permissionSets { + assert.NoError(t, roleStore.AddPermissionSet(pruningCtx, ps)) + t.Cleanup(func() { + _ = roleStore.RemovePermissionSet(pruningCtx, ps.GetId()) + }) + } + + for _, as := range c.accessScopes { + assert.NoError(t, roleStore.AddAccessScope(pruningCtx, as)) + t.Cleanup(func() { + _ = roleStore.RemoveAccessScope(pruningCtx, as.GetId()) + }) + } + + gc := &garbageCollectorImpl{ + roleStore: roleStore, + } + + gc.removeExpiredDynamicRBACObjects() + + for _, role := range c.roles { + _, ok, err := roleStore.GetRole(pruningCtx, role.GetName()) + assert.NoError(t, err) + assert.Equal(t, !c.expectedRoleDeletions.Contains(role.GetName()), ok) + } + + for _, ps := range c.permissionSets { + _, ok, err := roleStore.GetPermissionSet(pruningCtx, ps.GetId()) + assert.NoError(t, err) + assert.Equal(t, !c.expectedPSDeletions.Contains(ps.GetId()), ok) + } + + for _, as := range c.accessScopes { + _, ok, err := roleStore.GetAccessScope(pruningCtx, as.GetId()) + assert.NoError(t, err) + assert.Equal(t, !c.expectedASDeletions.Contains(as.GetId()), ok) + } + }) + } +} + func (s *PruningTestSuite) addSomePods(podDS podDatastore.DataStore, clusterID string, numberPods int) { for i := 0; i < numberPods; i++ { pod := &storage.Pod{ diff --git a/central/pruning/singleton.go b/central/pruning/singleton.go index 1f6640d32460d..b4e6482885972 100644 --- a/central/pruning/singleton.go +++ b/central/pruning/singleton.go @@ -21,6 +21,7 @@ import ( k8srolebindingStore "github.com/stackrox/rox/central/rbac/k8srolebinding/datastore" snapshotDataStore "github.com/stackrox/rox/central/reports/snapshot/datastore" riskDataStore "github.com/stackrox/rox/central/risk/datastore" + roleDataStore "github.com/stackrox/rox/central/role/datastore" serviceAccountDataStore "github.com/stackrox/rox/central/serviceaccount/datastore" vulnReqDataStore "github.com/stackrox/rox/central/vulnmgmt/vulnerabilityrequest/datastore" "github.com/stackrox/rox/pkg/sync" @@ -56,6 +57,7 @@ func Singleton() GarbageCollector { plopDataStore.Singleton(), blobDS.Singleton(), nodeCVEDS.Singleton(), + roleDataStore.Singleton(), ) }) return gc diff --git a/central/role/datastore/datastore.go b/central/role/datastore/datastore.go index 5ea32e1d500d3..d17aea8b02b20 100644 --- a/central/role/datastore/datastore.go +++ b/central/role/datastore/datastore.go @@ -20,6 +20,7 @@ type DataStore interface { AddRole(ctx context.Context, role *storage.Role) error UpdateRole(ctx context.Context, role *storage.Role) error RemoveRole(ctx context.Context, name string) error + RemoveFilteredRoles(ctx context.Context, filter func(*storage.Role) bool) (int, error) GetPermissionSet(ctx context.Context, id string) (*storage.PermissionSet, bool, error) GetAllPermissionSets(ctx context.Context) ([]*storage.PermissionSet, error) @@ -29,6 +30,7 @@ type DataStore interface { UpdatePermissionSet(ctx context.Context, permissionSet *storage.PermissionSet) error UpsertPermissionSet(ctx context.Context, permissionSet *storage.PermissionSet) error RemovePermissionSet(ctx context.Context, id 
string) error + RemoveFilteredPermissionSets(ctx context.Context, filter func(*storage.PermissionSet) bool) (int, error) GetAccessScope(ctx context.Context, id string) (*storage.SimpleAccessScope, bool, error) GetAllAccessScopes(ctx context.Context) ([]*storage.SimpleAccessScope, error) @@ -39,6 +41,7 @@ type DataStore interface { UpdateAccessScope(ctx context.Context, scope *storage.SimpleAccessScope) error UpsertAccessScope(ctx context.Context, scope *storage.SimpleAccessScope) error RemoveAccessScope(ctx context.Context, id string) error + RemoveFilteredAccessScopes(ctx context.Context, filter func(*storage.SimpleAccessScope) bool) (int, error) GetAllResolvedRoles(ctx context.Context) ([]permissions.ResolvedRole, error) GetAndResolveRole(ctx context.Context, name string) (permissions.ResolvedRole, error) diff --git a/central/role/datastore/datastore_impl.go b/central/role/datastore/datastore_impl.go index 9c9361fa238df..84bf7288612ad 100644 --- a/central/role/datastore/datastore_impl.go +++ b/central/role/datastore/datastore_impl.go @@ -16,6 +16,7 @@ import ( "github.com/stackrox/rox/pkg/sac" "github.com/stackrox/rox/pkg/sac/resources" "github.com/stackrox/rox/pkg/search" + "github.com/stackrox/rox/pkg/set" "github.com/stackrox/rox/pkg/sync" ) @@ -62,8 +63,7 @@ func (ds *dataStoreImpl) UpsertRole(ctx context.Context, newRole *storage.Role) return err } - // Constraints ok, write the object. We expect the underlying store to - // verify there is no role with the same name. + // Constraints ok, upsert the object (create if new, update if exists). if err := ds.roleStorage.Upsert(ctx, newRole); err != nil { return err } @@ -95,8 +95,7 @@ func (ds *dataStoreImpl) UpsertPermissionSet(ctx context.Context, newPS *storage return err } - // Constraints ok, write the object. We expect the underlying store to - // verify there is no permission set with the same name. + // Constraints ok, upsert the object (create if new, update if exists). if err := ds.permissionSetStorage.Upsert(ctx, newPS); err != nil { return err } @@ -128,8 +127,7 @@ func (ds *dataStoreImpl) UpsertAccessScope(ctx context.Context, newScope *storag return err } - // Constraints ok, write the object. We expect the underlying store to - // verify there is no access scope with the same name. + // Constraints ok, upsert the object (create if new, update if exists). 
if err := ds.accessScopeStorage.Upsert(ctx, newScope); err != nil { return err } @@ -278,7 +276,14 @@ func (ds *dataStoreImpl) RemoveRole(ctx context.Context, name string) error { return err } - if err := ds.verifyRoleForDeletion(ctx, name); err != nil { + role, found, err := ds.roleStorage.Get(ctx, name) + if err != nil { + return err + } + if !found { + return errors.Wrapf(errox.NotFound, "name = %q", name) + } + if err := ds.verifyRoleForDeletion(ctx, role); err != nil { return err } @@ -856,15 +861,7 @@ func (ds *dataStoreImpl) verifyRoleNameExists(ctx context.Context, name string) // It will: // - verify that the role is not a default role // - verify that the role exists -func (ds *dataStoreImpl) verifyRoleForDeletion(ctx context.Context, name string) error { - role, found, err := ds.roleStorage.Get(ctx, name) - - if err != nil { - return err - } - if !found { - return errors.Wrapf(errox.NotFound, "name = %q", name) - } +func (ds *dataStoreImpl) verifyRoleForDeletion(ctx context.Context, role *storage.Role) error { if err := verifyRoleOrigin(ctx, role); err != nil { return err } @@ -937,3 +934,147 @@ func getGroupIDs(groups []*storage.Group) []string { } return groupIDs } + +//////////////////////////////////////////////////////////////////////////////// +// Removal of filtered objects // +// // + +func (ds *dataStoreImpl) RemoveFilteredRoles(ctx context.Context, filter func(*storage.Role) bool) (int, error) { + if err := sac.VerifyAuthzOK(roleSAC.WriteAllowed(ctx)); err != nil { + return 0, err + } + + ds.lock.Lock() + defer ds.lock.Unlock() + + rolesToDelete := make(map[string]*storage.Role) + walkFn := func() error { + return ds.roleStorage.Walk(ctx, func(role *storage.Role) error { + if filter(role) { + rolesToDelete[role.GetName()] = role + } + return nil + }) + } + if err := pgutils.RetryIfPostgres(ctx, walkFn); err != nil { + return 0, err + } + + for name, role := range rolesToDelete { + if err := ds.verifyRoleForDeletion(ctx, role); err != nil { + log.Debugf("Skipping role %q: %v", name, err) + delete(rolesToDelete, name) + } + } + + deletedCount := 0 + for name := range rolesToDelete { + if err := ds.roleStorage.Delete(ctx, name); err != nil { + log.Errorf("Failed to delete filtered role %q: %v", name, err) + } else { + deletedCount++ + } + } + + return deletedCount, nil +} + +func (ds *dataStoreImpl) RemoveFilteredPermissionSets(ctx context.Context, filter func(*storage.PermissionSet) bool) (int, error) { + if err := sac.VerifyAuthzOK(roleSAC.WriteAllowed(ctx)); err != nil { + return 0, err + } + + ds.lock.Lock() + defer ds.lock.Unlock() + + // First, identify permission sets that match the filter. + candidateSet := set.NewStringSet() + walkPSFn := func() error { + return ds.permissionSetStorage.Walk(ctx, func(permissionSet *storage.PermissionSet) error { + if filter(permissionSet) { + candidateSet.Add(permissionSet.GetId()) + } + return nil + }) + } + if err := pgutils.RetryIfPostgres(ctx, walkPSFn); err != nil { + return 0, err + } + + if candidateSet.Cardinality() == 0 { + return 0, nil + } + + // Walk through roles to find which candidate permission sets are referenced. + walkRolesFn := func() error { + return ds.roleStorage.Walk(ctx, func(role *storage.Role) error { + candidateSet.Remove(role.GetPermissionSetId()) + return nil + }) + } + if err := pgutils.RetryIfPostgres(ctx, walkRolesFn); err != nil { + return 0, err + } + + // Delete permission sets that are not referenced. 
+ deletedCount := 0 + for _, id := range candidateSet.AsSlice() { + if err := ds.permissionSetStorage.Delete(ctx, id); err != nil { + log.Errorf("Failed to delete filtered permission set %q: %v", id, err) + } else { + deletedCount++ + } + } + + return deletedCount, nil +} + +func (ds *dataStoreImpl) RemoveFilteredAccessScopes(ctx context.Context, filter func(*storage.SimpleAccessScope) bool) (int, error) { + if err := sac.VerifyAuthzOK(roleSAC.WriteAllowed(ctx)); err != nil { + return 0, err + } + + ds.lock.Lock() + defer ds.lock.Unlock() + + // First, identify access scopes that match the filter. + candidateSet := set.NewStringSet() + walkASFn := func() error { + return ds.accessScopeStorage.Walk(ctx, func(accessScope *storage.SimpleAccessScope) error { + if filter(accessScope) { + candidateSet.Add(accessScope.GetId()) + } + return nil + }) + } + if err := pgutils.RetryIfPostgres(ctx, walkASFn); err != nil { + return 0, err + } + + if candidateSet.Cardinality() == 0 { + return 0, nil + } + + // Walk through roles to find which candidate access scopes are referenced. + walkRolesFn := func() error { + return ds.roleStorage.Walk(ctx, func(role *storage.Role) error { + candidateSet.Remove(role.GetAccessScopeId()) + return nil + }) + } + if err := pgutils.RetryIfPostgres(ctx, walkRolesFn); err != nil { + return 0, err + } + + // Delete access scopes that are not referenced. + deletedCount := 0 + for _, id := range candidateSet.AsSlice() { + if err := ds.accessScopeStorage.Delete(ctx, id); err != nil { + log.Errorf("Failed to delete filtered access scope %q: %v", id, err) + } else { + deletedCount++ + } + } + + return deletedCount, nil +} diff --git a/central/role/datastore/mocks/datastore.go b/central/role/datastore/mocks/datastore.go index d0896c976e5f5..e20b4cc6cb3f9 100644 --- a/central/role/datastore/mocks/datastore.go +++ b/central/role/datastore/mocks/datastore.go @@ -342,6 +342,51 @@ func (mr *MockDataStoreMockRecorder) RemoveAccessScope(ctx, id any) *gomock.Call return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveAccessScope", reflect.TypeOf((*MockDataStore)(nil).RemoveAccessScope), ctx, id) } +// RemoveFilteredAccessScopes mocks base method. +func (m *MockDataStore) RemoveFilteredAccessScopes(ctx context.Context, filter func(*storage.SimpleAccessScope) bool) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveFilteredAccessScopes", ctx, filter) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RemoveFilteredAccessScopes indicates an expected call of RemoveFilteredAccessScopes. +func (mr *MockDataStoreMockRecorder) RemoveFilteredAccessScopes(ctx, filter any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveFilteredAccessScopes", reflect.TypeOf((*MockDataStore)(nil).RemoveFilteredAccessScopes), ctx, filter) +} + +// RemoveFilteredPermissionSets mocks base method. +func (m *MockDataStore) RemoveFilteredPermissionSets(ctx context.Context, filter func(*storage.PermissionSet) bool) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveFilteredPermissionSets", ctx, filter) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RemoveFilteredPermissionSets indicates an expected call of RemoveFilteredPermissionSets. 
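
For completeness, a sketch of how the newly generated mock methods could be exercised in a unit test, following the gomock pattern already used elsewhere in this series. The test below is hypothetical: it assumes it lives in the pruning package (garbageCollectorImpl is unexported) and imports this mocks package under the alias mocks; the coverage actually added in this patch uses a Postgres-backed datastore in pruning_test.go instead:

func TestRemoveExpiredDynamicRBACObjectsWithMocks(t *testing.T) {
	ctrl := gomock.NewController(t)
	roleStore := mocks.NewMockDataStore(ctrl)

	// Expect one filtered-removal call per object type; return fake counts.
	roleStore.EXPECT().
		RemoveFilteredRoles(gomock.Any(), gomock.Any()).
		Times(1).
		Return(1, nil)
	roleStore.EXPECT().
		RemoveFilteredPermissionSets(gomock.Any(), gomock.Any()).
		Times(1).
		Return(1, nil)
	roleStore.EXPECT().
		RemoveFilteredAccessScopes(gomock.Any(), gomock.Any()).
		Times(1).
		Return(0, nil)

	gc := &garbageCollectorImpl{roleStore: roleStore}
	gc.removeExpiredDynamicRBACObjects()
}
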
+func (mr *MockDataStoreMockRecorder) RemoveFilteredPermissionSets(ctx, filter any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveFilteredPermissionSets", reflect.TypeOf((*MockDataStore)(nil).RemoveFilteredPermissionSets), ctx, filter) +} + +// RemoveFilteredRoles mocks base method. +func (m *MockDataStore) RemoveFilteredRoles(ctx context.Context, filter func(*storage.Role) bool) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveFilteredRoles", ctx, filter) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RemoveFilteredRoles indicates an expected call of RemoveFilteredRoles. +func (mr *MockDataStoreMockRecorder) RemoveFilteredRoles(ctx, filter any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveFilteredRoles", reflect.TypeOf((*MockDataStore)(nil).RemoveFilteredRoles), ctx, filter) +} + // RemovePermissionSet mocks base method. func (m *MockDataStore) RemovePermissionSet(ctx context.Context, id string) error { m.ctrl.T.Helper() diff --git a/generated/api/v1/auth_service.swagger.json b/generated/api/v1/auth_service.swagger.json index bc1fee72728a4..37b64e9ec38a3 100644 --- a/generated/api/v1/auth_service.swagger.json +++ b/generated/api/v1/auth_service.swagger.json @@ -468,6 +468,11 @@ }, "origin": { "$ref": "#/definitions/storageTraitsOrigin" + }, + "expiresAt": { + "type": "string", + "format": "date-time", + "description": "expires_at specifies when this object should be considered expired and eligible for pruning.\nThis field is optional. Objects without an expires_at value are never pruned based on expiry.\nCurrently used for dynamically created RBAC objects (roles, permission sets, access scopes)\nthat are generated by the internal token API for sensors." } } }, diff --git a/generated/api/v1/authprovider_service.swagger.json b/generated/api/v1/authprovider_service.swagger.json index f3d09e80c2350..c0a0f83c3598b 100644 --- a/generated/api/v1/authprovider_service.swagger.json +++ b/generated/api/v1/authprovider_service.swagger.json @@ -591,6 +591,11 @@ }, "origin": { "$ref": "#/definitions/storageTraitsOrigin" + }, + "expiresAt": { + "type": "string", + "format": "date-time", + "description": "expires_at specifies when this object should be considered expired and eligible for pruning.\nThis field is optional. Objects without an expires_at value are never pruned based on expiry.\nCurrently used for dynamically created RBAC objects (roles, permission sets, access scopes)\nthat are generated by the internal token API for sensors." } } }, diff --git a/generated/api/v1/group_service.swagger.json b/generated/api/v1/group_service.swagger.json index 83bd2b92da896..3d9617e78d00a 100644 --- a/generated/api/v1/group_service.swagger.json +++ b/generated/api/v1/group_service.swagger.json @@ -77,6 +77,14 @@ ], "default": "IMPERATIVE" }, + { + "name": "traits.expiresAt", + "description": "expires_at specifies when this object should be considered expired and eligible for pruning.\nThis field is optional. 
Objects without an expires_at value are never pruned based on expiry.\nCurrently used for dynamically created RBAC objects (roles, permission sets, access scopes)\nthat are generated by the internal token API for sensors.", + "in": "query", + "required": false, + "type": "string", + "format": "date-time" + }, { "name": "authProviderId", "in": "query", @@ -380,6 +388,11 @@ }, "origin": { "$ref": "#/definitions/storageTraitsOrigin" + }, + "expiresAt": { + "type": "string", + "format": "date-time", + "description": "expires_at specifies when this object should be considered expired and eligible for pruning.\nThis field is optional. Objects without an expires_at value are never pruned based on expiry.\nCurrently used for dynamically created RBAC objects (roles, permission sets, access scopes)\nthat are generated by the internal token API for sensors." } } }, diff --git a/generated/api/v1/notifier_service.swagger.json b/generated/api/v1/notifier_service.swagger.json index ea69b91dd268c..91715919ade25 100644 --- a/generated/api/v1/notifier_service.swagger.json +++ b/generated/api/v1/notifier_service.swagger.json @@ -880,6 +880,11 @@ }, "origin": { "$ref": "#/definitions/storageTraitsOrigin" + }, + "expiresAt": { + "type": "string", + "format": "date-time", + "description": "expires_at specifies when this object should be considered expired and eligible for pruning.\nThis field is optional. Objects without an expires_at value are never pruned based on expiry.\nCurrently used for dynamically created RBAC objects (roles, permission sets, access scopes)\nthat are generated by the internal token API for sensors." } } }, diff --git a/generated/api/v1/role_service.swagger.json b/generated/api/v1/role_service.swagger.json index ddbdeda1f8c64..94bd86348d8dc 100644 --- a/generated/api/v1/role_service.swagger.json +++ b/generated/api/v1/role_service.swagger.json @@ -1064,6 +1064,11 @@ }, "origin": { "$ref": "#/definitions/storageTraitsOrigin" + }, + "expiresAt": { + "type": "string", + "format": "date-time", + "description": "expires_at specifies when this object should be considered expired and eligible for pruning.\nThis field is optional. Objects without an expires_at value are never pruned based on expiry.\nCurrently used for dynamically created RBAC objects (roles, permission sets, access scopes)\nthat are generated by the internal token API for sensors." } } }, diff --git a/generated/api/v1/signature_integration_service.swagger.json b/generated/api/v1/signature_integration_service.swagger.json index e693bd546db6a..681392ae39700 100644 --- a/generated/api/v1/signature_integration_service.swagger.json +++ b/generated/api/v1/signature_integration_service.swagger.json @@ -320,6 +320,11 @@ }, "origin": { "$ref": "#/definitions/storageTraitsOrigin" + }, + "expiresAt": { + "type": "string", + "format": "date-time", + "description": "expires_at specifies when this object should be considered expired and eligible for pruning.\nThis field is optional. Objects without an expires_at value are never pruned based on expiry.\nCurrently used for dynamically created RBAC objects (roles, permission sets, access scopes)\nthat are generated by the internal token API for sensors." 
} } }, diff --git a/generated/storage/traits.pb.go b/generated/storage/traits.pb.go index e15899cebf7bb..4bdf49fb8d111 100644 --- a/generated/storage/traits.pb.go +++ b/generated/storage/traits.pb.go @@ -9,6 +9,7 @@ package storage import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" unsafe "unsafe" @@ -205,8 +206,13 @@ type Traits struct { MutabilityMode Traits_MutabilityMode `protobuf:"varint,1,opt,name=mutability_mode,json=mutabilityMode,proto3,enum=storage.Traits_MutabilityMode" json:"mutability_mode,omitempty"` Visibility Traits_Visibility `protobuf:"varint,2,opt,name=visibility,proto3,enum=storage.Traits_Visibility" json:"visibility,omitempty"` Origin Traits_Origin `protobuf:"varint,3,opt,name=origin,proto3,enum=storage.Traits_Origin" json:"origin,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // expires_at specifies when this object should be considered expired and eligible for pruning. + // This field is optional. Objects without an expires_at value are never pruned based on expiry. + // Currently used for dynamically created RBAC objects (roles, permission sets, access scopes) + // that are generated by the internal token API for sensors. + ExpiresAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Traits) Reset() { @@ -260,17 +266,26 @@ func (x *Traits) GetOrigin() Traits_Origin { return Traits_IMPERATIVE } +func (x *Traits) GetExpiresAt() *timestamppb.Timestamp { + if x != nil { + return x.ExpiresAt + } + return nil +} + var File_storage_traits_proto protoreflect.FileDescriptor const file_storage_traits_proto_rawDesc = "" + "\n" + - "\x14storage/traits.proto\x12\astorage\"\x80\x03\n" + + "\x14storage/traits.proto\x12\astorage\x1a\x1fgoogle/protobuf/timestamp.proto\"\xbb\x03\n" + "\x06Traits\x12G\n" + "\x0fmutability_mode\x18\x01 \x01(\x0e2\x1e.storage.Traits.MutabilityModeR\x0emutabilityMode\x12:\n" + "\n" + "visibility\x18\x02 \x01(\x0e2\x1a.storage.Traits.VisibilityR\n" + "visibility\x12.\n" + - "\x06origin\x18\x03 \x01(\x0e2\x16.storage.Traits.OriginR\x06origin\";\n" + + "\x06origin\x18\x03 \x01(\x0e2\x16.storage.Traits.OriginR\x06origin\x129\n" + + "\n" + + "expires_at\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\texpiresAt\";\n" + "\x0eMutabilityMode\x12\x10\n" + "\fALLOW_MUTATE\x10\x00\x12\x17\n" + "\x13ALLOW_MUTATE_FORCED\x10\x01\"%\n" + @@ -303,20 +318,22 @@ func file_storage_traits_proto_rawDescGZIP() []byte { var file_storage_traits_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_storage_traits_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_storage_traits_proto_goTypes = []any{ - (Traits_MutabilityMode)(0), // 0: storage.Traits.MutabilityMode - (Traits_Visibility)(0), // 1: storage.Traits.Visibility - (Traits_Origin)(0), // 2: storage.Traits.Origin - (*Traits)(nil), // 3: storage.Traits + (Traits_MutabilityMode)(0), // 0: storage.Traits.MutabilityMode + (Traits_Visibility)(0), // 1: storage.Traits.Visibility + (Traits_Origin)(0), // 2: storage.Traits.Origin + (*Traits)(nil), // 3: storage.Traits + (*timestamppb.Timestamp)(nil), // 4: google.protobuf.Timestamp } var file_storage_traits_proto_depIdxs = []int32{ 0, // 0: storage.Traits.mutability_mode:type_name -> storage.Traits.MutabilityMode 
1, // 1: storage.Traits.visibility:type_name -> storage.Traits.Visibility 2, // 2: storage.Traits.origin:type_name -> storage.Traits.Origin - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name + 4, // 3: storage.Traits.expires_at:type_name -> google.protobuf.Timestamp + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name } func init() { file_storage_traits_proto_init() } diff --git a/generated/storage/traits_vtproto.pb.go b/generated/storage/traits_vtproto.pb.go index dd3deb932da92..caeb52f9a508b 100644 --- a/generated/storage/traits_vtproto.pb.go +++ b/generated/storage/traits_vtproto.pb.go @@ -7,8 +7,10 @@ package storage import ( fmt "fmt" protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + timestamppb1 "github.com/planetscale/vtprotobuf/types/known/timestamppb" proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" io "io" ) @@ -27,6 +29,7 @@ func (m *Traits) CloneVT() *Traits { r.MutabilityMode = m.MutabilityMode r.Visibility = m.Visibility r.Origin = m.Origin + r.ExpiresAt = (*timestamppb.Timestamp)((*timestamppb1.Timestamp)(m.ExpiresAt).CloneVT()) if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -53,6 +56,9 @@ func (this *Traits) EqualVT(that *Traits) bool { if this.Origin != that.Origin { return false } + if !(*timestamppb1.Timestamp)(this.ExpiresAt).EqualVT((*timestamppb1.Timestamp)(that.ExpiresAt)) { + return false + } return string(this.unknownFields) == string(that.unknownFields) } @@ -93,6 +99,16 @@ func (m *Traits) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.ExpiresAt != nil { + size, err := (*timestamppb1.Timestamp)(m.ExpiresAt).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } if m.Origin != 0 { i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Origin)) i-- @@ -126,6 +142,10 @@ func (m *Traits) SizeVT() (n int) { if m.Origin != 0 { n += 1 + protohelpers.SizeOfVarint(uint64(m.Origin)) } + if m.ExpiresAt != nil { + l = (*timestamppb1.Timestamp)(m.ExpiresAt).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } n += len(m.unknownFields) return n } @@ -216,6 +236,42 @@ func (m *Traits) UnmarshalVT(dAtA []byte) error { break } } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpiresAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExpiresAt == nil { + m.ExpiresAt = ×tamppb.Timestamp{} + } + if err := 
(*timestamppb1.Timestamp)(m.ExpiresAt).UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -324,6 +380,42 @@ func (m *Traits) UnmarshalVTUnsafe(dAtA []byte) error { break } } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpiresAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExpiresAt == nil { + m.ExpiresAt = ×tamppb.Timestamp{} + } + if err := (*timestamppb1.Timestamp)(m.ExpiresAt).UnmarshalVTUnsafe(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) diff --git a/proto/storage/proto.lock b/proto/storage/proto.lock index a806224d90996..6fbcf0b159ace 100644 --- a/proto/storage/proto.lock +++ b/proto/storage/proto.lock @@ -18641,10 +18641,20 @@ "id": 3, "name": "origin", "type": "Origin" + }, + { + "id": 4, + "name": "expires_at", + "type": "google.protobuf.Timestamp" } ] } ], + "imports": [ + { + "path": "google/protobuf/timestamp.proto" + } + ], "package": { "name": "storage" }, diff --git a/proto/storage/traits.proto b/proto/storage/traits.proto index 6901bf9e84115..dd17d33f38664 100644 --- a/proto/storage/traits.proto +++ b/proto/storage/traits.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package storage; +import "google/protobuf/timestamp.proto"; + option go_package = "./storage;storage"; option java_package = "io.stackrox.proto.storage"; @@ -59,4 +61,10 @@ message Traits { DYNAMIC = 4; } Origin origin = 3; + + // expires_at specifies when this object should be considered expired and eligible for pruning. + // This field is optional. Objects without an expires_at value are never pruned based on expiry. + // Currently used for dynamically created RBAC objects (roles, permission sets, access scopes) + // that are generated by the internal token API for sensors. 
+ google.protobuf.Timestamp expires_at = 4; } From edb0bafc088ff957ac06b9e5bd9b62c6ebf653ae Mon Sep 17 00:00:00 2001 From: Marcin Owsiany Date: Wed, 28 Jan 2026 14:50:27 +0100 Subject: [PATCH 052/232] ROX-27191: operator manifest support, for community build (#18601) --- .openshift-ci/ci_tests.py | 29 +- .openshift-ci/post_tests.py | 2 +- CHANGELOG.md | 1 + operator/Makefile | 35 +- operator/README.md | 6 +- operator/config/default/kustomization.yaml | 16 +- .../development_build/kustomization.yaml | 18 + .../flavors/opensource/kustomization.yaml | 18 + operator/config/manifests/kustomization.yaml | 1 - operator/install/README.md | 49 + operator/install/manifest.yaml | 4635 +++++++++++++++++ ...th.yaml => 200-access-no-auth.gotmpl.yaml} | 4 +- ...2.yaml => 200-access-no-http2.gotmpl.yaml} | 4 +- ...yaml => 200-access-privileged.gotmpl.yaml} | 4 +- ...ml => 200-access-unprivileged.gotmpl.yaml} | 4 +- operator/tests/run.sh | 49 +- tests/e2e/lib.sh | 10 +- tests/e2e/run-scanner-v4-install.bats | 6 +- tools/allowed-large-files | 1 + 19 files changed, 4802 insertions(+), 90 deletions(-) create mode 100644 operator/config/flavors/development_build/kustomization.yaml create mode 100644 operator/config/flavors/opensource/kustomization.yaml create mode 100644 operator/install/README.md create mode 100644 operator/install/manifest.yaml rename operator/tests/controller/metrics/{200-access-no-auth.yaml => 200-access-no-auth.gotmpl.yaml} (87%) rename operator/tests/controller/metrics/{200-access-no-http2.yaml => 200-access-no-http2.gotmpl.yaml} (87%) rename operator/tests/controller/metrics/{200-access-privileged.yaml => 200-access-privileged.gotmpl.yaml} (88%) rename operator/tests/controller/metrics/{200-access-unprivileged.yaml => 200-access-unprivileged.gotmpl.yaml} (89%) diff --git a/.openshift-ci/ci_tests.py b/.openshift-ci/ci_tests.py index 3676771516f01..7dae063a04b0c 100755 --- a/.openshift-ci/ci_tests.py +++ b/.openshift-ci/ci_tests.py @@ -94,38 +94,17 @@ def run(self): "-p", '[{"op": "add", "path": "/spec/disableAllDefaultSources", "value": true}]'], self.OLM_SETUP_TIMEOUT_SEC, ) + print("Bouncing catalog operator pod to clear its cache") olm_ns = "openshift-operator-lifecycle-manager" - else: - print("Installing OLM") - attempts = 3 - for attempt in range(1, attempts + 1): - try: - self.run_with_graceful_kill( - ["make", "-C", "operator", "olm-install"], - self.OLM_SETUP_TIMEOUT_SEC, - ) - break - except Exception as ex: - if attempt == attempts: - raise - print(f"OLM install failed with {ex} (attempt {attempt}/{attempts}), retrying...") - print("Removing unused catalog source(s)") self.run_with_graceful_kill( - ["kubectl", "delete", "catalogsource.operators.coreos.com", - "--namespace=olm", "--all"], + ["kubectl", "delete", "pods", + f"--namespace={olm_ns}", "--selector", "app=catalog-operator", "--now=true"], self.OLM_SETUP_TIMEOUT_SEC, ) - olm_ns = "olm" - print("Bouncing catalog operator pod to clear its cache") - self.run_with_graceful_kill( - ["kubectl", "delete", "pods", - f"--namespace={olm_ns}", "--selector", "app=catalog-operator", "--now=true"], - self.OLM_SETUP_TIMEOUT_SEC, - ) print("Executing operator e2e tests") self.run_with_graceful_kill( - ["operator/tests/run.sh"], + ["operator/tests/run.sh", self._operator_cluster_type], self.TEST_TIMEOUT_SEC, output_dir="/tmp/operator-e2e-misc-logs", ) diff --git a/.openshift-ci/post_tests.py b/.openshift-ci/post_tests.py index 0abe84832d6d1..8c152df254b17 100644 --- a/.openshift-ci/post_tests.py +++ b/.openshift-ci/post_tests.py @@ 
-163,7 +163,7 @@ def __init__( self._check_stackrox_logs = check_stackrox_logs self.k8s_namespaces = [ "stackrox", - "stackrox-operator", + "rhacs-operator-system", "proxies", "squid", "kube-system", diff --git a/CHANGELOG.md b/CHANGELOG.md index c304847b31bd6..6fb59d5cac35c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ Changes should still be described appropriately in JIRA/doc input pages, for inc - ROX-30094, ROX-30610, ROX-30740: Add new namespaces to Layered Products default config regex. - ROX-31960, ROX-32449: include and exclude filters for custom metrics. - ROX-30641: Added a new policy criteria "Days Since CVE Fix Was Available". +- Tech preview: operator-based installation available for community StackRox build. More information in [a separate README file](operator/install/README.md). ### Removed Features - ROX-31727: `/v1/cve/requests` APIs (deprecated in 4.3.0) for managing vulnerability exceptions have been removed. diff --git a/operator/Makefile b/operator/Makefile index 244c495977e06..919cd8e57f205 100644 --- a/operator/Makefile +++ b/operator/Makefile @@ -105,14 +105,14 @@ SCORECARD_ARGS ?= --storage-image="$(SCORECARD_STORAGE_IMAGE)" --wait-time="$(SC PROJECT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) # TEST_NAMESPACE is where the operator is installed for e2e tests by CI. -TEST_NAMESPACE ?= stackrox-operator +TEST_NAMESPACE ?= stackrox-operator-system # TEST_E2E_ENV_IS_OPENSHIFT is "true" when the cluster to run e2e tests on is an # OpenShift one, and "false" otherwise. TEST_E2E_ENV_IS_OPENSHIFT ?= $(shell $(PROJECT_DIR)/hack/retry-kubectl.sh < /dev/null get scc > /dev/null 2>&1 && echo true || echo false) -# KUTTL_TEST_RUN_LABELS_ARGS specifies what label set to use for kuttl tests. -KUTTL_TEST_RUN_LABELS_ARGS ?= --test-run-labels openshift=$(TEST_E2E_ENV_IS_OPENSHIFT) +# KUTTL_TEST_RUN_ARGS specifies what label set and templating vars to use for kuttl tests. +KUTTL_TEST_RUN_ARGS ?= --test-run-labels openshift=$(TEST_E2E_ENV_IS_OPENSHIFT) --template-var operator_ns=$(TEST_NAMESPACE) # ROX_IMAGE_FLAVOR is an ARG used in Dockerfiles that defines the default registries for main, scanner, and collector images. # ROX_IMAGE_FLAVOR valid values are: development_build, opensource, rhacs. @@ -264,19 +264,19 @@ validate-crs: yq .PHONY: test-e2e test-e2e: build install validate-crs kuttl ensure-rox-main-image-exists ## Run e2e tests with local manager. mkdir -p $(PROJECT_DIR)/build/kuttl-test-artifacts - KUTTL=$(KUTTL) PATH="$(PROJECT_DIR)/hack:$${PATH}" $(KUTTL) test $(KUTTL_TEST_RUN_LABELS_ARGS) + KUTTL=$(KUTTL) PATH="$(PROJECT_DIR)/hack:$${PATH}" $(KUTTL) test $(KUTTL_TEST_RUN_ARGS) .PHONY: test-e2e-deployed test-e2e-deployed: validate-crs kuttl ## Run e2e tests with manager deployed on cluster. mkdir -p $(PROJECT_DIR)/build/kuttl-test-artifacts - KUTTL=$(KUTTL) PATH="$(PROJECT_DIR)/hack:$${PATH}" SKIP_MANAGER_START=1 $(KUTTL) test $(KUTTL_TEST_RUN_LABELS_ARGS) + KUTTL=$(KUTTL) PATH="$(PROJECT_DIR)/hack:$${PATH}" SKIP_MANAGER_START=1 $(KUTTL) test $(KUTTL_TEST_RUN_ARGS) .PHONY: test-upgrade test-upgrade: kuttl bundle-post-process ## Run OLM-based operator upgrade tests. mkdir -p $(PROJECT_DIR)/build/kuttl-test-artifacts-upgrade SKIP_MANAGER_START=1 \ NEW_PRODUCT_VERSION=$$(make --quiet --no-print-directory -C .. 
tag) \ - KUTTL=$(KUTTL) PATH="$(PROJECT_DIR)/hack:$${PATH}" $(KUTTL) test --config kuttl-test.yaml --artifacts-dir build/kuttl-test-artifacts-upgrade $(KUTTL_TEST_RUN_LABELS_ARGS) tests/upgrade + KUTTL=$(KUTTL) PATH="$(PROJECT_DIR)/hack:$${PATH}" $(KUTTL) test --config kuttl-test.yaml --artifacts-dir build/kuttl-test-artifacts-upgrade $(KUTTL_TEST_RUN_ARGS) tests/upgrade .PHONY: test-bundle-helpers test-bundle-helpers: ## Run Python unit tests against helper scripts. @@ -307,9 +307,11 @@ ensure-rox-main-image-exists: .PHONY: build-installer build-installer: manifests generate kustomize ## Generate a consolidated YAML with CRDs and deployment. + mkdir -p build/dist + echo 'resources: [ "../../config/flavors/$(ROX_IMAGE_FLAVOR)" ]' > build/dist/kustomization.yaml + cd build/dist && $(KUSTOMIZE) edit set image controller=${IMG} mkdir -p dist - cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} - $(KUSTOMIZE) build config/default > dist/install.yaml + $(KUSTOMIZE) build build/dist > dist/install.yaml .PHONY: build build: manifests generate fmt vet ## Build operator local binary. @@ -401,6 +403,18 @@ deploy-previous-via-olm: kuttl bundle-post-process ## Deploy replaced version of set -x ;\ KUTTL=$(KUTTL) PATH="$(PROJECT_DIR)/hack:$${PATH}" ./hack/olm-operator-install.sh $(TEST_NAMESPACE) $(INDEX_IMG_BASE) $(INDEX_IMG_TAG) $${replaced_version} $(INSTALL_CHANNEL) +.PHONY: deploy-via-installer +deploy-via-installer: ## Deploy current version of operator via distribution manifest file. + ./hack/retry-kubectl.sh build/manifests/kustomization.yaml + $(KUSTOMIZE) build build/manifests | $(OPERATOR_SDK) generate bundle $(BUNDLE_GEN_FLAGS) # Fix the createdAt annotation $(YQ) -i '.metadata.annotations.createdAt = ""' bundle/manifests/rhacs-operator.clusterserviceversion.yaml # Remove autogenerated operator channel LABEL from the bundle to avoid confusion because it's ignored anyways. diff --git a/operator/README.md b/operator/README.md index 20c59dfb166e7..7855ac223b4ab 100644 --- a/operator/README.md +++ b/operator/README.md @@ -278,12 +278,12 @@ Push your changes to a GitHub PR (draft is OK) to let CI build and push images f Now the latest version (based off of `make tag`) can be installed like so: ```bash -# TODO(ROX-11744): drop branding here once operator is available from quay.io/stackrox-io +# TODO(ROX-11744): drop branding here once operator is available via OLM from quay.io/stackrox-io ROX_PRODUCT_BRANDING=RHACS_BRANDING make deploy-via-olm ``` -This installs the operator into the `stackrox-operator` namespace. +This installs the operator into the `rhacs-operator-system` namespace. This can be overridden with a `TEST_NAMESPACE` argument. The version can be overridden with a `VERSION` argument. @@ -299,7 +299,7 @@ You can blow everything away with: ```bash $ make olm-uninstall -$ kubectl delete ns stackrox-operator +$ kubectl delete ns stackrox-operator-system # Optionally remove CRDs $ make uninstall diff --git a/operator/config/default/kustomization.yaml b/operator/config/default/kustomization.yaml index db94e3804b541..3788bd86ec67b 100644 --- a/operator/config/default/kustomization.yaml +++ b/operator/config/default/kustomization.yaml @@ -1,18 +1,4 @@ -# Adds namespace to all resources. -namespace: rhacs-operator-system - -# Value of this field is prepended to the -# names of all resources, e.g. a deployment named -# "wordpress" becomes "alices-wordpress". -# Note that it should also match with the prefix (text before '-') of the namespace -# field above. 
-namePrefix: rhacs-operator- - -# Labels to add to all resources and selectors. -labels: -- includeSelectors: true - pairs: - app: rhacs-operator +# This file is meant to be used by the kustomizations in the flavors/ subdirectories. resources: - ../crd diff --git a/operator/config/flavors/development_build/kustomization.yaml b/operator/config/flavors/development_build/kustomization.yaml new file mode 100644 index 0000000000000..8b4912bf36879 --- /dev/null +++ b/operator/config/flavors/development_build/kustomization.yaml @@ -0,0 +1,18 @@ +# Adds namespace to all resources. +namespace: rhacs-operator-system + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +namePrefix: rhacs-operator- + +# Labels to add to all resources and selectors. +labels: +- includeSelectors: true + pairs: + app: rhacs-operator + +resources: +- ../../default diff --git a/operator/config/flavors/opensource/kustomization.yaml b/operator/config/flavors/opensource/kustomization.yaml new file mode 100644 index 0000000000000..23a08b1957b9d --- /dev/null +++ b/operator/config/flavors/opensource/kustomization.yaml @@ -0,0 +1,18 @@ +# Adds namespace to all resources. +namespace: stackrox-operator-system + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +namePrefix: stackrox-operator- + +# Labels to add to all resources and selectors. +labels: +- includeSelectors: true + pairs: + app: stackrox-operator + +resources: +- ../../default diff --git a/operator/config/manifests/kustomization.yaml b/operator/config/manifests/kustomization.yaml index d1db429e3d995..639eb916655d8 100644 --- a/operator/config/manifests/kustomization.yaml +++ b/operator/config/manifests/kustomization.yaml @@ -2,7 +2,6 @@ # used to generate the 'manifests/' directory in a bundle. resources: - bases/rhacs-operator.clusterserviceversion.yaml -- ../default - ../samples - ../scorecard-versioned diff --git a/operator/install/README.md b/operator/install/README.md new file mode 100644 index 0000000000000..6911f515765cc --- /dev/null +++ b/operator/install/README.md @@ -0,0 +1,49 @@ +# Community StackRox Operator installation + +## Introduction + +Historically, Helm and the "manifest installation" methods were the only ways to install the community, StackRox-branded build. +An operator was available only for the "Red Hat Advanced Cluster Security"-branded build. + +This is changing. Due to the significant maintenance burden of three installation methods, +we are planning to consolidate on just one: the operator. + +As the first step, in the 4.10 release we are providing the simplest possible, _temporary_ way to install the community StackRox-branded operator. +We hope this is useful to the community for getting to know the operator before we provide a more customizable, powerful and unified way to install it in a subsequent release. + +## How to use it? + +Once 4.10 is released, installing the operator is simply a matter of: +```shell +kubectl apply -f https://github.com/stackrox/stackrox/raw/refs/tags/4.10.0/operator/install/manifest.yaml +kubectl rollout status deployment -n stackrox-operator-system stackrox-operator-controller-manager +``` + +## Where to go from here?
+ +Once the operator is running, to actually deploy StackRox you need to create a `Central` and a `SecuredCluster` custom resource. +Please have a look at the [samples](../config/samples) directory. + +Before applying the `SecuredCluster` CR you need to retrieve an init bundle or cluster registration secret from Central and apply it on the cluster. +**Note** that currently the page where you can generate an init bundle requires you to select OpenShift as the platform. +Otherwise it is only possible to download an init bundle formatted for Helm installations. + +[Documentation for the custom resource schema](https://docs.redhat.com/en/documentation/red_hat_advanced_cluster_security_for_kubernetes/latest/html/installing/installing-rhacs-on-red-hat-openshift#install-central-config-options-ocp) - +the way to customize your StackRox deployment - is currently only available +at the Red Hat documentation portal. + +## Caveats + +You may encounter a few references to RH ACS when using the operator in places such as: +- the descriptions of a few fields in the OpenAPI schema of the custom resources +- the `UserAgent` header used by the operator controller when talking to the kube API server +- the Central web UI when generating init bundles or cluster registration secrets + +We hope to clean these up in the next release. + +## How was this manifest created? + +```shell +BUILD_TAG=4.10.0 ROX_PRODUCT_BRANDING=STACKROX_BRANDING make -C operator/ build-installer +cp operator/dist/install.yaml operator/install/manifest.yaml +``` diff --git a/operator/install/manifest.yaml b/operator/install/manifest.yaml new file mode 100644 index 0000000000000..363bcc35cbe5f --- /dev/null +++ b/operator/install/manifest.yaml @@ -0,0 +1,4635 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + app: stackrox-operator + control-plane: controller-manager + name: stackrox-operator-system +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.20.0 + labels: + app: stackrox-operator + name: centrals.platform.stackrox.io +spec: + group: platform.stackrox.io + names: + kind: Central + listKind: CentralList + plural: centrals + singular: central + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.productVersion + name: Version + type: string + - jsonPath: .status.central.adminPassword.adminPasswordSecretReference + name: AdminPassword + type: string + - jsonPath: .status.conditions[?(@.type=="Deployed")].message + name: Message + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: |- + Central is the configuration template for the central services. This includes the API server, persistent storage, + and the web UI, as well as the image scanner. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated.
+ In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CentralSpec defines the desired state of Central + properties: + central: + description: Settings for the Central component, which is responsible + for all user interaction. + properties: + adminPasswordGenerationDisabled: + description: |- + Disable admin password generation. Do not use this for first-time installations, + as you will have no way to perform initial setup and configuration of alternative authentication methods. + type: boolean + adminPasswordSecret: + description: |- + Specify a secret that contains the administrator password in the "password" data item. + If omitted, the operator will auto-generate a password and store it in the "password" item + in the "central-htpasswd" secret. + properties: + name: + description: The name of the referenced secret. + type: string + required: + - name + type: object + db: + description: Settings for Central DB, which is responsible for + data persistence. + properties: + configOverride: + description: Config map containing postgresql.conf and pg_hba.conf + that will be used if modifications need to be applied. + properties: + name: + description: The name of the referenced config map. + type: string + required: + - name + type: object + connectionPoolSize: + description: Configures the database connection pool size. + properties: + maxConnections: + description: |- + Maximum number of connections in the connection pool. + The default is: 90. + format: int32 + minimum: 1 + type: integer + minConnections: + description: |- + Minimum number of connections in the connection pool. + The default is: 10. + format: int32 + minimum: 1 + type: integer + required: + - maxConnections + - minConnections + type: object + connectionString: + description: |- + Specify a connection string that corresponds to a database managed elsewhere. If set, the operator will not manage the Central DB. + When using this option, you must explicitly set a password secret; automatically generating a password will not + be supported. + type: string + hostAliases: + description: HostAliases allows configuring additional hostnames + to resolve in the pod's hosts file. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + isEnabled: + description: |- + Obsolete field. + This field will be removed in a future release. + enum: + - Default + - Enabled + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + If you want this component to only run on specific nodes, you can configure a node selector here. + This setting overrides spec.customize.deploymentDefaults.nodeSelector. + type: object + passwordSecret: + description: |- + Specify a secret that contains the password in the "password" data item. This can only be used when + specifying a connection string manually. + When omitted, the operator will auto-generate a DB password and store it in the "password" item + in the "central-db-password" secret. + properties: + name: + description: The name of the referenced secret. 
+ type: string + required: + - name + type: object + persistence: + description: |- + Configures how Central DB should store its persistent data. You can choose between using a persistent + volume claim (recommended default), and a host path. + properties: + hostPath: + description: |- + Stores persistent data in a directory on the host. This is not recommended, and should only + be used together with a node selector (only available in YAML view). + properties: + path: + description: The path on the host running Central. + type: string + type: object + persistentVolumeClaim: + description: |- + Uses a Kubernetes persistent volume claim (PVC) to manage the storage location of persistent data. + Recommended for most users. + properties: + claimName: + description: |- + The name of the PVC to manage persistent data. If no PVC with the given name exists, it will be + created. + The default is: central-db. + type: string + size: + description: |- + The size of the persistent volume when created through the claim. If a claim was automatically created, + this can be used after the initial deployment to resize (grow) the volume (only supported by some + storage class controllers). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + type: string + storageClassName: + description: |- + The name of the storage class to use for the PVC. If your cluster is not configured with a default storage + class, you must select a value here. + type: string + type: object + type: object + resources: + description: |- + Allows overriding the default resource settings for this component. Please consult the documentation + for an overview of default resource requirements and a sizing guide. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. 
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + tolerations: + description: |- + If you want this component to only run on specific nodes, you can configure tolerations of tainted nodes. + This setting overrides spec.customize.deploymentDefaults.tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists, Equal, Lt, and Gt. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + Lt and Gt perform numeric comparisons (requires feature gate TaintTolerationComparisonOperators). + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + declarativeConfiguration: + description: Configures resources within Central in a declarative + manner. + properties: + configMaps: + description: List of config maps containing declarative configuration. + items: + description: LocalConfigMapReference is a reference to a + config map within the same namespace. + properties: + name: + description: The name of the referenced config map. + type: string + required: + - name + type: object + type: array + secrets: + description: List of secrets containing declarative configuration. + items: + description: LocalSecretReference is a reference to a secret + within the same namespace. + properties: + name: + description: The name of the referenced secret. + type: string + required: + - name + type: object + type: array + type: object + defaultTLSSecret: + description: |- + By default, Central will only serve an internal TLS certificate, which means that you will + need to handle TLS termination at the ingress or load balancer level. + If you want to terminate TLS in Central and serve a custom server certificate, you can specify + a secret containing the certificate and private key here. + properties: + name: + description: The name of the referenced secret. + type: string + required: + - name + type: object + exposure: + description: |- + Here you can configure if you want to expose central through a node port, a load balancer, or an OpenShift + route. 
+ properties: + loadBalancer: + description: Expose Central through a load balancer service. + properties: + enabled: + description: 'The default is: false.' + type: boolean + ip: + description: If you have a static IP address reserved + for your load balancer, you can enter it here. + type: string + port: + description: 'The default is: 443.' + format: int32 + maximum: 65535 + minimum: 1 + type: integer + type: object + nodePort: + description: Expose Central through a node port. + properties: + enabled: + description: 'The default is: false.' + type: boolean + port: + description: Use this to specify an explicit node port. + Most users should leave this empty. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + type: object + route: + description: Expose Central through an OpenShift route. + properties: + enabled: + description: |- + Expose Central with a passthrough route. + The default is: false. + type: boolean + host: + description: |- + Specify a custom hostname for the Central route. + If unspecified, an appropriate default value will be automatically chosen by the OpenShift route operator. + type: string + reencrypt: + description: |- + Set up a Central route with reencrypt TLS termination. + For reencrypt routes, the request is terminated on the OpenShift router with a custom certificate. + The request is then reencrypted by the OpenShift router and sent to Central. + [user] --TLS--> [OpenShift router] --TLS--> [Central] + properties: + enabled: + description: |- + Expose Central with a reencrypt route. + Should not be used for sensor communication. + The default is: false. + type: boolean + host: + description: |- + Specify a custom hostname for the Central reencrypt route. + If unspecified, an appropriate default value will be automatically chosen by the OpenShift route operator. + type: string + tls: + description: TLS settings for exposing Central via + a reencrypt Route. + properties: + caCertificate: + description: |- + The PEM encoded certificate chain that may be used to establish a complete chain of trust. + Defaults to the OpenShift certificate authority. + type: string + certificate: + description: |- + The PEM encoded certificate that is served on the route. Must be a single serving + certificate instead of a certificate chain. + Defaults to a certificate signed by the OpenShift certificate authority. + type: string + destinationCACertificate: + description: |- + The CA certificate of the final destination, i.e. of Central. + Used by the OpenShift router for health checks on the secure connection. + Defaults to the Central certificate authority. + type: string + key: + description: |- + The PEM encoded private key of the certificate that is served on the route. + Defaults to a certificate signed by the OpenShift certificate authority. + type: string + type: object + type: object + type: object + type: object + hostAliases: + description: HostAliases allows configuring additional hostnames + to resolve in the pod's hosts file. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + monitoring: + description: |- + Configures monitoring endpoint for Central. 
The monitoring endpoint + allows other services to collect metrics from Central, provided in + Prometheus compatible format. + properties: + exposeEndpoint: + description: |- + Expose the monitoring endpoint. A new service, "monitoring", + with port 9090, will be created as well as a network policy allowing + inbound connections to the port. + enum: + - Enabled + - Disabled + type: string + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + If you want this component to only run on specific nodes, you can configure a node selector here. + This setting overrides spec.customize.deploymentDefaults.nodeSelector. + type: object + notifierSecretsEncryption: + description: Configures the encryption of notifier secrets stored + in the Central DB. + properties: + enabled: + description: |- + Enables the encryption of notifier secrets stored in the Central DB. + The default is: false. + type: boolean + type: object + persistence: + description: Unused field. This field exists solely for backward + compatibility starting from version v4.6.0. + properties: + hostPath: + description: Obsolete unused field. + properties: + path: + description: The path on the host running Central. + type: string + type: object + persistentVolumeClaim: + description: Obsolete unused field. + properties: + claimName: + description: Obsolete unused field. + type: string + size: + description: Obsolete unused field. + type: string + storageClassName: + description: Obsolete unused field. + type: string + type: object + type: object + resources: + description: |- + Allows overriding the default resource settings for this component. Please consult the documentation + for an overview of default resource requirements and a sizing guide. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. 
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + telemetry: + description: |- + Configures telemetry settings for Central. If enabled, Central transmits telemetry and diagnostic + data to a remote storage backend. + properties: + enabled: + description: |- + Specifies whether Telemetry is enabled. + The default is: true. + type: boolean + storage: + description: Defines the telemetry storage backend for Central. + properties: + endpoint: + description: Storage API endpoint. + type: string + key: + description: Storage API key. If not set, telemetry is + disabled. + type: string + type: object + type: object + tolerations: + description: |- + If you want this component to only run on specific nodes, you can configure tolerations of tainted nodes. + This setting overrides spec.customize.deploymentDefaults.tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists, Equal, Lt, and Gt. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + Lt and Gt perform numeric comparisons (requires feature gate TaintTolerationComparisonOperators). + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + configAsCode: + description: Config-as-Code configuration. + properties: + configAsCodeComponent: + description: |- + If you want to deploy the Config as Code component, set this to "Enabled" + The default is: Enabled. + enum: + - Enabled + - Disabled + type: string + hostAliases: + description: HostAliases allows configuring additional hostnames + to resolve in the pod's hosts file. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. 
+ type: string + required: + - ip + type: object + type: array + nodeSelector: + additionalProperties: + type: string + description: |- + If you want this component to only run on specific nodes, you can configure a node selector here. + This setting overrides spec.customize.deploymentDefaults.nodeSelector. + type: object + resources: + description: |- + Allows overriding the default resource settings for this component. Please consult the documentation + for an overview of default resource requirements and a sizing guide. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + tolerations: + description: |- + If you want this component to only run on specific nodes, you can configure tolerations of tainted nodes. + This setting overrides spec.customize.deploymentDefaults.tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists, Equal, Lt, and Gt. Defaults to Equal. 
+ Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + Lt and Gt perform numeric comparisons (requires feature gate TaintTolerationComparisonOperators). + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + customize: + description: Customizations to apply on all Central Services components. + properties: + annotations: + additionalProperties: + type: string + description: Custom annotations to set on all managed objects. + type: object + deploymentDefaults: + description: |- + Global nodeSelector and tolerations for Deployment-based components. DaemonSets (Collector) are not affected. + Component-level nodeSelector and tolerations settings override these defaults on a field-by-field basis. + properties: + nodeSelector: + additionalProperties: + type: string + description: |- + Default nodeSelector applied to all Deployment-based components. Use this for custom node + selection criteria. + Cannot be used together with pinToNodes. + type: object + pinToNodes: + description: |- + Pin all Deployment-based components to specific node types. This is a convenience setting + that automatically configures both nodeSelector and tolerations with predefined values. + Use this for common scenarios like running on OpenShift infrastructure nodes. + For custom node selection, use the explicit nodeSelector and tolerations fields instead. + Cannot be used together with nodeSelector or tolerations fields. + The default is: None. + enum: + - None + - InfraRole + type: string + tolerations: + description: |- + Default tolerations applied to all Deployment-based components. Use this when your target + nodes have custom taints that pods must tolerate. + Cannot be used together with pinToNodes. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists, Equal, Lt, and Gt. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + Lt and Gt perform numeric comparisons (requires feature gate TaintTolerationComparisonOperators). + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. 
By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + envVars: + description: Custom environment variables to set on managed pods' + containers. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. 
+ type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + labels: + additionalProperties: + type: string + description: Custom labels to set on all managed objects. + type: object + type: object + egress: + description: Settings related to outgoing network traffic. + properties: + connectivityPolicy: + description: |- + Configures whether Red Hat Advanced Cluster Security should run in online or offline (disconnected) mode. + In offline mode, automatic updates of vulnerability definitions and kernel modules are disabled. + The default is: Online. + enum: + - Online + - Offline + type: string + type: object + imagePullSecrets: + description: Additional image pull secrets to be taken into account + for pulling images. + items: + description: LocalSecretReference is a reference to a secret within + the same namespace. + properties: + name: + description: The name of the referenced secret. + type: string + required: + - name + type: object + type: array + misc: + description: |- + Deprecated field. This field will be removed in a future release. + Miscellaneous settings. + properties: + createSCCs: + description: |- + Deprecated field. This field will be removed in a future release. + Set this to true to have the operator create SecurityContextConstraints (SCCs) for the operands. This + isn't usually needed, and may interfere with other workloads. + type: boolean + type: object + monitoring: + description: Monitoring configuration. 
+ properties: + openshift: + description: OpenShiftMonitoring defines settings related to OpenShift + Monitoring + properties: + enabled: + description: 'The default is: true.' + type: boolean + required: + - enabled + type: object + type: object + network: + description: Network configuration. + properties: + policies: + description: |- + To provide security at the network level, the ACS Operator creates NetworkPolicy resources by default. If you want to manage your own NetworkPolicy objects then set this to "Disabled". + The default is: Enabled. + enum: + - Enabled + - Disabled + type: string + type: object + overlays: + description: Overlays + items: + description: "K8sObjectOverlay is an overlay that applies a set + of patches to a resource.\nIt targets a resource by its API version, + kind, and name, and applies\na list of patches to this resource.\n\n# + Examples\n\n## Adding an annotation to a resource\n\n\tapiVersion: + v1\n\tkind: ServiceAccount\n\tname: central\n\tpatches:\n\t- path: + metadata.annotations.eks\\.amazonaws\\.com/role-arn\n\t value: + \"\\\"arn:aws:iam:1234:role\\\"\"\n\n## Adding an environment + variable to a deployment\n\n\tapiVersion: apps/v1\n\tkind: Deployment\n\tname: + central\n\tpatches:\n\t- path: spec.template.spec.containers[name:central].env[-1]\n\t + \ value: |\n\t name: MY_ENV_VAR\n\t value: value\n\n## Adding + an ingress to a network policy\n\n\tapiVersion: networking.k8s.io/v1\n\tkind: + NetworkPolicy\n\tname: allow-ext-to-central\n\tpatches:\n\t- path: + spec.ingress[-1]\n\t value: |\n\t ports:\n\t - port: 999\n\t + \ protocol: TCP\n\n## Changing the value of a ConfigMap\n\n\tapiVersion: + v1\n\tkind: ConfigMap\n\tname: central-endpoints\n\tpatches:\n\t- + path: data.endpoints\\.yaml:\n\t verbatim: |\n\t disableDefault: + false\n\n## Adding a container to a deployment\n\n\tapiVersion: + apps/v1\n\tkind: Deployment\n\tname: central\n\tpatches:\n\t - + path: spec.template.spec.containers[-1]\n\t value: |\n\t name: + nginx\n\t image: nginx\n\t ports:\n\t - containerPort: + 8000\n\t name: http\n\t protocol: TCP" + properties: + apiVersion: + description: Resource API version. + type: string + kind: + description: Resource kind. + type: string + name: + description: Name of resource. + type: string + optional: + description: |- + Optional marks the overlay as optional. + When Optional is true, and the specified resource does not exist in the output manifests, the overlay will be skipped, and a warning will be logged. + When Optional is false, and the specified resource does not exist in the output manifests, an error will be thrown. + type: boolean + patches: + description: List of patches to apply to resource. + items: + description: K8sObjectOverlayPatch defines a patch to apply + to a resource. + properties: + path: + description: |- + Path of the form a.[key1:value1].b.[:value2] + Where [key1:value1] is a selector for a key-value pair to identify a list element and [:value] is a value + selector to identify a list element in a leaf list. + All path intermediate nodes must exist. + type: string + value: + description: |- + Value to add, delete or replace. + For add, the path should be a new leaf. + For delete, value should be unset. + For replace, path should reference an existing node. + All values are strings but are converted into appropriate type based on schema. + type: string + verbatim: + description: |- + Verbatim value to add, delete or replace. 
+ Same as Value, but the content is not interpreted as YAML and is treated as a literal string instead. + At least one of Value and Verbatim must be empty. + type: string + type: object + type: array + type: object + type: array + scanner: + description: |- + Settings for the Scanner component, which is responsible for vulnerability scanning of container + images. + properties: + analyzer: + description: Settings pertaining to the analyzer deployment, such + as for autoscaling. + properties: + hostAliases: + description: HostAliases allows configuring additional hostnames + to resolve in the pod's hosts file. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + nodeSelector: + additionalProperties: + type: string + description: |- + If you want this component to only run on specific nodes, you can configure a node selector here. + This setting overrides spec.customize.deploymentDefaults.nodeSelector. + type: object + resources: + description: |- + Allows overriding the default resource settings for this component. Please consult the documentation + for an overview of default resource requirements and a sizing guide. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + scaling: + description: Controls the number of analyzer replicas and + autoscaling. + properties: + autoScaling: + description: |- + When enabled, the number of component replicas is managed dynamically based on the load, within the limits + specified below. + The default is: Enabled. + enum: + - Enabled + - Disabled + type: string + maxReplicas: + description: 'The default is: 5.' + format: int32 + minimum: 1 + type: integer + minReplicas: + description: 'The default is: 2.' + format: int32 + minimum: 1 + type: integer + replicas: + description: |- + When autoscaling is disabled, the number of replicas will always be configured to match this value. + The default is: 3. + format: int32 + minimum: 1 + type: integer + type: object + tolerations: + description: |- + If you want this component to only run on specific nodes, you can configure tolerations of tainted nodes. + This setting overrides spec.customize.deploymentDefaults.tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists, Equal, Lt, and Gt. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + Lt and Gt perform numeric comparisons (requires feature gate TaintTolerationComparisonOperators). + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + db: + description: Settings pertaining to the database used by the Red + Hat Advanced Cluster Security Scanner. + properties: + hostAliases: + description: HostAliases allows configuring additional hostnames + to resolve in the pod's hosts file. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + nodeSelector: + additionalProperties: + type: string + description: |- + If you want this component to only run on specific nodes, you can configure a node selector here. 
+ This setting overrides spec.customize.deploymentDefaults.nodeSelector. + type: object + resources: + description: |- + Allows overriding the default resource settings for this component. Please consult the documentation + for an overview of default resource requirements and a sizing guide. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + tolerations: + description: |- + If you want this component to only run on specific nodes, you can configure tolerations of tainted nodes. + This setting overrides spec.customize.deploymentDefaults.tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists, Equal, Lt, and Gt. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + Lt and Gt perform numeric comparisons (requires feature gate TaintTolerationComparisonOperators). 
+ type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + monitoring: + description: |- + Configures monitoring endpoint for Scanner. The monitoring endpoint + allows other services to collect metrics from Scanner, provided in + Prometheus compatible format. + properties: + exposeEndpoint: + description: |- + Expose the monitoring endpoint. A new service, "monitoring", + with port 9090, will be created as well as a network policy allowing + inbound connections to the port. + enum: + - Enabled + - Disabled + type: string + type: object + scannerComponent: + description: |- + If you do not want to deploy the Red Hat Advanced Cluster Security Scanner, you can disable it here + (not recommended). By default, the scanner is enabled. + If you do so, all the settings in this section will have no effect. + enum: + - Enabled + - Disabled + type: string + type: object + scannerV4: + description: Settings for the Scanner V4 component, which can run + in addition to the previously existing Scanner components + properties: + db: + description: Settings pertaining to the DB deployment. + properties: + hostAliases: + description: HostAliases allows configuring additional hostnames + to resolve in the pod's hosts file. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + nodeSelector: + additionalProperties: + type: string + description: |- + If you want this component to only run on specific nodes, you can configure a node selector here. + This setting overrides spec.customize.deploymentDefaults.nodeSelector. + type: object + persistence: + description: |- + Configures how Scanner V4 should store its persistent data. + You can use a persistent volume claim (the recommended default), a host path, + or an emptyDir volume if Scanner V4 is running on a secured cluster without default StorageClass. + properties: + hostPath: + description: |- + Stores persistent data in a directory on the host. This is not recommended, and should only + be used together with a node selector (only available in YAML view). + properties: + path: + description: The path on the host running Central. + type: string + type: object + persistentVolumeClaim: + description: |- + Uses a Kubernetes persistent volume claim (PVC) to manage the storage location of persistent data. + Recommended for most users. + properties: + claimName: + description: |- + The name of the PVC to manage persistent data. If no PVC with the given name exists, it will be + created. + The default is: scanner-v4-db. + type: string + size: + description: |- + The size of the persistent volume when created through the claim. 
If a claim was automatically created, + this can be used after the initial deployment to resize (grow) the volume (only supported by some + storage class controllers). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + type: string + storageClassName: + description: |- + The name of the storage class to use for the PVC. If your cluster is not configured with a default storage + class, you must select a value here. + type: string + type: object + type: object + resources: + description: |- + Allows overriding the default resource settings for this component. Please consult the documentation + for an overview of default resource requirements and a sizing guide. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + tolerations: + description: |- + If you want this component to only run on specific nodes, you can configure tolerations of tainted nodes. + This setting overrides spec.customize.deploymentDefaults.tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. 
+ type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists, Equal, Lt, and Gt. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + Lt and Gt perform numeric comparisons (requires feature gate TaintTolerationComparisonOperators). + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + indexer: + description: Settings pertaining to the indexer deployment. + properties: + hostAliases: + description: HostAliases allows configuring additional hostnames + to resolve in the pod's hosts file. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + nodeSelector: + additionalProperties: + type: string + description: |- + If you want this component to only run on specific nodes, you can configure a node selector here. + This setting overrides spec.customize.deploymentDefaults.nodeSelector. + type: object + resources: + description: |- + Allows overriding the default resource settings for this component. Please consult the documentation + for an overview of default resource requirements and a sizing guide. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + scaling: + description: Controls the number of replicas and autoscaling + for this component. + properties: + autoScaling: + description: |- + When enabled, the number of component replicas is managed dynamically based on the load, within the limits + specified below. + The default is: Enabled. + enum: + - Enabled + - Disabled + type: string + maxReplicas: + description: 'The default is: 5.' + format: int32 + minimum: 1 + type: integer + minReplicas: + description: 'The default is: 2.' + format: int32 + minimum: 1 + type: integer + replicas: + description: |- + When autoscaling is disabled, the number of replicas will always be configured to match this value. + The default is: 3. + format: int32 + minimum: 1 + type: integer + type: object + tolerations: + description: |- + If you want this component to only run on specific nodes, you can configure tolerations of tainted nodes. + This setting overrides spec.customize.deploymentDefaults.tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists, Equal, Lt, and Gt. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + Lt and Gt perform numeric comparisons (requires feature gate TaintTolerationComparisonOperators). + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + matcher: + description: Settings pertaining to the matcher deployment. + properties: + hostAliases: + description: HostAliases allows configuring additional hostnames + to resolve in the pod's hosts file. 
+ items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + nodeSelector: + additionalProperties: + type: string + description: |- + If you want this component to only run on specific nodes, you can configure a node selector here. + This setting overrides spec.customize.deploymentDefaults.nodeSelector. + type: object + resources: + description: |- + Allows overriding the default resource settings for this component. Please consult the documentation + for an overview of default resource requirements and a sizing guide. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + scaling: + description: Controls the number of replicas and autoscaling + for this component. + properties: + autoScaling: + description: |- + When enabled, the number of component replicas is managed dynamically based on the load, within the limits + specified below. + The default is: Enabled. + enum: + - Enabled + - Disabled + type: string + maxReplicas: + description: 'The default is: 5.' + format: int32 + minimum: 1 + type: integer + minReplicas: + description: 'The default is: 2.' + format: int32 + minimum: 1 + type: integer + replicas: + description: |- + When autoscaling is disabled, the number of replicas will always be configured to match this value. + The default is: 3. 
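For illustration only, a minimal sketch of how the Scanner V4 scaling fields documented above are typically set on a Central resource. The field names (scannerV4, scannerComponent, indexer, matcher, scaling, autoScaling, minReplicas, maxReplicas, replicas) come from the schema in this patch; the apiVersion, metadata.name and the concrete values are assumptions:

    apiVersion: platform.stackrox.io/v1alpha1
    kind: Central
    metadata:
      name: stackrox-central-services   # hypothetical name
    spec:
      scannerV4:
        scannerComponent: Enabled
        indexer:
          scaling:
            autoScaling: Enabled
            minReplicas: 2
            maxReplicas: 5
        matcher:
          scaling:
            autoScaling: Disabled
            replicas: 3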
+ format: int32 + minimum: 1 + type: integer + type: object + tolerations: + description: |- + If you want this component to only run on specific nodes, you can configure tolerations of tainted nodes. + This setting overrides spec.customize.deploymentDefaults.tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists, Equal, Lt, and Gt. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + Lt and Gt perform numeric comparisons (requires feature gate TaintTolerationComparisonOperators). + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + monitoring: + description: |- + Configures monitoring endpoint for Scanner V4. The monitoring endpoint + allows other services to collect metrics from Scanner V4, provided in + Prometheus compatible format. + properties: + exposeEndpoint: + description: |- + Expose the monitoring endpoint. A new service, "monitoring", + with port 9090, will be created as well as a network policy allowing + inbound connections to the port. + enum: + - Enabled + - Disabled + type: string + type: object + scannerComponent: + description: |- + Can be specified as "Enabled" or "Disabled". + If this field is not specified, the following defaulting takes place: + * for new installations, Scanner V4 is enabled starting with ACS 4.8; + * for upgrades to 4.8 from previous releases, Scanner V4 is disabled. + enum: + - Default + - Enabled + - Disabled + type: string + type: object + tls: + description: Settings related to Transport Layer Security, such as + Certificate Authorities. + properties: + additionalCAs: + description: Allows you to specify additional trusted Root CAs. + items: + description: AdditionalCA defines a certificate for an additional + Certificate Authority. + properties: + content: + description: PEM format + type: string + name: + description: Must be a valid file basename + type: string + required: + - content + - name + type: object + type: array + type: object + type: object + status: + description: CentralStatus defines the observed state of Central. + properties: + central: + description: CentralComponentStatus describes status specific to the + central component. 
+ properties: + adminPassword: + description: AdminPassword stores information related to the auto-generated + admin password. + properties: + adminPasswordSecretReference: + description: AdminPasswordSecretReference contains reference + for the admin password + type: string + info: + description: Info stores information on how to obtain the + admin password. + type: string + type: object + type: object + conditions: + items: + description: StackRoxCondition defines a condition for a StackRox + custom resource. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + description: ConditionReason is a type of values of condition + reason. + type: string + status: + description: ConditionStatus is a type of values of condition + status. + type: string + type: + description: ConditionType is a type of values of condition + type. + type: string + required: + - status + - type + type: object + type: array + deployedRelease: + description: StackRoxRelease describes the Helm "release" that was + most recently applied. + properties: + version: + type: string + type: object + observedGeneration: + description: ObservedGeneration is the generation most recently observed + by the controller. + format: int64 + type: integer + productVersion: + description: The deployed version of the product. + type: string + required: + - conditions + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.20.0 + labels: + app: stackrox-operator + name: securedclusters.platform.stackrox.io +spec: + group: platform.stackrox.io + names: + kind: SecuredCluster + listKind: SecuredClusterList + plural: securedclusters + singular: securedcluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.productVersion + name: Version + type: string + - jsonPath: .status.conditions[?(@.type=="Deployed")].message + name: Message + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: |- + SecuredCluster is the configuration template for the secured cluster services. These include Sensor, which is + responsible for the connection to Central, and Collector, which performs host-level collection of process and + network events.

+ **Important:** Please see the _Installation Prerequisites_ on the main RHACS operator page before deploying, or + consult the RHACS documentation on creating cluster init bundles. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SecuredClusterSpec defines the desired configuration state + of a secured cluster. + properties: + admissionControl: + description: |- + Settings for the Admission Control component, which is necessary for preventive policy enforcement, + and for Kubernetes event monitoring. + properties: + bypass: + description: |- + Enables teams to bypass admission control in a monitored manner in the event of an emergency. + The default is: BreakGlassAnnotation. + enum: + - BreakGlassAnnotation + - Disabled + type: string + contactImageScanners: + description: Deprecated field. This field will be removed in a + future release. + enum: + - ScanIfMissing + - DoNotScanInline + type: string + enforcement: + description: |- + Set to Disabled to disable policy enforcement for the admission controller. This is not recommended. + On new deployments starting with version 4.9, defaults to Enabled. + On old deployments, defaults to Enabled if at least one of listenOnCreates or listenOnUpdates is true. + enum: + - Enabled + - Disabled + type: string + failurePolicy: + description: |- + If set to "Fail", the admission controller's webhooks are configured to fail-closed in case admission controller + fails to respond in time. A failure policy "Ignore" configures the webhooks to fail-open. + The default is: Ignore. + enum: + - Ignore + - Fail + type: string + hostAliases: + description: HostAliases allows configuring additional hostnames + to resolve in the pod's hosts file. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + listenOnCreates: + description: Deprecated field. This field will be removed in a + future release. + type: boolean + listenOnEvents: + description: Deprecated field. This field will be removed in a + future release. + type: boolean + listenOnUpdates: + description: Deprecated field. This field will be removed in a + future release. + type: boolean + nodeSelector: + additionalProperties: + type: string + description: |- + If you want this component to only run on specific nodes, you can configure a node selector here. + This setting overrides spec.customize.deploymentDefaults.nodeSelector. + type: object + replicas: + description: |- + The number of replicas of the admission control pod. + The default is: 3. 
+ format: int32 + minimum: 1 + type: integer + resources: + description: |- + Allows overriding the default resource settings for this component. Please consult the documentation + for an overview of default resource requirements and a sizing guide. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + timeoutSeconds: + description: Deprecated field. This field will be removed in a + future release. + format: int32 + type: integer + tolerations: + description: |- + If you want this component to only run on specific nodes, you can configure tolerations of tainted nodes. + This setting overrides spec.customize.deploymentDefaults.tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists, Equal, Lt, and Gt. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + Lt and Gt perform numeric comparisons (requires feature gate TaintTolerationComparisonOperators). 
+ type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + auditLogs: + description: Settings relating to the ingestion of Kubernetes audit + logs. + properties: + collection: + description: |- + Whether collection of Kubernetes audit logs should be enabled or disabled. Currently, this is only + supported on OpenShift 4, and trying to enable it on non-OpenShift 4 clusters will result in an error. + Use the 'Auto' setting to enable it on compatible environments, and disable it elsewhere. + The default is: Auto. + enum: + - Auto + - Disabled + - Enabled + type: string + type: object + centralEndpoint: + description: |- + The endpoint of the Red Hat Advanced Cluster Security Central instance to connect to, + including the port number. If no port is specified and the endpoint contains an https:// + protocol specification, then the port 443 is implicitly assumed. + If using a non-gRPC capable load balancer, use the WebSocket protocol by prefixing the endpoint + address with wss://. + Note: when leaving this blank, Sensor will attempt to connect to a Central instance running in the same + namespace. + type: string + clusterLabels: + additionalProperties: + type: string + description: Custom labels associated with a secured cluster in Red + Hat Advanced Cluster Security. + type: object + clusterName: + description: |- + The unique name of this cluster, as it will be shown in the Red Hat Advanced Cluster Security UI. + Note: Once a name is set here, you will not be able to change it again. You will need to delete + and re-create this object in order to register a cluster with a new name. + type: string + customize: + description: Customizations to apply on all Secured Cluster Services + components. + properties: + annotations: + additionalProperties: + type: string + description: Custom annotations to set on all managed objects. + type: object + deploymentDefaults: + description: |- + Global nodeSelector and tolerations for Deployment-based components. DaemonSets (Collector) are not affected. + Component-level nodeSelector and tolerations settings override these defaults on a field-by-field basis. + properties: + nodeSelector: + additionalProperties: + type: string + description: |- + Default nodeSelector applied to all Deployment-based components. Use this for custom node + selection criteria. + Cannot be used together with pinToNodes. + type: object + pinToNodes: + description: |- + Pin all Deployment-based components to specific node types. This is a convenience setting + that automatically configures both nodeSelector and tolerations with predefined values. + Use this for common scenarios like running on OpenShift infrastructure nodes. + For custom node selection, use the explicit nodeSelector and tolerations fields instead. + Cannot be used together with nodeSelector or tolerations fields. + The default is: None. 
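As a sketch of the deploymentDefaults behaviour described above (assumed values; the CR name and endpoint are hypothetical), pinning all Deployment-based Secured Cluster components to infrastructure nodes could look like this; note that pinToNodes cannot be combined with the explicit nodeSelector or tolerations fields:

    apiVersion: platform.stackrox.io/v1alpha1
    kind: SecuredCluster
    metadata:
      name: stackrox-secured-cluster-services   # hypothetical name
    spec:
      clusterName: production-cluster            # example value
      centralEndpoint: central.stackrox.svc:443  # example value
      customize:
        deploymentDefaults:
          pinToNodes: InfraRole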
+ enum: + - None + - InfraRole + type: string + tolerations: + description: |- + Default tolerations applied to all Deployment-based components. Use this when your target + nodes have custom taints that pods must tolerate. + Cannot be used together with pinToNodes. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists, Equal, Lt, and Gt. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + Lt and Gt perform numeric comparisons (requires feature gate TaintTolerationComparisonOperators). + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + envVars: + description: Custom environment variables to set on managed pods' + containers. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + labels: + additionalProperties: + type: string + description: Custom labels to set on all managed objects. + type: object + type: object + imagePullSecrets: + description: Additional image pull secrets to be taken into account + for pulling images. + items: + description: LocalSecretReference is a reference to a secret within + the same namespace. + properties: + name: + description: The name of the referenced secret. + type: string + required: + - name + type: object + type: array + misc: + description: |- + Deprecated field. This field will be removed in a future release. + Miscellaneous settings. + properties: + createSCCs: + description: |- + Deprecated field. This field will be removed in a future release. + Set this to true to have the operator create SecurityContextConstraints (SCCs) for the operands. This + isn't usually needed, and may interfere with other workloads. + type: boolean + type: object + monitoring: + description: Monitoring configuration. + properties: + openshift: + description: OpenShiftMonitoring defines settings related to OpenShift + Monitoring + properties: + enabled: + description: 'The default is: true.' + type: boolean + required: + - enabled + type: object + type: object + network: + description: Network configuration. + properties: + policies: + description: |- + To provide security at the network level, the ACS Operator creates NetworkPolicy resources by default. If you want to manage your own NetworkPolicy objects then set this to "Disabled". + The default is: Enabled. + enum: + - Enabled + - Disabled + type: string + type: object + overlays: + description: Overlays + items: + description: "K8sObjectOverlay is an overlay that applies a set + of patches to a resource.\nIt targets a resource by its API version, + kind, and name, and applies\na list of patches to this resource.\n\n# + Examples\n\n## Adding an annotation to a resource\n\n\tapiVersion: + v1\n\tkind: ServiceAccount\n\tname: central\n\tpatches:\n\t- path: + metadata.annotations.eks\\.amazonaws\\.com/role-arn\n\t value: + \"\\\"arn:aws:iam:1234:role\\\"\"\n\n## Adding an environment + variable to a deployment\n\n\tapiVersion: apps/v1\n\tkind: Deployment\n\tname: + central\n\tpatches:\n\t- path: spec.template.spec.containers[name:central].env[-1]\n\t + \ value: |\n\t name: MY_ENV_VAR\n\t value: value\n\n## Adding + an ingress to a network policy\n\n\tapiVersion: networking.k8s.io/v1\n\tkind: + NetworkPolicy\n\tname: allow-ext-to-central\n\tpatches:\n\t- path: + spec.ingress[-1]\n\t value: |\n\t ports:\n\t - port: 999\n\t + \ protocol: TCP\n\n## Changing the value of a ConfigMap\n\n\tapiVersion: + v1\n\tkind: ConfigMap\n\tname: central-endpoints\n\tpatches:\n\t- + path: data.endpoints\\.yaml:\n\t verbatim: |\n\t disableDefault: + false\n\n## Adding a container to a deployment\n\n\tapiVersion: + apps/v1\n\tkind: Deployment\n\tname: central\n\tpatches:\n\t - + path: spec.template.spec.containers[-1]\n\t value: |\n\t name: + nginx\n\t image: nginx\n\t ports:\n\t - containerPort: + 8000\n\t name: http\n\t protocol: TCP" + properties: + apiVersion: + description: Resource API version. + type: string + kind: + description: Resource kind. 
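Restated as plain YAML for readability, the ServiceAccount annotation example embedded in the overlay description above would be set on the custom resource roughly as follows; only the surrounding spec/overlays nesting is added here, the target and patch values are taken from that description:

    spec:
      overlays:
      - apiVersion: v1
        kind: ServiceAccount
        name: central
        patches:
        - path: metadata.annotations.eks\.amazonaws\.com/role-arn
          value: '"arn:aws:iam:1234:role"'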
+ type: string + name: + description: Name of resource. + type: string + optional: + description: |- + Optional marks the overlay as optional. + When Optional is true, and the specified resource does not exist in the output manifests, the overlay will be skipped, and a warning will be logged. + When Optional is false, and the specified resource does not exist in the output manifests, an error will be thrown. + type: boolean + patches: + description: List of patches to apply to resource. + items: + description: K8sObjectOverlayPatch defines a patch to apply + to a resource. + properties: + path: + description: |- + Path of the form a.[key1:value1].b.[:value2] + Where [key1:value1] is a selector for a key-value pair to identify a list element and [:value] is a value + selector to identify a list element in a leaf list. + All path intermediate nodes must exist. + type: string + value: + description: |- + Value to add, delete or replace. + For add, the path should be a new leaf. + For delete, value should be unset. + For replace, path should reference an existing node. + All values are strings but are converted into appropriate type based on schema. + type: string + verbatim: + description: |- + Verbatim value to add, delete or replace. + Same as Value, but the content is not interpreted as YAML and is treated as a literal string instead. + At least one of Value and Verbatim must be empty. + type: string + type: object + type: array + type: object + type: array + perNode: + description: Settings for the components running on each node in the + cluster (Collector and Compliance). + properties: + collector: + description: |- + Settings for the Collector container, which is responsible for collecting process and networking + activity at the host level. + properties: + collection: + description: |- + The method for system-level data collection. CORE_BPF is recommended. + If you select "NoCollection", you will not be able to see any information about network activity + and process executions. The remaining settings in this section will not have any effect. + The value is a subject of conversion by the operator if needed, e.g. to + remove deprecated methods. + The default is: CORE_BPF. + enum: + - EBPF + - CORE_BPF + - NoCollection + - KernelModule + type: string + forceCollection: + description: Obsolete field. This field will be removed in + a future release. + type: boolean + imageFlavor: + description: Obsolete field. + enum: + - Regular + - Slim + type: string + resources: + description: |- + Allows overriding the default resource settings for this component. Please consult the documentation + for an overview of default resource requirements and a sizing guide. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + type: object + compliance: + description: Settings for the Compliance container, which is responsible + for checking host-level configurations. + properties: + resources: + description: |- + Allows overriding the default resource settings for this component. Please consult the documentation + for an overview of default resource requirements and a sizing guide. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + type: object + hostAliases: + description: HostAliases allows configuring additional hostnames + to resolve in the pod's hosts file. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + nodeInventory: + description: Settings for the Node-Inventory container, which + is responsible for scanning the Nodes' filesystem. + properties: + resources: + description: |- + Allows overriding the default resource settings for this component. Please consult the documentation + for an overview of default resource requirements and a sizing guide. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + type: object + sfa: + description: Settings for the Sensitive File Activity container, + which is responsible for file activity monitoring on the Node. + properties: + agent: + description: |- + Specifies whether Sensitive File Activity agent is deployed. + The default is: Disabled. + enum: + - Enabled + - Disabled + type: string + resources: + description: |- + Allows overriding the default resource settings for this component. Please consult the documentation + for an overview of default resource requirements and a sizing guide. 
+ properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + type: object + taintToleration: + description: |- + To ensure comprehensive monitoring of your cluster activity, Red Hat Advanced Cluster Security + will run services on every node in the cluster, including tainted nodes by default. If you do + not want this behavior, please select 'AvoidTaints' here. + The default is: TolerateTaints. + enum: + - TolerateTaints + - AvoidTaints + type: string + type: object + processBaselines: + description: Settings relating to process baselines. + properties: + autoLock: + description: |- + Should process baselines be automatically locked when the observation period (1 hour by default) ends. + The default is: Disabled. + enum: + - Enabled + - Disabled + type: string + type: object + registryOverride: + description: Set this parameter to override the default registry in + images. For example, nginx:latest -> /library/nginx:latest + type: string + scanner: + description: |- + Settings for the Scanner component, which is responsible for vulnerability scanning of container + images stored in a cluster-local image repository. + properties: + analyzer: + description: Settings pertaining to the analyzer deployment, such + as for autoscaling. + properties: + hostAliases: + description: HostAliases allows configuring additional hostnames + to resolve in the pod's hosts file. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + nodeSelector: + additionalProperties: + type: string + description: |- + If you want this component to only run on specific nodes, you can configure a node selector here. + This setting overrides spec.customize.deploymentDefaults.nodeSelector. + type: object + resources: + description: |- + Allows overriding the default resource settings for this component. Please consult the documentation + for an overview of default resource requirements and a sizing guide. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + scaling: + description: Controls the number of analyzer replicas and + autoscaling. + properties: + autoScaling: + description: |- + When enabled, the number of component replicas is managed dynamically based on the load, within the limits + specified below. + The default is: Enabled. + enum: + - Enabled + - Disabled + type: string + maxReplicas: + description: 'The default is: 5.' + format: int32 + minimum: 1 + type: integer + minReplicas: + description: 'The default is: 2.' + format: int32 + minimum: 1 + type: integer + replicas: + description: |- + When autoscaling is disabled, the number of replicas will always be configured to match this value. + The default is: 3. + format: int32 + minimum: 1 + type: integer + type: object + tolerations: + description: |- + If you want this component to only run on specific nodes, you can configure tolerations of tainted nodes. 
+ This setting overrides spec.customize.deploymentDefaults.tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists, Equal, Lt, and Gt. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + Lt and Gt perform numeric comparisons (requires feature gate TaintTolerationComparisonOperators). + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + db: + description: Settings pertaining to the database used by the Red + Hat Advanced Cluster Security Scanner. + properties: + hostAliases: + description: HostAliases allows configuring additional hostnames + to resolve in the pod's hosts file. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + nodeSelector: + additionalProperties: + type: string + description: |- + If you want this component to only run on specific nodes, you can configure a node selector here. + This setting overrides spec.customize.deploymentDefaults.nodeSelector. + type: object + resources: + description: |- + Allows overriding the default resource settings for this component. Please consult the documentation + for an overview of default resource requirements and a sizing guide. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. 
+ If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + tolerations: + description: |- + If you want this component to only run on specific nodes, you can configure tolerations of tainted nodes. + This setting overrides spec.customize.deploymentDefaults.tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists, Equal, Lt, and Gt. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + Lt and Gt perform numeric comparisons (requires feature gate TaintTolerationComparisonOperators). + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + scannerComponent: + description: |- + If you do not want to deploy the Red Hat Advanced Cluster Security Scanner, you can disable it here + (not recommended). + If you do so, all the settings in this section will have no effect. + The default is: AutoSense. 
+ enum: + - AutoSense + - Disabled + type: string + type: object + scannerV4: + description: Settings for the Scanner V4 components, which can run + in addition to the previously existing Scanner components + properties: + db: + description: Settings pertaining to the DB deployment. + properties: + hostAliases: + description: HostAliases allows configuring additional hostnames + to resolve in the pod's hosts file. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + nodeSelector: + additionalProperties: + type: string + description: |- + If you want this component to only run on specific nodes, you can configure a node selector here. + This setting overrides spec.customize.deploymentDefaults.nodeSelector. + type: object + persistence: + description: |- + Configures how Scanner V4 should store its persistent data. + You can use a persistent volume claim (the recommended default), a host path, + or an emptyDir volume if Scanner V4 is running on a secured cluster without default StorageClass. + properties: + hostPath: + description: |- + Stores persistent data in a directory on the host. This is not recommended, and should only + be used together with a node selector (only available in YAML view). + properties: + path: + description: The path on the host running Central. + type: string + type: object + persistentVolumeClaim: + description: |- + Uses a Kubernetes persistent volume claim (PVC) to manage the storage location of persistent data. + Recommended for most users. + properties: + claimName: + description: |- + The name of the PVC to manage persistent data. If no PVC with the given name exists, it will be + created. + The default is: scanner-v4-db. + type: string + size: + description: |- + The size of the persistent volume when created through the claim. If a claim was automatically created, + this can be used after the initial deployment to resize (grow) the volume (only supported by some + storage class controllers). + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + type: string + storageClassName: + description: |- + The name of the storage class to use for the PVC. If your cluster is not configured with a default storage + class, you must select a value here. + type: string + type: object + type: object + resources: + description: |- + Allows overriding the default resource settings for this component. Please consult the documentation + for an overview of default resource requirements and a sizing guide. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
+ type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + tolerations: + description: |- + If you want this component to only run on specific nodes, you can configure tolerations of tainted nodes. + This setting overrides spec.customize.deploymentDefaults.tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists, Equal, Lt, and Gt. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + Lt and Gt perform numeric comparisons (requires feature gate TaintTolerationComparisonOperators). + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + indexer: + description: Settings pertaining to the indexer deployment. + properties: + hostAliases: + description: HostAliases allows configuring additional hostnames + to resolve in the pod's hosts file. 
+ items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + nodeSelector: + additionalProperties: + type: string + description: |- + If you want this component to only run on specific nodes, you can configure a node selector here. + This setting overrides spec.customize.deploymentDefaults.nodeSelector. + type: object + resources: + description: |- + Allows overriding the default resource settings for this component. Please consult the documentation + for an overview of default resource requirements and a sizing guide. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + scaling: + description: Controls the number of replicas and autoscaling + for this component. + properties: + autoScaling: + description: |- + When enabled, the number of component replicas is managed dynamically based on the load, within the limits + specified below. + The default is: Enabled. + enum: + - Enabled + - Disabled + type: string + maxReplicas: + description: 'The default is: 5.' + format: int32 + minimum: 1 + type: integer + minReplicas: + description: 'The default is: 2.' + format: int32 + minimum: 1 + type: integer + replicas: + description: |- + When autoscaling is disabled, the number of replicas will always be configured to match this value. + The default is: 3. 
+ format: int32 + minimum: 1 + type: integer + type: object + tolerations: + description: |- + If you want this component to only run on specific nodes, you can configure tolerations of tainted nodes. + This setting overrides spec.customize.deploymentDefaults.tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists, Equal, Lt, and Gt. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + Lt and Gt perform numeric comparisons (requires feature gate TaintTolerationComparisonOperators). + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + monitoring: + description: |- + Configures monitoring endpoint for Scanner V4. The monitoring endpoint + allows other services to collect metrics from Scanner V4, provided in + Prometheus compatible format. + properties: + exposeEndpoint: + description: |- + Expose the monitoring endpoint. A new service, "monitoring", + with port 9090, will be created as well as a network policy allowing + inbound connections to the port. + enum: + - Enabled + - Disabled + type: string + type: object + scannerComponent: + description: |- + If you want to enable the Scanner V4 component set this to "AutoSense" + If this field is not specified or set to "Default", the following defaulting takes place: + * for new installations, Scanner V4 is enabled starting with ACS 4.8; + * for upgrades to 4.8 from previous releases, Scanner V4 is disabled. + enum: + - Default + - AutoSense + - Disabled + type: string + type: object + sensor: + description: Settings for the Sensor component. + properties: + hostAliases: + description: HostAliases allows configuring additional hostnames + to resolve in the pod's hosts file. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. 
+ type: string + required: + - ip + type: object + type: array + nodeSelector: + additionalProperties: + type: string + description: |- + If you want this component to only run on specific nodes, you can configure a node selector here. + This setting overrides spec.customize.deploymentDefaults.nodeSelector. + type: object + resources: + description: |- + Allows overriding the default resource settings for this component. Please consult the documentation + for an overview of default resource requirements and a sizing guide. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + tolerations: + description: |- + If you want this component to only run on specific nodes, you can configure tolerations of tainted nodes. + This setting overrides spec.customize.deploymentDefaults.tolerations. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists, Equal, Lt, and Gt. Defaults to Equal. 
+ Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + Lt and Gt perform numeric comparisons (requires feature gate TaintTolerationComparisonOperators). + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + tls: + description: Settings related to Transport Layer Security, such as + Certificate Authorities. + properties: + additionalCAs: + description: Allows you to specify additional trusted Root CAs. + items: + description: AdditionalCA defines a certificate for an additional + Certificate Authority. + properties: + content: + description: PEM format + type: string + name: + description: Must be a valid file basename + type: string + required: + - content + - name + type: object + type: array + type: object + required: + - clusterName + type: object + status: + description: SecuredClusterStatus defines the observed state of SecuredCluster + properties: + clusterName: + description: |- + The assigned cluster name per the spec. This cannot be changed afterwards. If you need to change the + cluster name, please delete and recreate this resource. + type: string + conditions: + items: + description: StackRoxCondition defines a condition for a StackRox + custom resource. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + description: ConditionReason is a type of values of condition + reason. + type: string + status: + description: ConditionStatus is a type of values of condition + status. + type: string + type: + description: ConditionType is a type of values of condition + type. + type: string + required: + - status + - type + type: object + type: array + deployedRelease: + description: StackRoxRelease describes the Helm "release" that was + most recently applied. + properties: + version: + type: string + type: object + observedGeneration: + description: ObservedGeneration is the generation most recently observed + by the controller. + format: int64 + type: integer + productVersion: + description: The deployed version of the product. + type: string + required: + - conditions + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.20.0 + labels: + app: stackrox-operator + name: securitypolicies.config.stackrox.io +spec: + group: config.stackrox.io + names: + kind: SecurityPolicy + listKind: SecurityPolicyList + plural: securitypolicies + shortNames: + - sp + singular: securitypolicy + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: SecurityPolicy is the Schema for the policies API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: SecurityPolicySpec defines the desired state of SecurityPolicy + properties: + categories: + description: Categories is a list of categories that this policy falls + under. Category names must already exist in Central. + items: + type: string + minItems: 1 + type: array + criteriaLocked: + description: CriteriaLocked is unused and deprecated + type: boolean + description: + description: Description is a free-form text description of this policy. + pattern: ^[^\$]{0,800}$ + type: string + disabled: + description: Disabled toggles whether or not this policy will be executing + and actively firing alerts. + type: boolean + enforcementActions: + description: Enforcement lists the enforcement actions to take when + a violation from this policy is identified. Possible value are + UNSET_ENFORCEMENT, SCALE_TO_ZERO_ENFORCEMENT, UNSATISFIABLE_NODE_CONSTRAINT_ENFORCEMENT, + KILL_POD_ENFORCEMENT, FAIL_BUILD_ENFORCEMENT, FAIL_KUBE_REQUEST_ENFORCEMENT, + FAIL_DEPLOYMENT_CREATE_ENFORCEMENT, and. FAIL_DEPLOYMENT_UPDATE_ENFORCEMENT. + items: + enum: + - UNSET_ENFORCEMENT + - SCALE_TO_ZERO_ENFORCEMENT + - UNSATISFIABLE_NODE_CONSTRAINT_ENFORCEMENT + - KILL_POD_ENFORCEMENT + - FAIL_BUILD_ENFORCEMENT + - FAIL_KUBE_REQUEST_ENFORCEMENT + - FAIL_DEPLOYMENT_CREATE_ENFORCEMENT + - FAIL_DEPLOYMENT_UPDATE_ENFORCEMENT + type: string + type: array + eventSource: + description: EventSource describes which events should trigger execution + of this policy + enum: + - NOT_APPLICABLE + - DEPLOYMENT_EVENT + - AUDIT_LOG_EVENT + type: string + exclusions: + description: Exclusions define deployments or images that should be + excluded from this policy. + items: + properties: + deployment: + properties: + name: + type: string + scope: + properties: + cluster: + description: Cluster is either the name or the ID of + the cluster that this scope applies to + type: string + label: + properties: + key: + type: string + value: + type: string + type: object + namespace: + type: string + type: object + type: object + expiration: + format: date-time + type: string + image: + properties: + name: + type: string + type: object + name: + type: string + type: object + type: array + isDefault: + description: IsDefault is unused + type: boolean + lifecycleStages: + description: LifecycleStages describes which policy lifecylce stages + this policy applies to. Choices are DEPLOY, BUILD, and RUNTIME. + items: + enum: + - DEPLOY + - BUILD + - RUNTIME + type: string + minItems: 1 + type: array + mitreAttackVectors: + items: + properties: + tactic: + type: string + techniques: + items: + type: string + type: array + type: object + type: array + mitreVectorsLocked: + description: MitreVetorsLocked is unused and deprecated + type: boolean + notifiers: + description: Notifiers is a list of IDs or names of the notifiers + that should be triggered when a violation from this policy is identified. 
IDs + should be in the form of a UUID and are found through the Central + API. + items: + type: string + type: array + policyName: + description: PolicyName is the name of the policy as it appears in + the API and UI. Note that changing this value will rename the policy + as stored in the database. This field must be unique. + pattern: ^[^\n\r\$]{5,128}$ + type: string + policySections: + description: PolicySections define the violation criteria for this + policy. + items: + properties: + policyGroups: + description: PolicyGroups is the set of policies groups that + make up this section. Each group can be considered an individual + criterion. + items: + properties: + booleanOperator: + description: BooleanOperator determines if the values + are combined with an OR or an AND. Defaults to OR. + enum: + - OR + - AND + type: string + fieldName: + description: FieldName defines which field on a deployment + or image this PolicyGroup evaluates. See https://docs.openshift.com/acs/operating/manage-security-policies.html#policy-criteria_manage-security-policies + for a complete list of possible values. + type: string + negate: + description: Negate determines if the evaluation of this + PolicyGroup is negated. Default to false. + type: boolean + values: + description: Values is the list of values for the specified + field + items: + properties: + value: + description: Value is simply the string value + type: string + type: object + type: array + required: + - fieldName + type: object + type: array + sectionName: + description: SectionName is a user-friendly name for this section + of policies + type: string + required: + - policyGroups + type: object + minItems: 1 + type: array + rationale: + type: string + remediation: + description: Remediation describes how to remediate a violation of + this policy. + type: string + scope: + description: Scope defines clusters, namespaces, and deployments that + should be included in this policy. No scopes defined includes everything. + items: + properties: + cluster: + description: Cluster is either the name or the ID of the cluster + that this scope applies to + type: string + label: + properties: + key: + type: string + value: + type: string + type: object + namespace: + type: string + type: object + type: array + severity: + description: Severity defines how severe a violation from this policy + is. Possible values are UNSET_SEVERITY, LOW_SEVERITY, MEDIUM_SEVERITY, + HIGH_SEVERITY, and CRITICAL_SEVERITY. 
+ enum: + - UNSET_SEVERITY + - LOW_SEVERITY + - MEDIUM_SEVERITY + - HIGH_SEVERITY + - CRITICAL_SEVERITY + type: string + required: + - categories + - lifecycleStages + - policyName + - policySections + - severity + type: object + status: + properties: + accepted: + description: Accepted is deprecated in favor of conditions + type: boolean + conditions: + items: + description: SecurityPolicyCondition defines the observed state + of SecurityPolicy + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + status: + type: string + type: + type: string + type: object + type: array + message: + description: Message is deprecated in favor of conditions + type: string + policyId: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: stackrox-operator + name: stackrox-operator-controller-manager + namespace: stackrox-operator-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app: stackrox-operator + name: stackrox-operator-leader-election-role + namespace: stackrox-operator-system +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: stackrox-operator + name: stackrox-operator-manager-role +rules: +- apiGroups: + - '*' + resources: + - '*' + verbs: + - '*' +- apiGroups: + - platform.stackrox.io + resources: + - centrals + - securedclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - platform.stackrox.io + resources: + - centrals/finalizers + - securedclusters/finalizers + verbs: + - update +- apiGroups: + - platform.stackrox.io + resources: + - centrals/status + - securedclusters/status + verbs: + - get + - patch + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: stackrox-operator + name: stackrox-operator-metrics-reader +rules: +- nonResourceURLs: + - /metrics + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: stackrox-operator + name: stackrox-operator-proxy-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: stackrox-operator + name: stackrox-operator-leader-election-rolebinding + namespace: stackrox-operator-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: stackrox-operator-leader-election-role +subjects: +- kind: ServiceAccount + name: stackrox-operator-controller-manager + namespace: stackrox-operator-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: stackrox-operator + name: stackrox-operator-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: stackrox-operator-manager-role +subjects: +- kind: ServiceAccount + name: stackrox-operator-controller-manager + namespace: 
stackrox-operator-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: stackrox-operator + name: stackrox-operator-proxy-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: stackrox-operator-proxy-role +subjects: +- kind: ServiceAccount + name: stackrox-operator-controller-manager + namespace: stackrox-operator-system +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: stackrox-operator + control-plane: controller-manager + name: stackrox-operator-controller-manager-metrics-service + namespace: stackrox-operator-system +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: https + selector: + app: stackrox-operator + control-plane: controller-manager +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: stackrox-operator + control-plane: controller-manager + name: stackrox-operator-controller-manager + namespace: stackrox-operator-system +spec: + replicas: 1 + selector: + matchLabels: + app: stackrox-operator + control-plane: controller-manager + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + app: stackrox-operator + control-plane: controller-manager + spec: + containers: + - args: + - --health-probe-bind-address=:8081 + - --metrics-bind-address=0.0.0.0:8443 + - --leader-elect + env: + - name: RELATED_IMAGE_MAIN + - name: RELATED_IMAGE_SCANNER + - name: RELATED_IMAGE_SCANNER_SLIM + - name: RELATED_IMAGE_SCANNER_DB + - name: RELATED_IMAGE_SCANNER_DB_SLIM + - name: RELATED_IMAGE_COLLECTOR + - name: RELATED_IMAGE_ROXCTL + - name: RELATED_IMAGE_CENTRAL_DB + - name: RELATED_IMAGE_SCANNER_V4_DB + - name: RELATED_IMAGE_SCANNER_V4 + - name: MEMORY_LIMIT_BYTES + valueFrom: + resourceFieldRef: + containerName: manager + resource: limits.memory + image: quay.io/stackrox-io/stackrox-operator:4.10.0 + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + ports: + - containerPort: 8443 + name: https + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 200m + memory: 1Gi + requests: + cpu: 100m + memory: 200Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + serviceAccountName: stackrox-operator-controller-manager + terminationGracePeriodSeconds: 10 diff --git a/operator/tests/controller/metrics/200-access-no-auth.yaml b/operator/tests/controller/metrics/200-access-no-auth.gotmpl.yaml similarity index 87% rename from operator/tests/controller/metrics/200-access-no-auth.yaml rename to operator/tests/controller/metrics/200-access-no-auth.gotmpl.yaml index 3c07b9c48dfb8..6203167335e22 100644 --- a/operator/tests/controller/metrics/200-access-no-auth.yaml +++ b/operator/tests/controller/metrics/200-access-no-auth.gotmpl.yaml @@ -15,10 +15,8 @@ spec: args: - bash - "-c" - # TODO(ROX-22287): use $TEST_NAMESPACE from Makefile once templating is supported - >- - operator_ns="stackrox-operator" - url="https://rhacs-operator-controller-manager-metrics-service.$operator_ns.svc.cluster.local:8443/metrics"; + url="https://rhacs-operator-controller-manager-metrics-service.{{ .Vars.operator_ns }}.svc.cluster.local:8443/metrics"; set -u; curl --version; for attempt in $(seq 5); do diff --git a/operator/tests/controller/metrics/200-access-no-http2.yaml 
b/operator/tests/controller/metrics/200-access-no-http2.gotmpl.yaml similarity index 87% rename from operator/tests/controller/metrics/200-access-no-http2.yaml rename to operator/tests/controller/metrics/200-access-no-http2.gotmpl.yaml index ca45cefd10227..4039432f851e7 100644 --- a/operator/tests/controller/metrics/200-access-no-http2.yaml +++ b/operator/tests/controller/metrics/200-access-no-http2.gotmpl.yaml @@ -15,10 +15,8 @@ spec: args: - bash - "-c" - # TODO(ROX-22287): use $TEST_NAMESPACE from Makefile once templating is supported - >- - operator_ns="stackrox-operator" - url="https://rhacs-operator-controller-manager-metrics-service.$operator_ns.svc.cluster.local:8443/metrics"; + url="https://rhacs-operator-controller-manager-metrics-service.{{ .Vars.operator_ns }}.svc.cluster.local:8443/metrics"; set -u; curl --version; for attempt in $(seq 5); do diff --git a/operator/tests/controller/metrics/200-access-privileged.yaml b/operator/tests/controller/metrics/200-access-privileged.gotmpl.yaml similarity index 88% rename from operator/tests/controller/metrics/200-access-privileged.yaml rename to operator/tests/controller/metrics/200-access-privileged.gotmpl.yaml index b611a88b151c5..1195c8b3364c1 100644 --- a/operator/tests/controller/metrics/200-access-privileged.yaml +++ b/operator/tests/controller/metrics/200-access-privileged.gotmpl.yaml @@ -15,10 +15,8 @@ spec: args: - bash - "-c" - # TODO(ROX-22287): use $TEST_NAMESPACE from Makefile once templating is supported - >- - operator_ns="stackrox-operator" - url="https://rhacs-operator-controller-manager-metrics-service.$operator_ns.svc.cluster.local:8443/metrics"; + url="https://rhacs-operator-controller-manager-metrics-service.{{ .Vars.operator_ns }}.svc.cluster.local:8443/metrics"; set -eu; curl --version; token="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)"; diff --git a/operator/tests/controller/metrics/200-access-unprivileged.yaml b/operator/tests/controller/metrics/200-access-unprivileged.gotmpl.yaml similarity index 89% rename from operator/tests/controller/metrics/200-access-unprivileged.yaml rename to operator/tests/controller/metrics/200-access-unprivileged.gotmpl.yaml index 1a374f2420322..836d6d14feffd 100644 --- a/operator/tests/controller/metrics/200-access-unprivileged.yaml +++ b/operator/tests/controller/metrics/200-access-unprivileged.gotmpl.yaml @@ -15,10 +15,8 @@ spec: args: - bash - "-c" - # TODO(ROX-22287): use $TEST_NAMESPACE from Makefile once templating is supported - >- - operator_ns="stackrox-operator" - url="https://rhacs-operator-controller-manager-metrics-service.$operator_ns.svc.cluster.local:8443/metrics"; + url="https://rhacs-operator-controller-manager-metrics-service.{{ .Vars.operator_ns }}.svc.cluster.local:8443/metrics"; set -u; curl --version; token="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)"; diff --git a/operator/tests/run.sh b/operator/tests/run.sh index 60147a6b793d3..6a7ce3d5de4af 100755 --- a/operator/tests/run.sh +++ b/operator/tests/run.sh @@ -11,6 +11,8 @@ source "$ROOT/scripts/lib.sh" source "$ROOT/scripts/ci/lib.sh" test_operator_e2e() { + operator_cluster_type="$1" + info "Starting operator e2e tests" require_environment "KUBECONFIG" @@ -30,31 +32,44 @@ _EO_KUTTL_HELP_ image_prefetcher_prebuilt_await - info "Deploying operator" - junit_wrap deploy-previous-operator \ - "Deploy previously released version of the operator." 
\ - "${kuttl_help}" \ - "make" "-C" "operator" "deploy-previous-via-olm" + # TODO(ROX-27191): Once there *is* a previous version shipping a non-OLM operator distribution, + # use it on other platforms too and run the upgrade test as well. + if [[ $operator_cluster_type == openshift4 ]]; then + info "Deploying operator" + junit_wrap deploy-previous-operator \ + "Deploy previously released version of the operator." \ + "${kuttl_help}" \ + "make" "-C" "operator" "deploy-previous-via-olm" TEST_NAMESPACE="rhacs-operator-system" + fi image_prefetcher_system_await - info "Executing operator upgrade test" - junit_wrap test-upgrade \ - "Test operator upgrade from previously released version to the current one." \ - "${kuttl_help}" \ - "make" "-C" "operator" "test-upgrade" || FAILED=1 - store_test_results "operator/build/kuttl-test-artifacts-upgrade" "kuttl-test-artifacts-upgrade" - if junit_contains_failure "$(stored_test_results "kuttl-test-artifacts-upgrade")"; then - # Prevent double-reporting - remove_junit_record test-upgrade + if [[ $operator_cluster_type == openshift4 ]]; then + info "Executing operator upgrade test" + junit_wrap test-upgrade \ + "Test operator upgrade from previously released version to the current one." \ + "${kuttl_help}" \ + "make" "-C" "operator" "test-upgrade" TEST_NAMESPACE="rhacs-operator-system" || FAILED=1 + store_test_results "operator/build/kuttl-test-artifacts-upgrade" "kuttl-test-artifacts-upgrade" + if junit_contains_failure "$(stored_test_results "kuttl-test-artifacts-upgrade")"; then + # Prevent double-reporting + remove_junit_record test-upgrade + fi + [[ $FAILED = 0 ]] || die "operator upgrade tests failed" + else + info "Deploying operator using manifests..." + junit_wrap deploy-operator \ + "Deploy current version of the operator." \ + "${kuttl_help}" \ + "make" "-C" "operator" "build-installer" "deploy-via-installer" TEST_NAMESPACE="rhacs-operator-system" fi - [[ $FAILED = 0 ]] || die "operator upgrade tests failed" info "Executing operator e2e tests" junit_wrap test-e2e \ "Run operator E2E tests." \ "${kuttl_help}" \ - "make" "-C" "operator" "test-e2e-deployed" || FAILED=1 + "make" "-C" "operator" "test-e2e-deployed" TEST_NAMESPACE="rhacs-operator-system" || FAILED=1 + # TODO(ROX-11901): determine the test namespace above based on branding, to make it possible to e2e-test the community build store_test_results "operator/build/kuttl-test-artifacts" "kuttl-test-artifacts" if junit_contains_failure "$(stored_test_results "kuttl-test-artifacts")"; then # Prevent double-reporting @@ -71,5 +86,5 @@ _EO_KUTTL_HELP_ } if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then - test_operator_e2e "$*" + test_operator_e2e "$@" fi diff --git a/tests/e2e/lib.sh b/tests/e2e/lib.sh index c0f1cf4656f21..e373787fd7fe3 100755 --- a/tests/e2e/lib.sh +++ b/tests/e2e/lib.sh @@ -213,6 +213,7 @@ deploy_stackrox_operator() { ocp_version=$(kubectl get clusterversion -o=jsonpath='{.items[0].status.desired.version}' | cut -d '.' 
-f 1,2) make -C operator kuttl deploy-via-olm \ + TEST_NAMESPACE="rhacs-operator-system" \ INDEX_IMG_BASE="quay.io/rhacs-eng/stackrox-operator-index" \ INDEX_IMG_TAG="$(< operator/midstream/iib.json jq -r --arg version "$ocp_version" '.iibs[$version]')" \ INSTALL_CHANNEL="$(< operator/midstream/iib.json jq -r '.operator.channel')" \ @@ -220,6 +221,7 @@ deploy_stackrox_operator() { else info "Deploying ACS operator" make -C operator kuttl deploy-via-olm \ + TEST_NAMESPACE="rhacs-operator-system" \ ROX_PRODUCT_BRANDING=RHACS_BRANDING fi } @@ -1054,17 +1056,17 @@ remove_existing_stackrox_resources() { kubectl delete --wait "$namespace" done - if kubectl get ns stackrox-operator >/dev/null 2>&1; then + if kubectl get ns rhacs-operator-system >/dev/null 2>&1; then # Delete subscription first to give OLM a chance to notice and prevent errors on re-install. # See https://issues.redhat.com/browse/ROX-30450 - kubectl -n stackrox-operator delete --ignore-not-found --wait subscription.operators.coreos.com --all + kubectl -n rhacs-operator-system delete --ignore-not-found --wait subscription.operators.coreos.com --all # Then delete remaining OLM resources. # The awk is a quick hack to omit templating that might confuse kubectl's YAML parser. # We only care about apiVersion, kind and metadata, which do not contain any templating. awk 'BEGIN{interesting=1} /^spec:/{interesting=0} /^---$/{interesting=1} interesting{print}' operator/hack/operator.envsubst.yaml | \ - kubectl -n stackrox-operator delete --ignore-not-found --wait -f - + kubectl -n rhacs-operator-system delete --ignore-not-found --wait -f - fi - kubectl delete --ignore-not-found ns stackrox-operator --wait + kubectl delete --ignore-not-found ns rhacs-operator-system --wait kubectl delete --ignore-not-found crd {centrals.platform,securedclusters.platform,securitypolicies.config}.stackrox.io --wait ) 2>&1 | sed -e 's/^/out: /' || true # (prefix output to avoid triggering prow log focus) info "Finished tearing down resources." diff --git a/tests/e2e/run-scanner-v4-install.bats b/tests/e2e/run-scanner-v4-install.bats index 2c970aca93f1c..64532729ae84d 100755 --- a/tests/e2e/run-scanner-v4-install.bats +++ b/tests/e2e/run-scanner-v4-install.bats @@ -840,7 +840,7 @@ EOT # Install old version of the operator & deploy StackRox. - VERSION="${OPERATOR_VERSION_TAG}" make -C operator deploy-previous-via-olm + VERSION="${OPERATOR_VERSION_TAG}" make -C operator deploy-previous-via-olm TEST_NAMESPACE="rhacs-operator-system" _deploy_stackrox "" "${CUSTOM_CENTRAL_NAMESPACE}" "${CUSTOM_SENSOR_NAMESPACE}" "false" _begin "verify" @@ -856,14 +856,14 @@ EOT # Upgrade operator info "Upgrading StackRox Operator to version ${OPERATOR_VERSION_TAG}..." - VERSION="${OPERATOR_VERSION_TAG}" make -C operator upgrade-via-olm + VERSION="${OPERATOR_VERSION_TAG}" make -C operator upgrade-via-olm TEST_NAMESPACE="rhacs-operator-system" info "Waiting for new rhacs-operator pods to become ready" # Give the old pods some time to terminate, otherwise we can end up # in a situation where the old pods are just about to terminate and this # would confuse the kubectl wait invocation below, which notices pods # vanishing while actually waiting for them to become ready. sleep 60 - "${ORCH_CMD}" Date: Wed, 28 Jan 2026 09:03:03 -0500 Subject: [PATCH 053/232] ROX-32664: Add roxagent to main image (#18383) Add roxagent to the main image and provide Quadlet deployment files for running it as a periodic systemd service on RHEL VMs. What's included --------------- 1. 
roxagent binary in main image 2. Quadlet deployment files - Systemd units for running roxagent via Podman Quadlet Installation -------------------- Follow these steps on a RHEL VM with Podman installed. Clone or copy the quadlet directory: cd compliance/virtualmachines/roxagent/quadlet Edit roxagent.container to set your image tag, e.g.: Image=quay.io/stackrox-io/main:4.10.0 Install locally: ./install.sh Or install on a remote VM: ./install.sh user@hostname ./install.sh user@hostname 2222 # custom SSH port Quadlet Components ------------------ * roxagent.container: Quadlet container unit that runs roxagent * roxagent.timer: Systemd timer (hourly by default) * roxagent-prep.service: Copies RPM database for SQLite WAL compatibility * install.sh: Installation script * README.md: Detailed documentation RHEL 9 and 10 use SQLite for the RPM database. SQLite's WAL (Write-Ahead Logging) mode requires write access even for read-only queries. The prep service copies the database to `/tmp/roxagent-rpm` to allow scanning without mounting the host's RPM database read-write. This is not required for the DNF database because claircore already copies it. --- Makefile | 1 + compliance/virtualmachines/roxagent/README.md | 15 +- .../roxagent/quadlet/README.md | 175 ++++++++++++++++++ .../roxagent/quadlet/install.sh | 82 ++++++++ .../roxagent/quadlet/roxagent-prep.service | 11 ++ .../roxagent/quadlet/roxagent.container | 34 ++++ .../roxagent/quadlet/roxagent.timer | 11 ++ image/rhel/Dockerfile | 1 + image/rhel/konflux.Dockerfile | 1 + 9 files changed, 330 insertions(+), 1 deletion(-) create mode 100644 compliance/virtualmachines/roxagent/quadlet/README.md create mode 100755 compliance/virtualmachines/roxagent/quadlet/install.sh create mode 100644 compliance/virtualmachines/roxagent/quadlet/roxagent-prep.service create mode 100644 compliance/virtualmachines/roxagent/quadlet/roxagent.container create mode 100644 compliance/virtualmachines/roxagent/quadlet/roxagent.timer diff --git a/Makefile b/Makefile index fd8462d39ad70..799959e64cc3c 100644 --- a/Makefile +++ b/Makefile @@ -681,6 +681,7 @@ endif cp bin/linux_$(GOARCH)/upgrader image/rhel/bin/sensor-upgrader cp bin/linux_$(GOARCH)/admission-control image/rhel/bin/admission-control cp bin/linux_$(GOARCH)/compliance image/rhel/bin/compliance + cp bin/linux_$(GOARCH)/roxagent image/rhel/bin/roxagent # Workaround to bug in lima: https://github.com/lima-vm/lima/issues/602 find image/rhel/bin -not -path "*/.*" -type f -exec chmod +x {} \; diff --git a/compliance/virtualmachines/roxagent/README.md b/compliance/virtualmachines/roxagent/README.md index 40923f8b2807f..c7704fc7e5dd4 100644 --- a/compliance/virtualmachines/roxagent/README.md +++ b/compliance/virtualmachines/roxagent/README.md @@ -41,7 +41,20 @@ sudo ./roxagent --daemon --index-interval 10m --host-path /custom/path --port 20 The host receives these reports and forwards them to StackRox Central for vulnerability analysis. -## Building +## Deployment + +### Using Quadlet (Recommended for RHEL VMs) + +For RHEL 9 VMs, use Podman Quadlet to run roxagent as a periodic systemd service. +See [quadlet/README.md](quadlet/README.md) for detailed instructions. + +```bash +cd quadlet +./install.sh # Install locally +./install.sh user@host # Install on remote VM +``` + +### Building from Source ```bash go build -o roxagent .
diff --git a/compliance/virtualmachines/roxagent/quadlet/README.md b/compliance/virtualmachines/roxagent/quadlet/README.md new file mode 100644 index 0000000000000..a959052166c72 --- /dev/null +++ b/compliance/virtualmachines/roxagent/quadlet/README.md @@ -0,0 +1,175 @@ +# Quadlet Deployment for roxagent + +Deploy roxagent as a periodic systemd service on RHEL VMs using Podman Quadlet. + +## Overview + +This deployment uses [Podman Quadlet](https://docs.podman.io/en/latest/markdown/podman-systemd.unit.5.html) to run roxagent from a container image as a systemd service. The agent runs hourly, scans installed packages, and reports them to StackRox via vsock. + +### Components + +| File | Description | +|------|-------------| +| `roxagent.container` | Quadlet container unit that runs roxagent | +| `roxagent.timer` | Systemd timer that triggers hourly scans | +| `roxagent-prep.service` | Prepares RPM database for scanning | +| `install.sh` | Installation script for local or remote deployment | + +## Prerequisites + +* RHEL 8, 9, or 10 VM running on KubeVirt with vsock enabled +* Podman installed (`dnf install -y podman`) +* StackRox deployed with VM scanning enabled (`ROX_VIRTUAL_MACHINES=true`) +* Network access to pull the StackRox main image + +## Installation + +### 1. Configure the Image Tag + +Edit `roxagent.container` and set the correct image tag: + +```ini +Image=quay.io/stackrox-io/main:4.10.0 +``` + +Use the same version as your StackRox Central deployment. + +### 2. Install the Units + +**Local installation:** + +```bash +./install.sh +``` + +**Remote installation via SSH:** + +```bash +./install.sh user@hostname +./install.sh user@hostname 2222 # Custom SSH port +``` + +### 3. Verify Installation + +```bash +# Check timer status +sudo systemctl list-timers roxagent.timer + +# Run immediately +sudo systemctl start roxagent.service + +# View logs +sudo journalctl -u roxagent.service -f +``` + +## How It Works + +### Architecture + +``` +┌─────────────────────────────────────────────────────────────┐ +│ RHEL VM │ +│ ┌─────────────────┐ │ +│ │ roxagent.timer │ ──(hourly)──▶ roxagent.service │ +│ └─────────────────┘ │ │ +│ ▼ │ +│ ┌─────────────────┐ ┌────────────────────────┐ │ +│ │ roxagent-prep │ ──────▶ │ roxagent container │ │ +│ │ (copy RPM db) │ │ - scans /host/var/lib/ │ │ +│ └─────────────────┘ │ - sends via vsock │ │ +│ └───────────┬────────────┘ │ +└──────────────────────────────────────────┼──────────────────┘ + │ vsock +┌──────────────────────────────────────────┼─────────────────┐ +│ Kubernetes Host ▼ │ +│ ┌────────────────────────────────────────────┐ │ +│ │ collector pod (compliance container) │ │ +│ │ - receives vsock connections │ │ +│ │ - forwards to Sensor │ │ +│ └─────────────────────┬──────────────────────┘ │ +│ │ gRPC │ +│ ▼ │ +│ ┌────────────────────────────────────────────┐ │ +│ │ Sensor ──▶ Central │ │ +│ └────────────────────────────────────────────┘ │ +└────────────────────────────────────────────────────────────┘ +``` + +### Why Copy the RPM Database? + +The `roxagent-prep.service` copies `/var/lib/rpm` to `/tmp/roxagent-rpm` before each scan. This is required because: + +1. **SQLite WAL Mode**: RHEL 9 and 10 use SQLite for the RPM database. SQLite's Write-Ahead Logging (WAL) requires write access even for read-only queries. RHEL 8 uses BerkeleyDB, which also benefits from copying. + +2. **Safety**: Copying protects the host's RPM database from any potential issues during scanning. + +3. 
**Consistency**: The copy provides a point-in-time snapshot, avoiding conflicts if packages are installed during the scan. + +## Configuration + +### Scan Interval + +Edit `roxagent.timer` to change the scan frequency: + +```ini +[Timer] +OnBootSec=5min # First scan after boot +OnUnitActiveSec=1h # Subsequent scans (change to 30m, 2h, etc.) +``` + +### Container Options + +Edit `roxagent.container` to customize: + +```ini +# Add verbose output +Exec=--verbose --host-path /host + +# Change vsock port (must match StackRox config) +Exec=--host-path /host --port 2048 +``` + +## Troubleshooting + +### No packages found + +Check if the RPM database copy succeeded: + +```bash +ls -la /tmp/roxagent-rpm/ +sudo journalctl -u roxagent-prep.service +``` + +### vsock connection failed + +Verify vsock is enabled in the VM: + +```bash +ls -la /dev/vsock +lsmod | grep vsock +``` + +### Container fails to start + +Check Quadlet generation: + +```bash +/usr/libexec/podman/quadlet --dryrun +sudo journalctl -u roxagent.service +``` + +### VM not appearing in Central + +1. Verify `ROX_VIRTUAL_MACHINES=true` is set on Central and Sensor +2. Check compliance container logs in the collector pod +3. Verify Sensor can reach Central + +## Uninstallation + +```bash +sudo systemctl disable --now roxagent.timer +sudo rm /etc/containers/systemd/roxagent.container +sudo rm /etc/systemd/system/roxagent.timer +sudo rm /etc/systemd/system/roxagent-prep.service +sudo systemctl daemon-reload +``` diff --git a/compliance/virtualmachines/roxagent/quadlet/install.sh b/compliance/virtualmachines/roxagent/quadlet/install.sh new file mode 100755 index 0000000000000..49da697fc7b91 --- /dev/null +++ b/compliance/virtualmachines/roxagent/quadlet/install.sh @@ -0,0 +1,82 @@ +#!/bin/bash +# Install roxagent Quadlet units on a RHEL VM +# +# Usage: +# ./install.sh # Install locally +# ./install.sh user@host # Install on remote host via SSH +# ./install.sh user@host 2222 # Install on remote host with custom SSH port + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +install_locally() { + echo "Installing Quadlet units locally..." + + # Quadlet container file + sudo mkdir -p /etc/containers/systemd/ + sudo cp "${SCRIPT_DIR}/roxagent.container" /etc/containers/systemd/ + sudo restorecon -Rv /etc/containers/systemd/ 2>/dev/null || true + + # Timer and prep service go in standard systemd directory + sudo cp "${SCRIPT_DIR}/roxagent.timer" /etc/systemd/system/ + sudo cp "${SCRIPT_DIR}/roxagent-prep.service" /etc/systemd/system/ + sudo restorecon -Rv /etc/systemd/system/roxagent.timer /etc/systemd/system/roxagent-prep.service 2>/dev/null || true + + echo "Reloading systemd..." + sudo systemctl daemon-reload + + echo "Enabling and starting timer..." + sudo systemctl enable --now roxagent.timer + + echo "Status:" + sudo systemctl list-timers roxagent.timer +} + +install_remote() { + local REMOTE_HOST="$1" + local SSH_PORT="${2:-22}" + + echo "Installing Quadlet units on ${REMOTE_HOST} (port ${SSH_PORT})..." 
+ + # Copy files + scp -P "${SSH_PORT}" "${SCRIPT_DIR}/roxagent.container" "${REMOTE_HOST}:/tmp/" + scp -P "${SSH_PORT}" "${SCRIPT_DIR}/roxagent.timer" "${REMOTE_HOST}:/tmp/" + scp -P "${SSH_PORT}" "${SCRIPT_DIR}/roxagent-prep.service" "${REMOTE_HOST}:/tmp/" + + # Install on remote + ssh -p "${SSH_PORT}" "${REMOTE_HOST}" << 'EOF' + # Quadlet container file + sudo mkdir -p /etc/containers/systemd/ + sudo mv /tmp/roxagent.container /etc/containers/systemd/ + sudo restorecon -Rv /etc/containers/systemd/ 2>/dev/null || true + + # Timer and prep service go in standard systemd directory + sudo mv /tmp/roxagent.timer /etc/systemd/system/ + sudo mv /tmp/roxagent-prep.service /etc/systemd/system/ + sudo restorecon -Rv /etc/systemd/system/roxagent.timer /etc/systemd/system/roxagent-prep.service 2>/dev/null || true + + echo "Reloading systemd..." + sudo systemctl daemon-reload + + echo "Enabling and starting timer..." + sudo systemctl enable --now roxagent.timer + + echo "Status:" + sudo systemctl list-timers roxagent.timer +EOF +} + +# Main +if [ $# -eq 0 ]; then + install_locally +else + install_remote "$1" "${2:-22}" +fi + +echo "" +echo "Done! The roxagent will run hourly." +echo "" +echo "To run immediately: sudo systemctl start roxagent.service" +echo "To view logs: sudo journalctl -u roxagent.service -f" +echo "To check timer: sudo systemctl list-timers roxagent.timer" diff --git a/compliance/virtualmachines/roxagent/quadlet/roxagent-prep.service b/compliance/virtualmachines/roxagent/quadlet/roxagent-prep.service new file mode 100644 index 0000000000000..35f072e3cb802 --- /dev/null +++ b/compliance/virtualmachines/roxagent/quadlet/roxagent-prep.service @@ -0,0 +1,11 @@ +[Unit] +Description=Prepare RPM database for StackRox VM Agent +# Copy the RPM database to a writable location because SQLite WAL mode +# requires write access even for read-only queries + +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=/bin/rm -rf /tmp/roxagent-rpm +ExecStart=/bin/cp -a /var/lib/rpm /tmp/roxagent-rpm +ExecStart=/bin/chmod -R 755 /tmp/roxagent-rpm diff --git a/compliance/virtualmachines/roxagent/quadlet/roxagent.container b/compliance/virtualmachines/roxagent/quadlet/roxagent.container new file mode 100644 index 0000000000000..aa2fbb6a447ea --- /dev/null +++ b/compliance/virtualmachines/roxagent/quadlet/roxagent.container @@ -0,0 +1,34 @@ +[Unit] +Description=StackRox VM Agent Container +# Copy RPM database before starting (SQLite WAL requires writable access) +Requires=roxagent-prep.service +After=roxagent-prep.service + +[Container] +# Replace with your StackRox main image tag +Image=quay.io/stackrox-io/main:4.10.x-697-g3c843a274f +Exec=--host-path /host +Network=host + +# Privileged for vsock access +# Use PodmanArgs for entrypoint (RHEL 8 Quadlet doesn't support Entrypoint key) +# Run as root since image defaults to non-root user +SecurityLabelDisable=true +PodmanArgs=--privileged --user root --entrypoint /stackrox/bin/roxagent --device /dev/vsock + +# Mount copied RPM database (writable for SQLite WAL) and host config files (read-only) +Volume=/tmp/roxagent-rpm:/host/var/lib/rpm:rw +Volume=/etc/yum.repos.d:/host/etc/yum.repos.d:ro +Volume=/etc/os-release:/host/etc/os-release:ro +Volume=/etc/redhat-release:/host/etc/redhat-release:ro +Volume=/etc/system-release-cpe:/host/etc/system-release-cpe:ro +# DNF cache and state for repo-to-package mapping +Volume=/var/cache/dnf:/host/var/cache/dnf:ro +Volume=/var/lib/dnf:/host/var/lib/dnf:ro + +[Service] +Type=oneshot +TimeoutStartSec=300 + +[Install] 
+WantedBy= diff --git a/compliance/virtualmachines/roxagent/quadlet/roxagent.timer b/compliance/virtualmachines/roxagent/quadlet/roxagent.timer new file mode 100644 index 0000000000000..603b372913253 --- /dev/null +++ b/compliance/virtualmachines/roxagent/quadlet/roxagent.timer @@ -0,0 +1,11 @@ +[Unit] +Description=Run StackRox VM Agent periodically + +[Timer] +OnBootSec=5min +OnUnitActiveSec=3h40m +RandomizedDelaySec=40min +Persistent=true + +[Install] +WantedBy=timers.target diff --git a/image/rhel/Dockerfile b/image/rhel/Dockerfile index ca26215f8951f..1b2507d3b0695 100644 --- a/image/rhel/Dockerfile +++ b/image/rhel/Dockerfile @@ -85,6 +85,7 @@ COPY bin/kubernetes-sensor /stackrox/bin/kubernetes-sensor COPY bin/sensor-upgrader /stackrox/bin/sensor-upgrader COPY bin/admission-control /stackrox/bin/admission-control COPY bin/config-controller /stackrox/bin/config-controller +COPY bin/roxagent /stackrox/bin/roxagent COPY bin/roxctl* /assets/downloads/cli/ RUN ln -s /assets/downloads/cli/roxctl-linux-${TARGET_ARCH} /stackrox/roxctl && \ diff --git a/image/rhel/konflux.Dockerfile b/image/rhel/konflux.Dockerfile index fccfb2974cbf5..62a76a3be257b 100644 --- a/image/rhel/konflux.Dockerfile +++ b/image/rhel/konflux.Dockerfile @@ -80,6 +80,7 @@ COPY --from=go-builder /go/src/github.com/stackrox/rox/app/image/rhel/bin/kubern COPY --from=go-builder /go/src/github.com/stackrox/rox/app/image/rhel/bin/sensor-upgrader /stackrox/bin/ COPY --from=go-builder /go/src/github.com/stackrox/rox/app/image/rhel/bin/admission-control /stackrox/bin/ COPY --from=go-builder /go/src/github.com/stackrox/rox/app/image/rhel/bin/config-controller /stackrox/bin/ +COPY --from=go-builder /go/src/github.com/stackrox/rox/app/image/rhel/bin/roxagent /stackrox/bin/ COPY --from=go-builder /go/src/github.com/stackrox/rox/app/image/rhel/static-bin/* /stackrox/ RUN GOARCH=$(uname -m) ; \ case $GOARCH in x86_64) GOARCH=amd64 ;; aarch64) GOARCH=arm64 ;; esac ; \ From 4be71cf0d3b31e177cf32def1739b34bf42c5434 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Luis=20Valero=20Mart=C3=ADn?= Date: Wed, 28 Jan 2026 15:30:41 +0100 Subject: [PATCH 054/232] ROX-32730: Add labels to the pyroscope integration (#18678) --- pkg/continuousprofiling/profiler.go | 31 ++++++ pkg/continuousprofiling/profiler_test.go | 115 +++++++++++++++++++++-- pkg/env/continuous_profiling.go | 22 +++++ 3 files changed, 162 insertions(+), 6 deletions(-) diff --git a/pkg/continuousprofiling/profiler.go b/pkg/continuousprofiling/profiler.go index 69217f052fe62..4a87e0dae7931 100644 --- a/pkg/continuousprofiling/profiler.go +++ b/pkg/continuousprofiling/profiler.go @@ -3,6 +3,7 @@ package continuousprofiling import ( "net/url" "runtime" + "strings" "github.com/grafana/pyroscope-go" "github.com/pkg/errors" @@ -57,6 +58,10 @@ var ( // DefaultConfig creates a new configuration with default properties. 
func DefaultConfig() *pyroscope.Config { + labels, err := parseLabels(env.ContinuousProfilingLabels.Setting()) + if err != nil { + log.Errorf("Unable to parse Labels in %q: %v", env.ContinuousProfilingLabels.EnvVar(), err) + } return &pyroscope.Config{ ApplicationName: env.ContinuousProfilingAppName.Setting(), ServerAddress: env.ContinuousProfilingServerAddress.Setting(), @@ -64,6 +69,7 @@ func DefaultConfig() *pyroscope.Config { BasicAuthPassword: env.ContinuousProfilingBasicAuthPassword.Setting(), ProfileTypes: DefaultProfiles, Logger: nil, + Tags: labels, } } @@ -105,6 +111,31 @@ func validateServerAddress(address string) (string, error) { return sanitizedAddress, nil } +func parseLabels(labels string) (map[string]string, error) { + parsedLabels := make(map[string]string) + entries := strings.Split(labels, ",") + for _, entry := range entries { + entry = strings.TrimSpace(entry) + if entry == "" { + continue + } + parts := strings.SplitN(entry, "=", 2) + if len(parts) != 2 { + return nil, errors.Errorf("invalid label format: %q (expected key=value)", entry) + } + key := strings.TrimSpace(parts[0]) + value := strings.TrimSpace(parts[1]) + if key == "" { + return nil, errors.Errorf("empty label key in %q", entry) + } + if value == "" { + return nil, errors.Errorf("empty label value in %q", entry) + } + parsedLabels[key] = value + } + return parsedLabels, nil +} + func validateConfig(cfg *pyroscope.Config) error { if cfg.ApplicationName == "" { return ErrApplicationName diff --git a/pkg/continuousprofiling/profiler_test.go b/pkg/continuousprofiling/profiler_test.go index b635323efba9f..1911cb9295f87 100644 --- a/pkg/continuousprofiling/profiler_test.go +++ b/pkg/continuousprofiling/profiler_test.go @@ -41,12 +41,29 @@ func TestContinuousProfiling(t *testing.T) { } func (s *continuousProfilingSuite) TestDefaultValues() { - s.T().Setenv(env.ContinuousProfilingAppName.EnvVar(), "test") - cfg := DefaultConfig() - s.startClientFuncWrapper.EXPECT().Start(gomock.Any()).Times(1).Return(nil, nil) - s.Assert().NoError(SetupClient(cfg)) - s.Assert().Equal(*cfg, *DefaultConfig()) - s.Assert().Equal(mutexProfileFraction, runtime.SetMutexProfileFraction(-1)) + s.Run("all defaults success", func() { + s.T().Setenv(env.ContinuousProfilingAppName.EnvVar(), "test") + s.T().Setenv(env.ContinuousProfilingLabels.EnvVar(), "app=stackrox,env=production") + cfg := DefaultConfig() + s.startClientFuncWrapper.EXPECT().Start(gomock.Any()).Times(1).Return(nil, nil) + s.Assert().NoError(SetupClient(cfg)) + s.Assert().Equal(*cfg, *DefaultConfig()) + s.Assert().Equal(mutexProfileFraction, runtime.SetMutexProfileFraction(-1)) + s.Assert().Equal(map[string]string{ + "app": "stackrox", + "env": "production", + }, cfg.Tags) + }) + s.Run("fail labels parsing", func() { + s.T().Setenv(env.ContinuousProfilingAppName.EnvVar(), "test") + s.T().Setenv(env.ContinuousProfilingLabels.EnvVar(), "invalid-labels") + cfg := DefaultConfig() + s.startClientFuncWrapper.EXPECT().Start(gomock.Any()).Times(1).Return(nil, nil) + s.Assert().NoError(SetupClient(cfg)) + s.Assert().Equal(*cfg, *DefaultConfig()) + s.Assert().Equal(mutexProfileFraction, runtime.SetMutexProfileFraction(-1)) + s.Assert().Nil(cfg.Tags) + }) } func (s *continuousProfilingSuite) TestProfileValidation() { @@ -165,6 +182,92 @@ func (s *continuousProfilingSuite) TestClientStartError() { s.Assert().Error(SetupClient(cfg)) } +func (s *continuousProfilingSuite) TestParseLabels() { + cases := map[string]struct { + input string + expectedLabels map[string]string + expectedError 
string + }{ + "empty string": { + input: "", + expectedLabels: map[string]string{}, + }, + "single label": { + input: "key=value", + expectedLabels: map[string]string{ + "key": "value", + }, + }, + "multiple labels": { + input: "app=stackrox,env=production,team=security", + expectedLabels: map[string]string{ + "app": "stackrox", + "env": "production", + "team": "security", + }, + }, + "labels with whitespace": { + input: " key1 = value1 , key2 = value2 ", + expectedLabels: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + "empty entries ignored": { + input: "key1=value1,,key2=value2,,,", + expectedLabels: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + "value with equals sign": { + input: "url=https://example.com,token=abc=123", + expectedLabels: map[string]string{ + "url": "https://example.com", + "token": "abc=123", + }, + }, + "invalid format no equals": { + input: "invalid", + expectedError: "invalid label format", + }, + "empty key": { + input: "=value", + expectedError: "empty label key", + }, + "empty key with whitespace": { + input: " =value", + expectedError: "empty label key", + }, + "empty value": { + input: "key=", + expectedError: "empty label value", + }, + "empty value with whitespace": { + input: "key= ", + expectedError: "empty label value", + }, + "multiple entries with one invalid": { + input: "valid=yes,invalid", + expectedError: "invalid label format", + }, + } + + for tName, tCase := range cases { + s.Run(tName, func() { + labels, err := parseLabels(tCase.input) + if tCase.expectedError != "" { + s.Assert().Error(err) + s.Assert().Contains(err.Error(), tCase.expectedError) + s.Assert().Nil(labels) + } else { + s.Assert().NoError(err) + s.Assert().Equal(tCase.expectedLabels, labels) + } + }) + } +} + func resetRuntimeProfiles(t *testing.T) { runtime.SetBlockProfileRate(0) runtime.SetMutexProfileFraction(0) diff --git a/pkg/env/continuous_profiling.go b/pkg/env/continuous_profiling.go index e5cde403b80e7..448d8776cb41b 100644 --- a/pkg/env/continuous_profiling.go +++ b/pkg/env/continuous_profiling.go @@ -17,4 +17,26 @@ var ( // ContinuousProfilingAppName defines the AppName used to send the profiles ContinuousProfilingAppName = RegisterSetting("ROX_CONTINUOUS_PROFILING_APP_NAME", WithDefault(os.Getenv("POD_NAME"))) + + // ContinuousProfilingLabels defines additional labels/tags to attach to profiling data sent to Pyroscope. 
+ // Format: Comma-separated list of key=value pairs (e.g., "env=production,region=us-east,team=security") + // + // Parsing behavior: + // - Whitespace around keys, values, and commas is trimmed + // - Empty entries (consecutive commas) are ignored + // - Values can contain equals signs (e.g., "token=abc=123" is valid) + // - Both keys and values must be non-empty after trimming + // + // Error handling: + // - An invalid entry is logged as an error but doesn't prevent profiler initialization + // - If parsing fails, no labels are set + // + // Examples: + // Valid: "app=central,env=prod" + // Valid: " key1 = value1 , key2 = value2 " (whitespace is trimmed) + // Valid: "token=abc=123" (values can contain '=') + // Invalid: "key=" (empty value) + // Invalid: "=value" (empty key) + // Invalid: "noequals" (missing '=' separator) + ContinuousProfilingLabels = RegisterSetting("ROX_CONTINUOUS_PROFILING_LABELS") ) From d22f115b33c32e8c5abcee0c53e7358d1cb5f367 Mon Sep 17 00:00:00 2001 From: Guzman Date: Wed, 28 Jan 2026 15:40:08 +0100 Subject: [PATCH 055/232] chore(monitoring): Enable anonymous access (#18724) --- deploy/charts/monitoring/templates/grafana.yaml | 5 +++++ deploy/charts/monitoring/values.yaml | 1 + 2 files changed, 6 insertions(+) diff --git a/deploy/charts/monitoring/templates/grafana.yaml b/deploy/charts/monitoring/templates/grafana.yaml index 6b515856bd58a..d683af347cfab 100644 --- a/deploy/charts/monitoring/templates/grafana.yaml +++ b/deploy/charts/monitoring/templates/grafana.yaml @@ -27,6 +27,11 @@ data: # default admin password, can be changed before first start of grafana, or in profile settings admin_password = {{ required "A Grafana password is required" .Values.password }} + + [auth.anonymous] + # When enabled, user/pass is not needed to access with admin rights + enabled = {{ .Values.anonymousAdminAccess }} + org_role = Admin --- apiVersion: v1 kind: ConfigMap diff --git a/deploy/charts/monitoring/values.yaml b/deploy/charts/monitoring/values.yaml index 8f65111977776..f8f12324bb095 100644 --- a/deploy/charts/monitoring/values.yaml +++ b/deploy/charts/monitoring/values.yaml @@ -1,6 +1,7 @@ grafanaImage: grafana/grafana:10.4.17 prometheusImage: prom/prometheus:v2.34.0 password: stackrox +anonymousAdminAccess: true resources: requests: From 37738aee7676a00fd816787e6e8ff5d46a9efce8 Mon Sep 17 00:00:00 2001 From: Piotr Rygielski <114479+vikin91@users.noreply.github.com> Date: Wed, 28 Jan 2026 15:50:14 +0100 Subject: [PATCH 056/232] ROX-32851: deprecate roxctl netpol NP-Guard commands (#18720) --- CHANGELOG.md | 2 ++ roxctl/common/deprecation.go | 4 ++++ roxctl/netpol/connectivity/diff/command.go | 8 +++++--- roxctl/netpol/connectivity/map/command.go | 8 +++++--- roxctl/netpol/generate/command.go | 10 ++++++---- tests/roxctl/bats-tests/helpers.bash | 8 ++++++++ tests/roxctl/bats-tests/local/expect/roxctl--help.txt | 1 - .../roxctl-netpol-connectivity-map-development.bats | 5 +++++ .../local/roxctl-netpol-connectivity-map-release.bats | 5 +++++ .../local/roxctl-netpol-generate-development.bats | 7 +++++-- .../local/roxctl-netpol-generate-release.bats | 7 +++++-- 11 files changed, 50 insertions(+), 15 deletions(-) create mode 100644 roxctl/common/deprecation.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 6fb59d5cac35c..446ccce6c19c0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,8 @@ Changes should still be described appropriately in JIRA/doc input pages, for inc ### Deprecated Features +- ROX-32851: The `roxctl netpol generate`, `roxctl netpol connectivity map`, 
and `roxctl netpol connectivity diff` commands are deprecated because they rely on the unmaintained NP-Guard library and will be removed in a future release. + ### Technical Changes - ROX-30769: Update Node.js requirement for ui folder to 22.13.0 - ROX-31295: The lower limit for `ROX_MAX_PARALLEL_IMAGE_SCAN_INTERNAL` on Sensor has been reduced to one (from 10). diff --git a/roxctl/common/deprecation.go b/roxctl/common/deprecation.go new file mode 100644 index 0000000000000..deac106b99d77 --- /dev/null +++ b/roxctl/common/deprecation.go @@ -0,0 +1,4 @@ +package common + +// DeprecatedCommandNotice is a generic deprecation message for CLI commands. +const DeprecatedCommandNotice = "This command is deprecated and will be removed in a future release." diff --git a/roxctl/netpol/connectivity/diff/command.go b/roxctl/netpol/connectivity/diff/command.go index dc54cab1e3e4b..c5006b65cf028 100644 --- a/roxctl/netpol/connectivity/diff/command.go +++ b/roxctl/netpol/connectivity/diff/command.go @@ -7,6 +7,7 @@ import ( "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/stackrox/rox/pkg/errox" + "github.com/stackrox/rox/roxctl/common" "github.com/stackrox/rox/roxctl/common/environment" "github.com/stackrox/rox/roxctl/common/npg" ) @@ -30,9 +31,10 @@ type diffNetpolCommand struct { func Command(cliEnvironment environment.Environment) *cobra.Command { diffNetpolCmd := &diffNetpolCommand{env: cliEnvironment} c := &cobra.Command{ - Use: "diff", - Short: "Report connectivity-diff based on two directories containing network policies and YAML manifests with workload resources", - Long: `Based on two input folders containing Kubernetes workloads and network policy YAMLs, this command will report all differences in allowed connections between the resources.`, + Use: "diff", + Short: "Report connectivity-diff based on two directories containing network policies and YAML manifests with workload resources", + Long: `Based on two input folders containing Kubernetes workloads and network policy YAMLs, this command will report all differences in allowed connections between the resources.`, + Deprecated: common.DeprecatedCommandNotice, Args: cobra.ExactArgs(0), RunE: func(c *cobra.Command, args []string) error { diff --git a/roxctl/netpol/connectivity/map/command.go b/roxctl/netpol/connectivity/map/command.go index b84ff2f13c338..9729cc335ef49 100644 --- a/roxctl/netpol/connectivity/map/command.go +++ b/roxctl/netpol/connectivity/map/command.go @@ -6,6 +6,7 @@ import ( "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/stackrox/rox/pkg/errox" + "github.com/stackrox/rox/roxctl/common" "github.com/stackrox/rox/roxctl/common/environment" "github.com/stackrox/rox/roxctl/common/npg" ) @@ -32,9 +33,10 @@ type Cmd struct { func Command(cliEnvironment environment.Environment) *cobra.Command { cmd := NewCmd(cliEnvironment) c := &cobra.Command{ - Use: "map ", - Short: "Analyze connectivity based on network policies and other resources", - Long: `Based on a given folder containing deployment and network policy YAMLs, will analyze permitted cluster connectivity. Will write to stdout if no output flags are provided.`, + Use: "map ", + Short: "Analyze connectivity based on network policies and other resources", + Long: `Based on a given folder containing deployment and network policy YAMLs, will analyze permitted cluster connectivity. 
Will write to stdout if no output flags are provided.`, + Deprecated: common.DeprecatedCommandNotice, Args: cobra.ExactArgs(1), RunE: func(c *cobra.Command, args []string) error { diff --git a/roxctl/netpol/generate/command.go b/roxctl/netpol/generate/command.go index aa4a624a7261c..0bd91f6356882 100644 --- a/roxctl/netpol/generate/command.go +++ b/roxctl/netpol/generate/command.go @@ -2,6 +2,7 @@ package generate import ( "github.com/spf13/cobra" + "github.com/stackrox/rox/roxctl/common" "github.com/stackrox/rox/roxctl/common/environment" ) @@ -9,10 +10,11 @@ import ( func Command(cliEnvironment environment.Environment) *cobra.Command { cmd := &netpolGenerateCmd{env: cliEnvironment} c := &cobra.Command{ - Use: "generate ", - Short: "Recommend Network Policies based on deployment information", - Long: "Based on a given folder containing deployment YAMLs, will generate a list of recommended Network Policies. Will write to stdout if no output flags are provided.", - Args: cobra.ExactArgs(1), + Use: "generate ", + Short: "Recommend Network Policies based on deployment information", + Long: "Based on a given folder containing deployment YAMLs, will generate a list of recommended Network Policies. Will write to stdout if no output flags are provided.", + Deprecated: common.DeprecatedCommandNotice, + Args: cobra.ExactArgs(1), RunE: func(c *cobra.Command, args []string) error { return cmd.RunE(c, args) }, diff --git a/tests/roxctl/bats-tests/helpers.bash b/tests/roxctl/bats-tests/helpers.bash index 4d78a10446f5c..2f86cbb2aef4b 100644 --- a/tests/roxctl/bats-tests/helpers.bash +++ b/tests/roxctl/bats-tests/helpers.bash @@ -14,6 +14,14 @@ luname() { uname | tr '[:upper:]' '[:lower:]' } +strip_deprecation_notice() { + if [[ $# -gt 0 ]]; then + printf '%s\n' "$1" | sed -e '/^Command ".*" is deprecated,/d' + return + fi + sed -e '/^Command ".*" is deprecated,/d' +} + tmp_roxctl="tmp/roxctl-bats/bin" test_data="$BATS_TEST_DIRNAME/../test-data" diff --git a/tests/roxctl/bats-tests/local/expect/roxctl--help.txt b/tests/roxctl/bats-tests/local/expect/roxctl--help.txt index eba46c4bc744c..22833cddc08ee 100644 --- a/tests/roxctl/bats-tests/local/expect/roxctl--help.txt +++ b/tests/roxctl/bats-tests/local/expect/roxctl--help.txt @@ -10,7 +10,6 @@ Available Commands: deployment Commands related to deployments helm Commands related to StackRox Helm Charts image Commands that you can run on a specific image - netpol Commands related to network policies scanner Commands related to the Scanner service sensor Commands related to deploying StackRox services in secured clusters diff --git a/tests/roxctl/bats-tests/local/roxctl-netpol-connectivity-map-development.bats b/tests/roxctl/bats-tests/local/roxctl-netpol-connectivity-map-development.bats index 9405d9f9cda95..1f6254c56859f 100644 --- a/tests/roxctl/bats-tests/local/roxctl-netpol-connectivity-map-development.bats +++ b/tests/roxctl/bats-tests/local/roxctl-netpol-connectivity-map-development.bats @@ -434,6 +434,7 @@ payments/gateway[Deployment] => payments/visa-processor[Deployment] : TCP 8080' check_acs_security_demos_files run roxctl-development netpol connectivity map "${acs_security_demos_dir}" --exposure assert_success + output=$(strip_deprecation_notice "$output") echo "$output" > "$ofile" assert_file_exist "$ofile" # normalizing tabs and whitespaces in output so it will be easier to compare with expected @@ -499,6 +500,7 @@ frontend/webapp[Deployment] <= entire-cluster : TCP 8080' run roxctl-development netpol connectivity map "${acs_security_demos_dir}" 
--output-format=dot --exposure assert_success + output=$(strip_deprecation_notice "$output") echo "$output" > "$ofile" assert_file_exist "$ofile" # normalizing tabs and whitespaces in output so it will be easier to compare with expected @@ -586,6 +588,7 @@ payments/gateway[Deployment] => entire-cluster : UDP 5353' run roxctl-development netpol connectivity map "${test_data}/np-guard/exposure-example" --exposure assert_success + output=$(strip_deprecation_notice "$output") echo "$output" > "$ofile" assert_file_exist "$ofile" # normalizing tabs and whitespaces in output so it will be easier to compare with expected @@ -615,6 +618,7 @@ hello-world/workload-a[Deployment] is not protected on Egress' run roxctl-development netpol connectivity map "${test_data}/np-guard/anp_banp_demo" assert_success + output=$(strip_deprecation_notice "$output") echo "$output" > "$ofile" assert_file_exist "$ofile" # normalizing tabs and whitespaces in output so it will be easier to compare with expected @@ -685,6 +689,7 @@ Denied connections: run roxctl-development netpol connectivity map "${test_data}/np-guard/netpols-analysis-example-minimal" --explain --output-format=md assert_success + output=$(strip_deprecation_notice "$output") echo "$output" > "$ofile" assert_file_exist "$ofile" # normalizing tabs and whitespaces in output so it will be easier to compare with expected diff --git a/tests/roxctl/bats-tests/local/roxctl-netpol-connectivity-map-release.bats b/tests/roxctl/bats-tests/local/roxctl-netpol-connectivity-map-release.bats index 0cff82b0cf47b..346523e4e0ec5 100644 --- a/tests/roxctl/bats-tests/local/roxctl-netpol-connectivity-map-release.bats +++ b/tests/roxctl/bats-tests/local/roxctl-netpol-connectivity-map-release.bats @@ -434,6 +434,7 @@ payments/gateway[Deployment] => payments/visa-processor[Deployment] : TCP 8080' check_acs_security_demos_files run roxctl-release netpol connectivity map "${acs_security_demos_dir}" --exposure assert_success + output=$(strip_deprecation_notice "$output") echo "$output" > "$ofile" assert_file_exist "$ofile" # normalizing tabs and whitespaces in output so it will be easier to compare with expected @@ -499,6 +500,7 @@ frontend/webapp[Deployment] <= entire-cluster : TCP 8080' run roxctl-release netpol connectivity map "${acs_security_demos_dir}" --output-format=dot --exposure assert_success + output=$(strip_deprecation_notice "$output") echo "$output" > "$ofile" assert_file_exist "$ofile" # normalizing tabs and whitespaces in output so it will be easier to compare with expected @@ -586,6 +588,7 @@ payments/gateway[Deployment] => entire-cluster : UDP 5353' run roxctl-release netpol connectivity map "${test_data}/np-guard/exposure-example" --exposure assert_success + output=$(strip_deprecation_notice "$output") echo "$output" > "$ofile" assert_file_exist "$ofile" # normalizing tabs and whitespaces in output so it will be easier to compare with expected @@ -615,6 +618,7 @@ hello-world/workload-a[Deployment] is not protected on Egress' run roxctl-release netpol connectivity map "${test_data}/np-guard/anp_banp_demo" assert_success + output=$(strip_deprecation_notice "$output") echo "$output" > "$ofile" assert_file_exist "$ofile" # normalizing tabs and whitespaces in output so it will be easier to compare with expected @@ -685,6 +689,7 @@ Denied connections: run roxctl-release netpol connectivity map "${test_data}/np-guard/netpols-analysis-example-minimal" --explain --output-format=md assert_success + output=$(strip_deprecation_notice "$output") echo "$output" > "$ofile" 
assert_file_exist "$ofile" # normalizing tabs and whitespaces in output so it will be easier to compare with expected diff --git a/tests/roxctl/bats-tests/local/roxctl-netpol-generate-development.bats b/tests/roxctl/bats-tests/local/roxctl-netpol-generate-development.bats index e20c0d245a7c5..5cfebca4c184e 100755 --- a/tests/roxctl/bats-tests/local/roxctl-netpol-generate-development.bats +++ b/tests/roxctl/bats-tests/local/roxctl-netpol-generate-development.bats @@ -23,9 +23,9 @@ teardown() { rm -f "$ofile" } -@test "roxctl-development netpol generate should not show deprecation info" { +@test "roxctl-development netpol generate shows deprecation info" { run roxctl-development netpol generate - refute_line --partial "is deprecated" + assert_line --partial "is deprecated" } @test "roxctl-development netpol generate should return error on empty or non-existing directory" { @@ -47,6 +47,7 @@ teardown() { run roxctl-development netpol generate "${test_data}/np-guard/scenario-minimal-service" assert_success + output=$(strip_deprecation_notice "$output") echo "$output" > "$ofile" assert_file_exist "$ofile" yaml_valid "$ofile" @@ -85,6 +86,7 @@ teardown() { run roxctl-development netpol generate "${test_data}/np-guard/scenario-minimal-service" --dnsport ${dns_port} assert_success + output=$(strip_deprecation_notice "$output") echo "$output" > "$ofile" assert_file_exist "$ofile" yaml_valid "$ofile" @@ -123,6 +125,7 @@ teardown() { run roxctl-development netpol generate "${test_data}/np-guard/scenario-minimal-service" --dnsport ${dns_port} assert_success + output=$(strip_deprecation_notice "$output") echo "$output" > "$ofile" assert_file_exist "$ofile" yaml_valid "$ofile" diff --git a/tests/roxctl/bats-tests/local/roxctl-netpol-generate-release.bats b/tests/roxctl/bats-tests/local/roxctl-netpol-generate-release.bats index 83d1063539c5d..f205ccd875e76 100755 --- a/tests/roxctl/bats-tests/local/roxctl-netpol-generate-release.bats +++ b/tests/roxctl/bats-tests/local/roxctl-netpol-generate-release.bats @@ -24,9 +24,9 @@ teardown() { rm -f "$ofile" } -@test "roxctl-release netpol generate should not show deprecation info" { +@test "roxctl-release netpol generate shows deprecation info" { run roxctl-release netpol generate - refute_line --partial "is deprecated" + assert_line --partial "is deprecated" } @test "roxctl-release netpol generate should return error on empty or non-existing directory" { @@ -48,6 +48,7 @@ teardown() { run roxctl-release netpol generate "${test_data}/np-guard/scenario-minimal-service" assert_success + output=$(strip_deprecation_notice "$output") echo "$output" > "$ofile" assert_file_exist "$ofile" yaml_valid "$ofile" @@ -107,6 +108,7 @@ teardown() { run roxctl-release netpol generate "${test_data}/np-guard/scenario-minimal-service" --dnsport ${dns_port} assert_success + output=$(strip_deprecation_notice "$output") echo "$output" > "$ofile" assert_file_exist "$ofile" yaml_valid "$ofile" @@ -145,6 +147,7 @@ teardown() { run roxctl-release netpol generate "${test_data}/np-guard/scenario-minimal-service" --dnsport ${dns_port} assert_success + output=$(strip_deprecation_notice "$output") echo "$output" > "$ofile" assert_file_exist "$ofile" yaml_valid "$ofile" From a6ba6d4074bc94d4448d5c3cd8d30941662c4dba Mon Sep 17 00:00:00 2001 From: davdhacs <105243888+davdhacs@users.noreply.github.com> Date: Wed, 28 Jan 2026 08:33:20 -0700 Subject: [PATCH 057/232] ROX-32312: retry gcloud_auth to handle network flakiness (#18273) --- scripts/ci/gcp.sh | 3 ++- 1 file changed, 2 insertions(+), 1 
deletion(-) diff --git a/scripts/ci/gcp.sh b/scripts/ci/gcp.sh index fc428ca02e3e4..3937a1f7d6f95 100755 --- a/scripts/ci/gcp.sh +++ b/scripts/ci/gcp.sh @@ -32,7 +32,8 @@ setup_gcp() { die "Support is missing for this environment" fi - gcloud auth activate-service-account --key-file <(echo "$service_account") + retry 7 true gcloud auth activate-service-account --key-file <(echo "$service_account") + gcloud auth list gcloud config set project acs-san-stackroxci gcloud config set compute/region us-central1 From d756e44711cc0532b5601996a6148b4b2601d583 Mon Sep 17 00:00:00 2001 From: Khushboo Sancheti <42253461+clickboo@users.noreply.github.com> Date: Wed, 28 Jan 2026 21:52:07 +0530 Subject: [PATCH 058/232] fix(ui): Prettify the file permissions on the violation details (#18717) --- .../Details/FileAccessCardContent.tsx | 41 +++++++++++++++++-- 1 file changed, 38 insertions(+), 3 deletions(-) diff --git a/ui/apps/platform/src/Containers/Violations/Details/FileAccessCardContent.tsx b/ui/apps/platform/src/Containers/Violations/Details/FileAccessCardContent.tsx index 77b14a83f2513..b06fb69795c14 100644 --- a/ui/apps/platform/src/Containers/Violations/Details/FileAccessCardContent.tsx +++ b/ui/apps/platform/src/Containers/Violations/Details/FileAccessCardContent.tsx @@ -18,6 +18,41 @@ function formatOperation(operation: FileOperation): string { return fileOperations.get(operation) || 'Unknown'; } +/** + * Converts a numeric file mode to a Linux file permissions string. + * + * @param mode - The file mode as a base-10 number (e.g., 33188 for 0o100644) + * @returns A string representation of the permissions (e.g., "rw-r--r--") + * + * @example + * formatFileMode(33188) // returns "rw-r--r--" (0o644) + * formatFileMode(33261) // returns "rwxr-xr-x" (0o755) + * formatFileMode(16877) // returns "rwxr-xr-x" (directory with 0o755) + */ +function formatFileMode(mode: number): string { + // Map each octal digit (0-7) to its rwx permission string + const octalToPermission: Record = { + '0': '---', + '1': '--x', + '2': '-w-', + '3': '-wx', + '4': 'r--', + '5': 'r-x', + '6': 'rw-', + '7': 'rwx', + }; + + // Extract the permission bits (lower 9 bits) and convert to octal string + const permissionBits = mode % 512; // 512 = 0o1000, equivalent to mode & 0o777 + const octalString = permissionBits.toString(8).padStart(3, '0'); + + // Convert each octal digit to its permission string + return octalString + .split('') + .map((digit) => octalToPermission[digit]) + .join(''); +} + type FileAccessCardContentProps = { event: FileAccess; }; @@ -60,7 +95,7 @@ function FileAccessCardContent({ event }: FileAccessCardContentProps): ReactElem )} - {file.meta && File metadata:} + {file.meta && File metadata} {file.meta && ( {file.meta.username && ( @@ -75,8 +110,8 @@ function FileAccessCardContent({ event }: FileAccessCardContentProps): ReactElem )} {Number.isInteger(file.meta.mode) && ( )} From acef8da439958af57c0a7dcd68dba8db395be666 Mon Sep 17 00:00:00 2001 From: Mark Pedrotti Date: Wed, 28 Jan 2026 11:57:41 -0500 Subject: [PATCH 059/232] ROX-32837: Re-align layer type filter with backend search term (#18709) --- .../CompoundSearchFilter/attributes/imageComponent.ts | 6 +++--- ui/apps/platform/src/hooks/useAnalytics.ts | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ui/apps/platform/src/Components/CompoundSearchFilter/attributes/imageComponent.ts b/ui/apps/platform/src/Components/CompoundSearchFilter/attributes/imageComponent.ts index ddb13223c9480..2b684c01b0489 100644 --- 
a/ui/apps/platform/src/Components/CompoundSearchFilter/attributes/imageComponent.ts +++ b/ui/apps/platform/src/Components/CompoundSearchFilter/attributes/imageComponent.ts @@ -32,13 +32,13 @@ export const Version: CompoundSearchFilterAttribute = { export const LayerType: CompoundSearchFilterAttribute = { displayName: 'Layer type', filterChipLabel: 'Image component layer type', - searchTerm: 'Component From Base Image', + searchTerm: 'Component Layer Type', inputType: 'select', featureFlagDependency: ['ROX_BASE_IMAGE_DETECTION'], inputProps: { options: [ - { label: 'Application', value: 'false' }, - { label: 'Base image', value: 'true' }, + { label: 'Application', value: 'APPLICATION' }, + { label: 'Base image', value: 'BASE_IMAGE' }, ], }, }; diff --git a/ui/apps/platform/src/hooks/useAnalytics.ts b/ui/apps/platform/src/hooks/useAnalytics.ts index 76585eff33588..4c453b21cd6bf 100644 --- a/ui/apps/platform/src/hooks/useAnalytics.ts +++ b/ui/apps/platform/src/hooks/useAnalytics.ts @@ -114,7 +114,7 @@ type AnalyticsBoolean = 0 | 1; */ export const searchCategoriesWithFilter = [ 'Component Source', - 'Component From Base Image', + 'Component Layer Type', 'SEVERITY', 'FIXABLE', 'CLUSTER CVE FIXABLE', From a8a253cdcb4dae1834191f7b3f2eb6d8b3c2cfd7 Mon Sep 17 00:00:00 2001 From: Giles Hutton Date: Wed, 28 Jan 2026 17:18:10 +0000 Subject: [PATCH 060/232] chore(fim): add changelog entry (#18723) --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 446ccce6c19c0..61c9e1aa6da8d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ Changes should still be described appropriately in JIRA/doc input pages, for inc - ROX-31960, ROX-32449: include and exclude filters for custom metrics. - ROX-30641: Added a new policy criteria "Days Since CVE Fix Was Available". - Tech preview: operator-based installation available for community StackRox build. More information in [a separate README file](operator/install/README.md). +- ROX-30585, ROX-30196 (Tech Preview): Added file activity monitoring, including new policy criteria for deployment or node file activity. ### Removed Features - ROX-31727: `/v1/cve/requests` APIs (deprecated in 4.3.0) for managing vulnerability exceptions have been removed. 
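For readers checking the permission arithmetic introduced in PATCH 058 above (`formatFileMode` in FileAccessCardContent.tsx): `mode % 512` keeps only the lower nine permission bits (the `0o777` mask), and each remaining octal digit maps to one `rwx` triplet. The standalone sketch below restates that conversion outside the React component so the documented example values can be checked in isolation; the name `toPermissionString` is illustrative and is not part of the patch.

```typescript
// Standalone sketch of the conversion performed by formatFileMode in PATCH 058.
// It only restates logic shown in that patch; toPermissionString is an illustrative name.
const octalToPermission: Record<string, string> = {
    '0': '---', '1': '--x', '2': '-w-', '3': '-wx',
    '4': 'r--', '5': 'r-x', '6': 'rw-', '7': 'rwx',
};

function toPermissionString(mode: number): string {
    const permissionBits = mode % 512; // 512 = 0o1000, so this keeps the lower 9 bits (0o777)
    const octalString = permissionBits.toString(8).padStart(3, '0');
    return octalString
        .split('')
        .map((digit) => octalToPermission[digit])
        .join('');
}

// 33188 = 0o100644 (regular file, 0644) -> "rw-r--r--"
// 33261 = 0o100755 (regular file, 0755) -> "rwxr-xr-x"
// 16877 = 0o040755 (directory,    0755) -> "rwxr-xr-x"
console.log(toPermissionString(33188)); // rw-r--r--
console.log(toPermissionString(33261)); // rwxr-xr-x
console.log(toPermissionString(16877)); // rwxr-xr-x
```

The modulo form is equivalent to masking with `& 0o777`, as the patch's own inline comment notes; both drop everything above the lowest nine bits (the file-type bits and any setuid/setgid/sticky bits) and keep only the owner, group, and other permission triplets.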
From e6453c5731385029e72ce0478c42776eb6024256 Mon Sep 17 00:00:00 2001 From: Yann Brillouet <91869377+rhybrillou@users.noreply.github.com> Date: Wed, 28 Jan 2026 19:51:59 +0100 Subject: [PATCH 061/232] refactor: rename dynamic origin trait to ephemeral (#18733) --- .../internaltokens/service/role_manager.go | 2 +- central/role/service/service_impl.go | 18 ++++++++--------- .../service_impl_accessscope_postgres_test.go | 16 +++++++-------- ...ervice_impl_permissionset_postgres_test.go | 16 +++++++-------- .../service_impl_role_postgres_test.go | 18 ++++++++--------- central/role/service/test_helpers_test.go | 2 +- generated/api/v1/auth_service.swagger.json | 4 ++-- .../api/v1/authprovider_service.swagger.json | 4 ++-- generated/api/v1/group_service.swagger.json | 6 +++--- .../api/v1/notifier_service.swagger.json | 4 ++-- generated/api/v1/role_service.swagger.json | 4 ++-- ...signature_integration_service.swagger.json | 4 ++-- generated/storage/traits.pb.go | 20 +++++++++---------- pkg/declarativeconfig/context.go | 4 ++-- pkg/declarativeconfig/context_test.go | 14 ++++++------- pkg/declarativeconfig/origin.go | 8 ++++---- pkg/declarativeconfig/origin_test.go | 20 +++++++++---------- proto/storage/proto.lock | 2 +- proto/storage/traits.proto | 8 ++++---- ui/apps/platform/src/types/traits.proto.ts | 2 +- ui/apps/platform/src/utils/traits.utils.ts | 2 +- 21 files changed, 89 insertions(+), 89 deletions(-) diff --git a/central/auth/internaltokens/service/role_manager.go b/central/auth/internaltokens/service/role_manager.go index 75440a8c033e4..3250930acdd43 100644 --- a/central/auth/internaltokens/service/role_manager.go +++ b/central/auth/internaltokens/service/role_manager.go @@ -42,7 +42,7 @@ type roleManager struct { func generateTraitsWithExpiry(expiresAt time.Time) (*storage.Traits, error) { ts, err := protocompat.ConvertTimeToTimestampOrError(expiresAt) return &storage.Traits{ - Origin: storage.Traits_DYNAMIC, + Origin: storage.Traits_EPHEMERAL, ExpiresAt: ts, }, err } diff --git a/central/role/service/service_impl.go b/central/role/service/service_impl.go index e9166135c1fde..c828bf5a449c9 100644 --- a/central/role/service/service_impl.go +++ b/central/role/service/service_impl.go @@ -84,7 +84,7 @@ func (*serviceImpl) AuthFuncOverride(ctx context.Context, fullMethodName string) func (s *serviceImpl) GetRoles(ctx context.Context, _ *v1.Empty) (*v1.GetRolesResponse, error) { roles, err := s.roleDataStore.GetRolesFiltered(ctx, func(role *storage.Role) bool { // filter out dynamic roles created to back Rox tokens issued for internal purposes. 
- return role.GetTraits().GetOrigin() != storage.Traits_DYNAMIC + return role.GetTraits().GetOrigin() != storage.Traits_EPHEMERAL }) if err != nil { return nil, errors.Wrap(err, "failed to retrieve roles") @@ -120,7 +120,7 @@ func (s *serviceImpl) CreateRole(ctx context.Context, roleRequest *v1.CreateRole if role.GetName() != "" && role.GetName() != roleRequest.GetName() { return nil, errox.InvalidArgs.CausedBy("different role names in path and body") } - if role.GetTraits().GetOrigin() == storage.Traits_DYNAMIC { + if role.GetTraits().GetOrigin() == storage.Traits_EPHEMERAL { return nil, errox.InvalidArgs.CausedBy("dynamic roles can only be created by internal services") } role.Name = roleRequest.GetName() @@ -133,7 +133,7 @@ func (s *serviceImpl) CreateRole(ctx context.Context, roleRequest *v1.CreateRole } func (s *serviceImpl) UpdateRole(ctx context.Context, role *storage.Role) (*v1.Empty, error) { - if role.GetTraits().GetOrigin() == storage.Traits_DYNAMIC { + if role.GetTraits().GetOrigin() == storage.Traits_EPHEMERAL { return nil, errox.InvalidArgs.CausedBy("dynamic roles cannot be modified by API") } err := s.roleDataStore.UpdateRole(ctx, role) @@ -194,7 +194,7 @@ func (s *serviceImpl) GetPermissionSet(ctx context.Context, id *v1.ResourceByID) func (s *serviceImpl) ListPermissionSets(ctx context.Context, _ *v1.Empty) (*v1.ListPermissionSetsResponse, error) { permissionSets, err := s.roleDataStore.GetPermissionSetsFiltered(ctx, func(permissionSet *storage.PermissionSet) bool { // filter out dynamic permission sets created to back Rox tokens issued for internal purposes. - return permissionSet.GetTraits().GetOrigin() != storage.Traits_DYNAMIC + return permissionSet.GetTraits().GetOrigin() != storage.Traits_EPHEMERAL }) if err != nil { return nil, errors.Wrap(err, "failed to retrieve permission sets") @@ -214,7 +214,7 @@ func (s *serviceImpl) PostPermissionSet(ctx context.Context, permissionSet *stor } permissionSet.Id = rolePkg.GeneratePermissionSetID() - if permissionSet.GetTraits().GetOrigin() == storage.Traits_DYNAMIC { + if permissionSet.GetTraits().GetOrigin() == storage.Traits_EPHEMERAL { return nil, errox.InvalidArgs.CausedBy("dynamic permission sets can only be created by internal services") } @@ -231,7 +231,7 @@ func (s *serviceImpl) PostPermissionSet(ctx context.Context, permissionSet *stor } func (s *serviceImpl) PutPermissionSet(ctx context.Context, permissionSet *storage.PermissionSet) (*v1.Empty, error) { - if permissionSet.GetTraits().GetOrigin() == storage.Traits_DYNAMIC { + if permissionSet.GetTraits().GetOrigin() == storage.Traits_EPHEMERAL { return nil, errox.InvalidArgs.CausedBy("dynamic permission sets cannot be modified by API") } err := s.roleDataStore.UpdatePermissionSet(ctx, permissionSet) @@ -270,7 +270,7 @@ func (s *serviceImpl) GetSimpleAccessScope(ctx context.Context, id *v1.ResourceB func (s *serviceImpl) ListSimpleAccessScopes(ctx context.Context, _ *v1.Empty) (*v1.ListSimpleAccessScopesResponse, error) { scopes, err := s.roleDataStore.GetAccessScopesFiltered(ctx, func(scope *storage.SimpleAccessScope) bool { // filter out dynamic access scopes created to back Rox tokens issued for internal purposes. - return scope.GetTraits().GetOrigin() != storage.Traits_DYNAMIC + return scope.GetTraits().GetOrigin() != storage.Traits_EPHEMERAL }) if err != nil { return nil, errors.Wrap(err, "failed to retrieve access scopes") @@ -289,7 +289,7 @@ func (s *serviceImpl) PostSimpleAccessScope(ctx context.Context, scope *storage. 
return nil, errox.InvalidArgs.CausedBy("setting id field is not allowed") } scope.Id = rolePkg.GenerateAccessScopeID() - if scope.GetTraits().GetOrigin() == storage.Traits_DYNAMIC { + if scope.GetTraits().GetOrigin() == storage.Traits_EPHEMERAL { return nil, errox.InvalidArgs.CausedBy("dynamic access scopes can only be created by internal services") } @@ -305,7 +305,7 @@ func (s *serviceImpl) PostSimpleAccessScope(ctx context.Context, scope *storage. } func (s *serviceImpl) PutSimpleAccessScope(ctx context.Context, scope *storage.SimpleAccessScope) (*v1.Empty, error) { - if scope.GetTraits().GetOrigin() == storage.Traits_DYNAMIC { + if scope.GetTraits().GetOrigin() == storage.Traits_EPHEMERAL { return nil, errox.InvalidArgs.CausedBy("dynamic access scopes cannot be modified by API") } err := s.roleDataStore.UpdateAccessScope(ctx, scope) diff --git a/central/role/service/service_impl_accessscope_postgres_test.go b/central/role/service/service_impl_accessscope_postgres_test.go index 0fff6ff7b8903..1ebcbcfdd317e 100644 --- a/central/role/service/service_impl_accessscope_postgres_test.go +++ b/central/role/service/service_impl_accessscope_postgres_test.go @@ -46,12 +46,12 @@ func (s *serviceImplAccessScopeTestSuite) TestListAccessScopes() { accessScopeName2 := "TestListAccessScopes_imperativeOriginTraits" accessScopeName3 := "TestListAccessScopes_declarativeOriginTraits" accessScopeName4 := "TestListAccessScopes_orphanedDeclarativeOriginTraits" - accessScopeName5 := "TestListAccessScopes_dynamicOriginTraits" + accessScopeName5 := "TestListAccessScopes_ephemeralOriginTraits" scope1 := s.tester.createAccessScope(t, accessScopeName1, nilTraits) scope2 := s.tester.createAccessScope(t, accessScopeName2, imperativeOriginTraits) scope3 := s.tester.createAccessScope(t, accessScopeName3, declarativeOriginTraits) scope4 := s.tester.createAccessScope(t, accessScopeName4, orphanedDeclarativeOriginTraits) - scope5 := s.tester.createAccessScope(t, accessScopeName5, dynamicOriginTraits) + scope5 := s.tester.createAccessScope(t, accessScopeName5, ephemeralOriginTraits) scopes, err := s.tester.service.ListSimpleAccessScopes(ctx, &v1.Empty{}) s.NoError(err) @@ -61,7 +61,7 @@ func (s *serviceImplAccessScopeTestSuite) TestListAccessScopes() { protoassert.SliceContains(s.T(), scopes.GetAccessScopes(), scope2) protoassert.SliceContains(s.T(), scopes.GetAccessScopes(), scope3) protoassert.SliceContains(s.T(), scopes.GetAccessScopes(), scope4) - // Roles with dynamic origin are filtered out. + // Roles with ephemeral origin are filtered out. 
protoassert.SliceNotContains(s.T(), scopes.GetAccessScopes(), scope5) } @@ -77,9 +77,9 @@ func (s *serviceImplAccessScopeTestSuite) TestPostAccessScope() { inputScope.Id = scope.GetId() protoassert.Equal(s.T(), inputScope, scope) }) - s.Run("Dynamic scopes cannot be created by API", func() { + s.Run("ephemeral scopes cannot be created by API", func() { inputScope := &storage.SimpleAccessScope{ - Traits: dynamicOriginTraits, + Traits: ephemeralOriginTraits, } ctx := sac.WithAllAccess(s.T().Context()) scope, err := s.tester.service.PostSimpleAccessScope(ctx, inputScope) @@ -98,9 +98,9 @@ func (s *serviceImplAccessScopeTestSuite) TestPutAccessScope() { _, err := s.tester.service.PutSimpleAccessScope(ctx, updatedScope) s.NoError(err) }) - s.Run("Dynamic scopes cannot be created by API", func() { - scopeName := "Dynamic access scope" - inputScope := s.tester.createAccessScope(s.T(), scopeName, dynamicOriginTraits) + s.Run("Ephemeral scopes cannot be created by API", func() { + scopeName := "Ephemeral access scope" + inputScope := s.tester.createAccessScope(s.T(), scopeName, ephemeralOriginTraits) updatedScope := inputScope.CloneVT() updatedScope.Description = "Updated description" ctx := sac.WithAllAccess(s.T().Context()) diff --git a/central/role/service/service_impl_permissionset_postgres_test.go b/central/role/service/service_impl_permissionset_postgres_test.go index 321accf10d56e..449f38569b1af 100644 --- a/central/role/service/service_impl_permissionset_postgres_test.go +++ b/central/role/service/service_impl_permissionset_postgres_test.go @@ -46,12 +46,12 @@ func (s *serviceImplPermissionSetTestSuite) TestListPermissionSets() { permissionSetName2 := "TestListPermissionSets_imperativeOriginTraits" permissionSetName3 := "TestListPermissionSets_declarativeOriginTraits" permissionSetName4 := "TestListPermissionSets_orphanedDeclarativeOriginTraits" - permissionSetName5 := "TestListPermissionSets_dynamicOriginTraits" + permissionSetName5 := "TestListPermissionSets_ephemeralOriginTraits" permissionSet1 := s.tester.createPermissionSet(t, permissionSetName1, nilTraits) permissionSet2 := s.tester.createPermissionSet(t, permissionSetName2, imperativeOriginTraits) permissionSet3 := s.tester.createPermissionSet(t, permissionSetName3, declarativeOriginTraits) permissionSet4 := s.tester.createPermissionSet(t, permissionSetName4, orphanedDeclarativeOriginTraits) - permissionSet5 := s.tester.createPermissionSet(t, permissionSetName5, dynamicOriginTraits) + permissionSet5 := s.tester.createPermissionSet(t, permissionSetName5, ephemeralOriginTraits) permissionSets, err := s.tester.service.ListPermissionSets(ctx, &v1.Empty{}) s.NoError(err) @@ -61,7 +61,7 @@ func (s *serviceImplPermissionSetTestSuite) TestListPermissionSets() { protoassert.SliceContains(s.T(), permissionSets.GetPermissionSets(), permissionSet2) protoassert.SliceContains(s.T(), permissionSets.GetPermissionSets(), permissionSet3) protoassert.SliceContains(s.T(), permissionSets.GetPermissionSets(), permissionSet4) - // Roles with dynamic origin are filtered out. + // Roles with ephemeral origin are filtered out. 
protoassert.SliceNotContains(s.T(), permissionSets.GetPermissionSets(), permissionSet5) } @@ -76,9 +76,9 @@ func (s *serviceImplPermissionSetTestSuite) TestPostPermissionSet() { inputPermissionSet.Id = permissionSet.GetId() protoassert.Equal(s.T(), inputPermissionSet, permissionSet) }) - s.Run("Dynamic scopes cannot be created by API", func() { + s.Run("Ephemeral scopes cannot be created by API", func() { inputScope := &storage.SimpleAccessScope{ - Traits: dynamicOriginTraits, + Traits: ephemeralOriginTraits, } ctx := sac.WithAllAccess(s.T().Context()) scope, err := s.tester.service.PostSimpleAccessScope(ctx, inputScope) @@ -97,9 +97,9 @@ func (s *serviceImplPermissionSetTestSuite) TestPutPermissionSet() { _, err := s.tester.service.PutPermissionSet(ctx, updatedPermissionSet) s.NoError(err) }) - s.Run("Dynamic scopes cannot be created by API", func() { - permissionSetName := "Dynamic permission set" - inputPermissionSet := s.tester.createPermissionSet(s.T(), permissionSetName, dynamicOriginTraits) + s.Run("Ephemeral scopes cannot be created by API", func() { + permissionSetName := "Ephemeral permission set" + inputPermissionSet := s.tester.createPermissionSet(s.T(), permissionSetName, ephemeralOriginTraits) updatedPermissionSet := inputPermissionSet.CloneVT() updatedPermissionSet.Description = "Updated description" ctx := sac.WithAllAccess(s.T().Context()) diff --git a/central/role/service/service_impl_role_postgres_test.go b/central/role/service/service_impl_role_postgres_test.go index d8c39a8d511de..5de1d22edb0d1 100644 --- a/central/role/service/service_impl_role_postgres_test.go +++ b/central/role/service/service_impl_role_postgres_test.go @@ -132,12 +132,12 @@ func (s *serviceImplRoleTestSuite) TestGetRoles() { roleName2 := "TestGetRoles_imperativeOriginTraits" roleName3 := "TestGetRoles_declarativeOriginTraits" roleName4 := "TestGetRoles_orphanedDeclarativeOriginTraits" - roleName5 := "TestGetRoles_dynamicOriginTraits" + roleName5 := "TestGetRoles_ephemeralOriginTraits" role1 := s.tester.createRole(t, roleName1, nilTraits) role2 := s.tester.createRole(t, roleName2, imperativeOriginTraits) role3 := s.tester.createRole(t, roleName3, declarativeOriginTraits) role4 := s.tester.createRole(t, roleName4, orphanedDeclarativeOriginTraits) - role5 := s.tester.createRole(t, roleName5, dynamicOriginTraits) + role5 := s.tester.createRole(t, roleName5, ephemeralOriginTraits) roles, err := s.tester.service.GetRoles(ctx, &v1.Empty{}) s.NoError(err) @@ -147,7 +147,7 @@ func (s *serviceImplRoleTestSuite) TestGetRoles() { protoassert.SliceContains(s.T(), roles.GetRoles(), role2) protoassert.SliceContains(s.T(), roles.GetRoles(), role3) protoassert.SliceContains(s.T(), roles.GetRoles(), role4) - // Roles with dynamic origin are filtered out. + // Roles with ephemeral origin are filtered out. 
protoassert.SliceNotContains(s.T(), roles.GetRoles(), role5) } @@ -174,15 +174,15 @@ func (s *serviceImplRoleTestSuite) TestCreateRole() { s.NoError(fetchErr) protoassert.Equal(s.T(), roleCreationRequest.GetRole(), role) }) - s.Run("Dynamic roles cannot be created by API", func() { - roleName := "Dynamic test role" + s.Run("Ephemeral roles cannot be created by API", func() { + roleName := "Ephemeral test role" roleCreationRequest := &v1.CreateRoleRequest{ Name: roleName, Role: &storage.Role{ Name: roleName, PermissionSetId: accesscontrol.DefaultPermissionSetIDs[accesscontrol.Admin], AccessScopeId: accesscontrol.DefaultAccessScopeIDs[accesscontrol.UnrestrictedAccessScope], - Traits: dynamicOriginTraits, + Traits: ephemeralOriginTraits, }, } ctx := sac.WithAllAccess(s.T().Context()) @@ -201,9 +201,9 @@ func (s *serviceImplRoleTestSuite) TestUpdateRole() { _, err := s.tester.service.UpdateRole(ctx, role) s.NoError(err) }) - s.Run("Dynamic roles cannot be updated by API", func() { - roleName := "Test update of dynamic role" - role := s.tester.createRole(s.T(), roleName, dynamicOriginTraits) + s.Run("Ephemeral roles cannot be updated by API", func() { + roleName := "Test update of ephemeral role" + role := s.tester.createRole(s.T(), roleName, ephemeralOriginTraits) updatedRole := role.CloneVT() updatedRole.Description = "Updated description" ctx := sac.WithAllAccess(s.T().Context()) diff --git a/central/role/service/test_helpers_test.go b/central/role/service/test_helpers_test.go index 386e556e97943..65db4100ea652 100644 --- a/central/role/service/test_helpers_test.go +++ b/central/role/service/test_helpers_test.go @@ -34,7 +34,7 @@ var ( orphanedDeclarativeOriginTraits = &storage.Traits{Origin: storage.Traits_DECLARATIVE_ORPHANED} - dynamicOriginTraits = &storage.Traits{Origin: storage.Traits_DYNAMIC} + ephemeralOriginTraits = &storage.Traits{Origin: storage.Traits_EPHEMERAL} ) type serviceImplTester struct { diff --git a/generated/api/v1/auth_service.swagger.json b/generated/api/v1/auth_service.swagger.json index 37b64e9ec38a3..6f05f7c6f7cd3 100644 --- a/generated/api/v1/auth_service.swagger.json +++ b/generated/api/v1/auth_service.swagger.json @@ -492,10 +492,10 @@ "DEFAULT", "DECLARATIVE", "DECLARATIVE_ORPHANED", - "DYNAMIC" + "EPHEMERAL" ], "default": "IMPERATIVE", - "description": "Origin specifies the origin of an object.\nObjects can have five different origins:\n- IMPERATIVE: the object was created via the API. 
This is assumed by default.\n- DEFAULT: the object is a default object, such as default roles, access scopes etc.\n- DECLARATIVE: the object is created via declarative configuration.\n- DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object)\n- DYNAMIC: the object is created via an internal API, generated on the fly and meant to be ephemeral.\nBased on the origin, different rules apply to the objects.\nObjects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration.\nAdditionally, they may not reference objects with the IMPERATIVE or DYNAMIC origin.\nObjects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration.\nThey may be referenced by all other objects.\nObjects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration.\nThey may reference all other objects.\nObjects with the DYNAMIC origin are neither allowed to be modified via API, nor via declarative configuration.\nThey may reference all other objects.\nObjects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration.\nDECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration.\nObjects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore.\nThey may be referenced by all other objects." + "description": "Origin specifies the origin of an object.\nObjects can have five different origins:\n- IMPERATIVE: the object was created via the API. This is assumed by default.\n- DEFAULT: the object is a default object, such as default roles, access scopes etc.\n- DECLARATIVE: the object is created via declarative configuration.\n- DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object)\n- EPHEMERAL: the object is created via an internal API, generated on the fly and meant to be ephemeral.\nBased on the origin, different rules apply to the objects.\nObjects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration.\nAdditionally, they may not reference objects with the IMPERATIVE or EPHEMERAL origin.\nObjects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration.\nThey may be referenced by all other objects.\nObjects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration.\nThey may reference all other objects.\nObjects with the EPHEMERAL origin are neither allowed to be modified via API, nor via declarative configuration.\nThey may reference all other objects.\nObjects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration.\nDECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration.\nObjects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore.\nThey may be referenced by all other objects." 
}, "storageTraitsVisibility": { "type": "string", diff --git a/generated/api/v1/authprovider_service.swagger.json b/generated/api/v1/authprovider_service.swagger.json index c0a0f83c3598b..18588c3526635 100644 --- a/generated/api/v1/authprovider_service.swagger.json +++ b/generated/api/v1/authprovider_service.swagger.json @@ -615,10 +615,10 @@ "DEFAULT", "DECLARATIVE", "DECLARATIVE_ORPHANED", - "DYNAMIC" + "EPHEMERAL" ], "default": "IMPERATIVE", - "description": "Origin specifies the origin of an object.\nObjects can have five different origins:\n- IMPERATIVE: the object was created via the API. This is assumed by default.\n- DEFAULT: the object is a default object, such as default roles, access scopes etc.\n- DECLARATIVE: the object is created via declarative configuration.\n- DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object)\n- DYNAMIC: the object is created via an internal API, generated on the fly and meant to be ephemeral.\nBased on the origin, different rules apply to the objects.\nObjects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration.\nAdditionally, they may not reference objects with the IMPERATIVE or DYNAMIC origin.\nObjects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration.\nThey may be referenced by all other objects.\nObjects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration.\nThey may reference all other objects.\nObjects with the DYNAMIC origin are neither allowed to be modified via API, nor via declarative configuration.\nThey may reference all other objects.\nObjects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration.\nDECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration.\nObjects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore.\nThey may be referenced by all other objects." + "description": "Origin specifies the origin of an object.\nObjects can have five different origins:\n- IMPERATIVE: the object was created via the API. 
This is assumed by default.\n- DEFAULT: the object is a default object, such as default roles, access scopes etc.\n- DECLARATIVE: the object is created via declarative configuration.\n- DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object)\n- EPHEMERAL: the object is created via an internal API, generated on the fly and meant to be ephemeral.\nBased on the origin, different rules apply to the objects.\nObjects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration.\nAdditionally, they may not reference objects with the IMPERATIVE or EPHEMERAL origin.\nObjects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration.\nThey may be referenced by all other objects.\nObjects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration.\nThey may reference all other objects.\nObjects with the EPHEMERAL origin are neither allowed to be modified via API, nor via declarative configuration.\nThey may reference all other objects.\nObjects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration.\nDECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration.\nObjects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore.\nThey may be referenced by all other objects." }, "storageTraitsVisibility": { "type": "string", diff --git a/generated/api/v1/group_service.swagger.json b/generated/api/v1/group_service.swagger.json index 3d9617e78d00a..cae2d857b80b6 100644 --- a/generated/api/v1/group_service.swagger.json +++ b/generated/api/v1/group_service.swagger.json @@ -73,7 +73,7 @@ "DEFAULT", "DECLARATIVE", "DECLARATIVE_ORPHANED", - "DYNAMIC" + "EPHEMERAL" ], "default": "IMPERATIVE" }, @@ -412,10 +412,10 @@ "DEFAULT", "DECLARATIVE", "DECLARATIVE_ORPHANED", - "DYNAMIC" + "EPHEMERAL" ], "default": "IMPERATIVE", - "description": "Origin specifies the origin of an object.\nObjects can have five different origins:\n- IMPERATIVE: the object was created via the API. 
This is assumed by default.\n- DEFAULT: the object is a default object, such as default roles, access scopes etc.\n- DECLARATIVE: the object is created via declarative configuration.\n- DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object)\n- DYNAMIC: the object is created via an internal API, generated on the fly and meant to be ephemeral.\nBased on the origin, different rules apply to the objects.\nObjects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration.\nAdditionally, they may not reference objects with the IMPERATIVE or DYNAMIC origin.\nObjects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration.\nThey may be referenced by all other objects.\nObjects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration.\nThey may reference all other objects.\nObjects with the DYNAMIC origin are neither allowed to be modified via API, nor via declarative configuration.\nThey may reference all other objects.\nObjects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration.\nDECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration.\nObjects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore.\nThey may be referenced by all other objects." + "description": "Origin specifies the origin of an object.\nObjects can have five different origins:\n- IMPERATIVE: the object was created via the API. This is assumed by default.\n- DEFAULT: the object is a default object, such as default roles, access scopes etc.\n- DECLARATIVE: the object is created via declarative configuration.\n- DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object)\n- EPHEMERAL: the object is created via an internal API, generated on the fly and meant to be ephemeral.\nBased on the origin, different rules apply to the objects.\nObjects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration.\nAdditionally, they may not reference objects with the IMPERATIVE or EPHEMERAL origin.\nObjects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration.\nThey may be referenced by all other objects.\nObjects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration.\nThey may reference all other objects.\nObjects with the EPHEMERAL origin are neither allowed to be modified via API, nor via declarative configuration.\nThey may reference all other objects.\nObjects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration.\nDECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration.\nObjects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore.\nThey may be referenced by all other objects." 
}, "storageTraitsVisibility": { "type": "string", diff --git a/generated/api/v1/notifier_service.swagger.json b/generated/api/v1/notifier_service.swagger.json index 91715919ade25..74d23e5c7a315 100644 --- a/generated/api/v1/notifier_service.swagger.json +++ b/generated/api/v1/notifier_service.swagger.json @@ -904,10 +904,10 @@ "DEFAULT", "DECLARATIVE", "DECLARATIVE_ORPHANED", - "DYNAMIC" + "EPHEMERAL" ], "default": "IMPERATIVE", - "description": "Origin specifies the origin of an object.\nObjects can have five different origins:\n- IMPERATIVE: the object was created via the API. This is assumed by default.\n- DEFAULT: the object is a default object, such as default roles, access scopes etc.\n- DECLARATIVE: the object is created via declarative configuration.\n- DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object)\n- DYNAMIC: the object is created via an internal API, generated on the fly and meant to be ephemeral.\nBased on the origin, different rules apply to the objects.\nObjects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration.\nAdditionally, they may not reference objects with the IMPERATIVE or DYNAMIC origin.\nObjects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration.\nThey may be referenced by all other objects.\nObjects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration.\nThey may reference all other objects.\nObjects with the DYNAMIC origin are neither allowed to be modified via API, nor via declarative configuration.\nThey may reference all other objects.\nObjects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration.\nDECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration.\nObjects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore.\nThey may be referenced by all other objects." + "description": "Origin specifies the origin of an object.\nObjects can have five different origins:\n- IMPERATIVE: the object was created via the API. 
This is assumed by default.\n- DEFAULT: the object is a default object, such as default roles, access scopes etc.\n- DECLARATIVE: the object is created via declarative configuration.\n- DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object)\n- EPHEMERAL: the object is created via an internal API, generated on the fly and meant to be ephemeral.\nBased on the origin, different rules apply to the objects.\nObjects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration.\nAdditionally, they may not reference objects with the IMPERATIVE or EPHEMERAL origin.\nObjects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration.\nThey may be referenced by all other objects.\nObjects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration.\nThey may reference all other objects.\nObjects with the EPHEMERAL origin are neither allowed to be modified via API, nor via declarative configuration.\nThey may reference all other objects.\nObjects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration.\nDECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration.\nObjects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore.\nThey may be referenced by all other objects." }, "storageTraitsVisibility": { "type": "string", diff --git a/generated/api/v1/role_service.swagger.json b/generated/api/v1/role_service.swagger.json index 94bd86348d8dc..c286bd0c0e8b0 100644 --- a/generated/api/v1/role_service.swagger.json +++ b/generated/api/v1/role_service.swagger.json @@ -1088,10 +1088,10 @@ "DEFAULT", "DECLARATIVE", "DECLARATIVE_ORPHANED", - "DYNAMIC" + "EPHEMERAL" ], "default": "IMPERATIVE", - "description": "Origin specifies the origin of an object.\nObjects can have five different origins:\n- IMPERATIVE: the object was created via the API. 
This is assumed by default.\n- DEFAULT: the object is a default object, such as default roles, access scopes etc.\n- DECLARATIVE: the object is created via declarative configuration.\n- DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object)\n- DYNAMIC: the object is created via an internal API, generated on the fly and meant to be ephemeral.\nBased on the origin, different rules apply to the objects.\nObjects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration.\nAdditionally, they may not reference objects with the IMPERATIVE or DYNAMIC origin.\nObjects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration.\nThey may be referenced by all other objects.\nObjects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration.\nThey may reference all other objects.\nObjects with the DYNAMIC origin are neither allowed to be modified via API, nor via declarative configuration.\nThey may reference all other objects.\nObjects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration.\nDECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration.\nObjects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore.\nThey may be referenced by all other objects." + "description": "Origin specifies the origin of an object.\nObjects can have five different origins:\n- IMPERATIVE: the object was created via the API. This is assumed by default.\n- DEFAULT: the object is a default object, such as default roles, access scopes etc.\n- DECLARATIVE: the object is created via declarative configuration.\n- DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object)\n- EPHEMERAL: the object is created via an internal API, generated on the fly and meant to be ephemeral.\nBased on the origin, different rules apply to the objects.\nObjects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration.\nAdditionally, they may not reference objects with the IMPERATIVE or EPHEMERAL origin.\nObjects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration.\nThey may be referenced by all other objects.\nObjects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration.\nThey may reference all other objects.\nObjects with the EPHEMERAL origin are neither allowed to be modified via API, nor via declarative configuration.\nThey may reference all other objects.\nObjects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration.\nDECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration.\nObjects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore.\nThey may be referenced by all other objects." 
}, "storageTraitsVisibility": { "type": "string", diff --git a/generated/api/v1/signature_integration_service.swagger.json b/generated/api/v1/signature_integration_service.swagger.json index 681392ae39700..961e29549b7f8 100644 --- a/generated/api/v1/signature_integration_service.swagger.json +++ b/generated/api/v1/signature_integration_service.swagger.json @@ -344,10 +344,10 @@ "DEFAULT", "DECLARATIVE", "DECLARATIVE_ORPHANED", - "DYNAMIC" + "EPHEMERAL" ], "default": "IMPERATIVE", - "description": "Origin specifies the origin of an object.\nObjects can have five different origins:\n- IMPERATIVE: the object was created via the API. This is assumed by default.\n- DEFAULT: the object is a default object, such as default roles, access scopes etc.\n- DECLARATIVE: the object is created via declarative configuration.\n- DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object)\n- DYNAMIC: the object is created via an internal API, generated on the fly and meant to be ephemeral.\nBased on the origin, different rules apply to the objects.\nObjects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration.\nAdditionally, they may not reference objects with the IMPERATIVE or DYNAMIC origin.\nObjects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration.\nThey may be referenced by all other objects.\nObjects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration.\nThey may reference all other objects.\nObjects with the DYNAMIC origin are neither allowed to be modified via API, nor via declarative configuration.\nThey may reference all other objects.\nObjects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration.\nDECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration.\nObjects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore.\nThey may be referenced by all other objects." + "description": "Origin specifies the origin of an object.\nObjects can have five different origins:\n- IMPERATIVE: the object was created via the API. 
This is assumed by default.\n- DEFAULT: the object is a default object, such as default roles, access scopes etc.\n- DECLARATIVE: the object is created via declarative configuration.\n- DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object)\n- EPHEMERAL: the object is created via an internal API, generated on the fly and meant to be ephemeral.\nBased on the origin, different rules apply to the objects.\nObjects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration.\nAdditionally, they may not reference objects with the IMPERATIVE or EPHEMERAL origin.\nObjects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration.\nThey may be referenced by all other objects.\nObjects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration.\nThey may reference all other objects.\nObjects with the EPHEMERAL origin are neither allowed to be modified via API, nor via declarative configuration.\nThey may reference all other objects.\nObjects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration.\nDECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration.\nObjects with this origin will be cleaned up from the system immediately after they are not referenced by other resources anymore.\nThey may be referenced by all other objects." }, "storageTraitsVisibility": { "type": "string", diff --git a/generated/storage/traits.pb.go b/generated/storage/traits.pb.go index 4bdf49fb8d111..2b52cd5e6ca8c 100644 --- a/generated/storage/traits.pb.go +++ b/generated/storage/traits.pb.go @@ -132,15 +132,15 @@ func (Traits_Visibility) EnumDescriptor() ([]byte, []int) { // - DEFAULT: the object is a default object, such as default roles, access scopes etc. // - DECLARATIVE: the object is created via declarative configuration. // - DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object) -// - DYNAMIC: the object is created via an internal API, generated on the fly and meant to be ephemeral. +// - EPHEMERAL: the object is created via an internal API, generated on the fly and meant to be ephemeral. // Based on the origin, different rules apply to the objects. // Objects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration. -// Additionally, they may not reference objects with the IMPERATIVE or DYNAMIC origin. +// Additionally, they may not reference objects with the IMPERATIVE or EPHEMERAL origin. // Objects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration. // They may be referenced by all other objects. // Objects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration. // They may reference all other objects. -// Objects with the DYNAMIC origin are neither allowed to be modified via API, nor via declarative configuration. +// Objects with the EPHEMERAL origin are neither allowed to be modified via API, nor via declarative configuration. // They may reference all other objects. // Objects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration. 
// DECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration. @@ -153,7 +153,7 @@ const ( Traits_DEFAULT Traits_Origin = 1 Traits_DECLARATIVE Traits_Origin = 2 Traits_DECLARATIVE_ORPHANED Traits_Origin = 3 - Traits_DYNAMIC Traits_Origin = 4 + Traits_EPHEMERAL Traits_Origin = 4 ) // Enum value maps for Traits_Origin. @@ -163,14 +163,14 @@ var ( 1: "DEFAULT", 2: "DECLARATIVE", 3: "DECLARATIVE_ORPHANED", - 4: "DYNAMIC", + 4: "EPHEMERAL", } Traits_Origin_value = map[string]int32{ "IMPERATIVE": 0, "DEFAULT": 1, "DECLARATIVE": 2, "DECLARATIVE_ORPHANED": 3, - "DYNAMIC": 4, + "EPHEMERAL": 4, } ) @@ -277,7 +277,7 @@ var File_storage_traits_proto protoreflect.FileDescriptor const file_storage_traits_proto_rawDesc = "" + "\n" + - "\x14storage/traits.proto\x12\astorage\x1a\x1fgoogle/protobuf/timestamp.proto\"\xbb\x03\n" + + "\x14storage/traits.proto\x12\astorage\x1a\x1fgoogle/protobuf/timestamp.proto\"\xbd\x03\n" + "\x06Traits\x12G\n" + "\x0fmutability_mode\x18\x01 \x01(\x0e2\x1e.storage.Traits.MutabilityModeR\x0emutabilityMode\x12:\n" + "\n" + @@ -293,14 +293,14 @@ const file_storage_traits_proto_rawDesc = "" + "Visibility\x12\v\n" + "\aVISIBLE\x10\x00\x12\n" + "\n" + - "\x06HIDDEN\x10\x01\"]\n" + + "\x06HIDDEN\x10\x01\"_\n" + "\x06Origin\x12\x0e\n" + "\n" + "IMPERATIVE\x10\x00\x12\v\n" + "\aDEFAULT\x10\x01\x12\x0f\n" + "\vDECLARATIVE\x10\x02\x12\x18\n" + - "\x14DECLARATIVE_ORPHANED\x10\x03\x12\v\n" + - "\aDYNAMIC\x10\x04B.\n" + + "\x14DECLARATIVE_ORPHANED\x10\x03\x12\r\n" + + "\tEPHEMERAL\x10\x04B.\n" + "\x19io.stackrox.proto.storageZ\x11./storage;storageb\x06proto3" var ( diff --git a/pkg/declarativeconfig/context.go b/pkg/declarativeconfig/context.go index 3ebb4135f2b69..b507bbc34d561 100644 --- a/pkg/declarativeconfig/context.go +++ b/pkg/declarativeconfig/context.go @@ -41,7 +41,7 @@ func CanModifyResource(ctx context.Context, resource ResourceWithTraits) bool { return IsDeclarativeOrigin(resource) } if ctx.Value(originCheckerKey{}) == allowModifyDeclarativeOrImperative { - return IsDeclarativeOrigin(resource) || IsImperativeOrigin(resource) || IsDynamicOrigin(resource) + return IsDeclarativeOrigin(resource) || IsImperativeOrigin(resource) || IsEphemeralOrigin(resource) } - return IsImperativeOrigin(resource) || IsDynamicOrigin(resource) + return IsImperativeOrigin(resource) || IsEphemeralOrigin(resource) } diff --git a/pkg/declarativeconfig/context_test.go b/pkg/declarativeconfig/context_test.go index a412a27b7fa93..fd850da97f5b4 100644 --- a/pkg/declarativeconfig/context_test.go +++ b/pkg/declarativeconfig/context_test.go @@ -18,7 +18,7 @@ func (m *resourceWithTraitsMock) GetTraits() *storage.Traits { func TestContext(t *testing.T) { imperativeResource := &resourceWithTraitsMock{origin: storage.Traits_IMPERATIVE} - dynamicResource := &resourceWithTraitsMock{origin: storage.Traits_DYNAMIC} + ephemeralResource := &resourceWithTraitsMock{origin: storage.Traits_EPHEMERAL} declarativeResource := &resourceWithTraitsMock{origin: storage.Traits_DECLARATIVE} defaultResource := &resourceWithTraitsMock{origin: storage.Traits_DEFAULT} ctx := context.Background() @@ -26,8 +26,8 @@ func TestContext(t *testing.T) { declarativeOrImperativeCtx := WithModifyDeclarativeOrImperative(ctx) // 1.1. empty context can modify imperative origin assert.True(t, CanModifyResource(ctx, imperativeResource)) - // 1.2. empty context can modify dynamic origin - assert.True(t, CanModifyResource(ctx, dynamicResource)) + // 1.2. 
empty context can modify ephemeral origin + assert.True(t, CanModifyResource(ctx, ephemeralResource)) // 2. empty context can't modify declarative origin assert.False(t, CanModifyResource(ctx, declarativeResource)) // 3. empty context can't modify default origin @@ -36,16 +36,16 @@ func TestContext(t *testing.T) { assert.True(t, CanModifyResource(declarativeCtx, declarativeResource)) // 5.1. context.WithModifyDeclarativeResource can't modify imperative origin assert.False(t, CanModifyResource(declarativeCtx, imperativeResource)) - // 5.2. context.WithModifyDeclarativeResource can't modify dynamic origin - assert.False(t, CanModifyResource(declarativeCtx, dynamicResource)) + // 5.2. context.WithModifyDeclarativeResource can't modify ephemeral origin + assert.False(t, CanModifyResource(declarativeCtx, ephemeralResource)) // 6. context.WithModifyDeclarativeResource can't modify default origin assert.False(t, CanModifyResource(declarativeCtx, defaultResource)) // 7. context.WithModifyDeclarativeOrImperative can modify declarative origin assert.True(t, CanModifyResource(declarativeOrImperativeCtx, declarativeResource)) // 8.1. context.WithModifyDeclarativeOrImperative can modify imperative origin assert.True(t, CanModifyResource(declarativeOrImperativeCtx, imperativeResource)) - // 8.2. context.WithModifyDeclarativeOrImperative can modify dynamic origin - assert.True(t, CanModifyResource(declarativeOrImperativeCtx, dynamicResource)) + // 8.2. context.WithModifyDeclarativeOrImperative can modify ephemeral origin + assert.True(t, CanModifyResource(declarativeOrImperativeCtx, ephemeralResource)) // 9. context.WithModifyDeclarativeOrImperative can't modify default origin assert.False(t, CanModifyResource(declarativeOrImperativeCtx, defaultResource)) } diff --git a/pkg/declarativeconfig/origin.go b/pkg/declarativeconfig/origin.go index 9e3b055e16787..f23b14a6a7cd0 100644 --- a/pkg/declarativeconfig/origin.go +++ b/pkg/declarativeconfig/origin.go @@ -7,7 +7,7 @@ import ( // VerifyReferencedResourceOrigin returns an error if resource is forbidden from referencing other resource. func VerifyReferencedResourceOrigin(referenced, referencing ResourceWithTraits, referencedName, referencingName string) error { - if !IsDeclarativeOrigin(referencing) || (!IsImperativeOrigin(referenced) && !IsDynamicOrigin(referenced)) { + if !IsDeclarativeOrigin(referencing) || (!IsImperativeOrigin(referenced) && !IsEphemeralOrigin(referenced)) { return nil } // referenced is imperative or default, while referencing is not @@ -19,9 +19,9 @@ func IsDeclarativeOrigin(resource ResourceWithTraits) bool { return resource.GetTraits().GetOrigin() == storage.Traits_DECLARATIVE || resource.GetTraits().GetOrigin() == storage.Traits_DECLARATIVE_ORPHANED } -// IsDynamicOrigin returns whether origin of resource is dynamic or not. -func IsDynamicOrigin(resource ResourceWithTraits) bool { - return resource.GetTraits().GetOrigin() == storage.Traits_DYNAMIC +// IsEphemeralOrigin returns whether origin of resource is ephemeral or not. +func IsEphemeralOrigin(resource ResourceWithTraits) bool { + return resource.GetTraits().GetOrigin() == storage.Traits_EPHEMERAL } // IsImperativeOrigin returns whether origin of resource is imperative or not. 
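The reference rule enforced by VerifyReferencedResourceOrigin above (only declaratively owned resources are restricted, and they may not reference IMPERATIVE or EPHEMERAL ones) can be summarized in a small standalone sketch. The Origin type and canReference helper below are simplified stand-ins for illustration, not the real types from pkg/declarativeconfig:

```go
// Sketch of the origin reference rule shown above: only DECLARATIVE and
// DECLARATIVE_ORPHANED resources are restricted, and they may not reference
// IMPERATIVE or EPHEMERAL resources.
package main

import "fmt"

type Origin int

const (
	Imperative Origin = iota
	Default
	Declarative
	DeclarativeOrphaned
	Ephemeral
)

// canReference reports whether a resource with the referencing origin is
// allowed to reference a resource with the referenced origin.
func canReference(referencing, referenced Origin) bool {
	isDeclarative := referencing == Declarative || referencing == DeclarativeOrphaned
	if !isDeclarative {
		// Mirrors the helper above: non-declarative referencing resources
		// are not restricted by this rule.
		return true
	}
	return referenced != Imperative && referenced != Ephemeral
}

func main() {
	fmt.Println(canReference(Declarative, Ephemeral)) // false
	fmt.Println(canReference(Declarative, Default))   // true
	fmt.Println(canReference(Ephemeral, Imperative))  // true
}
```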
diff --git a/pkg/declarativeconfig/origin_test.go b/pkg/declarativeconfig/origin_test.go index 0ad1d22ad90b9..f48098934ddf3 100644 --- a/pkg/declarativeconfig/origin_test.go +++ b/pkg/declarativeconfig/origin_test.go @@ -20,8 +20,8 @@ func TestVerifyReferencedResourceOrigin(t *testing.T) { declarativeTraits := &storage.Traits{ Origin: storage.Traits_DECLARATIVE, } - dynamicTraits := &storage.Traits{ - Origin: storage.Traits_DYNAMIC, + ephemeralTraits := &storage.Traits{ + Origin: storage.Traits_EPHEMERAL, } imperativeTraits := &storage.Traits{ Origin: storage.Traits_IMPERATIVE, @@ -38,25 +38,25 @@ func TestVerifyReferencedResourceOrigin(t *testing.T) { testNoError(t, declarativeTraits, orphanedTraits) testNoError(t, declarativeTraits, defaultTraits) testError(t, declarativeTraits, imperativeTraits) - testError(t, declarativeTraits, dynamicTraits) + testError(t, declarativeTraits, ephemeralTraits) testNoError(t, orphanedTraits, declarativeTraits) testNoError(t, orphanedTraits, orphanedTraits) testNoError(t, orphanedTraits, defaultTraits) testError(t, orphanedTraits, imperativeTraits) - testError(t, orphanedTraits, dynamicTraits) + testError(t, orphanedTraits, ephemeralTraits) testNoError(t, imperativeTraits, declarativeTraits) testNoError(t, imperativeTraits, orphanedTraits) testNoError(t, imperativeTraits, defaultTraits) testNoError(t, imperativeTraits, imperativeTraits) - testNoError(t, imperativeTraits, dynamicTraits) + testNoError(t, imperativeTraits, ephemeralTraits) - testNoError(t, dynamicTraits, declarativeTraits) - testNoError(t, dynamicTraits, orphanedTraits) - testNoError(t, dynamicTraits, defaultTraits) - testNoError(t, dynamicTraits, imperativeTraits) - testNoError(t, dynamicTraits, dynamicTraits) + testNoError(t, ephemeralTraits, declarativeTraits) + testNoError(t, ephemeralTraits, orphanedTraits) + testNoError(t, ephemeralTraits, defaultTraits) + testNoError(t, ephemeralTraits, imperativeTraits) + testNoError(t, ephemeralTraits, ephemeralTraits) } func testError(t *testing.T, referencing *storage.Traits, referenced *storage.Traits) { diff --git a/proto/storage/proto.lock b/proto/storage/proto.lock index 6fbcf0b159ace..a168c3d3e088c 100644 --- a/proto/storage/proto.lock +++ b/proto/storage/proto.lock @@ -18617,7 +18617,7 @@ "integer": 3 }, { - "name": "DYNAMIC", + "name": "EPHEMERAL", "integer": 4 } ] diff --git a/proto/storage/traits.proto b/proto/storage/traits.proto index dd17d33f38664..894802432c5ec 100644 --- a/proto/storage/traits.proto +++ b/proto/storage/traits.proto @@ -39,15 +39,15 @@ message Traits { // - DEFAULT: the object is a default object, such as default roles, access scopes etc. // - DECLARATIVE: the object is created via declarative configuration. // - DECLARATIVE_ORPHANED: the object is created via declarative configuration and then unsuccessfully deleted(for example, because it is referenced by another object) - // - DYNAMIC: the object is created via an internal API, generated on the fly and meant to be ephemeral. + // - EPHEMERAL: the object is created via an internal API, generated on the fly and meant to be ephemeral. // Based on the origin, different rules apply to the objects. // Objects with the DECLARATIVE origin are not allowed to be modified via API, only via declarative configuration. - // Additionally, they may not reference objects with the IMPERATIVE or DYNAMIC origin. + // Additionally, they may not reference objects with the IMPERATIVE or EPHEMERAL origin. 
// Objects with the DEFAULT origin are not allowed to be modified via either API or declarative configuration. // They may be referenced by all other objects. // Objects with the IMPERATIVE origin are allowed to be modified via API, not via declarative configuration. // They may reference all other objects. - // Objects with the DYNAMIC origin are neither allowed to be modified via API, nor via declarative configuration. + // Objects with the EPHEMERAL origin are neither allowed to be modified via API, nor via declarative configuration. // They may reference all other objects. // Objects with the DECLARATIVE_ORPHANED origin are not allowed to be modified via either API or declarative configuration. // DECLARATIVE_ORPHANED resource can become DECLARATIVE again if it is redefined in declarative configuration. @@ -58,7 +58,7 @@ message Traits { DEFAULT = 1; DECLARATIVE = 2; DECLARATIVE_ORPHANED = 3; - DYNAMIC = 4; + EPHEMERAL = 4; } Origin origin = 3; diff --git a/ui/apps/platform/src/types/traits.proto.ts b/ui/apps/platform/src/types/traits.proto.ts index 72f02b85f53bb..8fedb94f77712 100644 --- a/ui/apps/platform/src/types/traits.proto.ts +++ b/ui/apps/platform/src/types/traits.proto.ts @@ -10,5 +10,5 @@ export type TraitsOrigin = | 'DECLARATIVE' | 'DEFAULT' | 'DECLARATIVE_ORPHANED' - | 'DYNAMIC'; + | 'EPHEMERAL'; export type TraitsVisibility = 'VISIBLE' | 'HIDDEN'; diff --git a/ui/apps/platform/src/utils/traits.utils.ts b/ui/apps/platform/src/utils/traits.utils.ts index 224af1919ce41..928e9dea16398 100644 --- a/ui/apps/platform/src/utils/traits.utils.ts +++ b/ui/apps/platform/src/utils/traits.utils.ts @@ -9,7 +9,7 @@ export const traitsOriginLabels = { IMPERATIVE: 'User', DECLARATIVE: 'Declarative', DECLARATIVE_ORPHANED: 'Declarative, Orphaned', - DYNAMIC: 'Dynamic', + EPHEMERAL: 'Ephemeral', } as const; export const originLabelColours = { From 34b1a89f16fb5071a778163c8111febddd499b08 Mon Sep 17 00:00:00 2001 From: David Shrewsberry <99685630+dashrews78@users.noreply.github.com> Date: Wed, 28 Jan 2026 13:53:18 -0500 Subject: [PATCH 062/232] ROX-32843: m2m rollback (#18732) --- central/auth/datastore/datastore_impl.go | 2 +- central/auth/datastore/datastore_impl_test.go | 63 +++++++++++++++++++ 2 files changed, 64 insertions(+), 1 deletion(-) diff --git a/central/auth/datastore/datastore_impl.go b/central/auth/datastore/datastore_impl.go index be8cbef83eda2..f54b63b4fb31f 100644 --- a/central/auth/datastore/datastore_impl.go +++ b/central/auth/datastore/datastore_impl.go @@ -101,7 +101,7 @@ func (d *datastoreImpl) upsertAuthM2MConfigNoLock(ctx context.Context, // Upsert the token exchanger first, ensuring the config is valid and a token exchanger can be successfully // created from it. if err := d.set.UpsertTokenExchanger(ctx, config); err != nil { - return nil, d.wrapRollBackSet(ctx, err, storedConfig, config, existingExchanger) + return nil, d.wrapRollback(ctx, tx, err, storedConfig, config, existingExchanger) } // Upsert the config to the DB after the token exchanger has been successfully added. 
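The change above passes the open transaction into the rollback helper, so a failed token-exchanger upsert rolls the database transaction back instead of leaving it (and its connection) open; the test added below verifies that nothing is persisted in that case. A minimal sketch of the pattern, assuming hypothetical Tx and ExchangerSet interfaces rather than the datastore's real types and helper signature:

```go
// Sketch of the upsert-with-rollback pattern, using hypothetical interfaces.
package m2msketch

import (
	"context"
	"fmt"
)

// Tx is a stand-in for a database transaction handle.
type Tx interface {
	Commit(ctx context.Context) error
	Rollback(ctx context.Context) error
}

// ExchangerSet is a stand-in for the in-memory token exchanger registry.
type ExchangerSet interface {
	UpsertTokenExchanger(ctx context.Context, issuer string) error
	RemoveTokenExchanger(issuer string) error
}

func upsertConfig(ctx context.Context, tx Tx, set ExchangerSet, issuer string) error {
	if err := set.UpsertTokenExchanger(ctx, issuer); err != nil {
		// Roll back the open transaction so its connection is released, and
		// undo any partially registered exchanger (ordering is illustrative).
		if rbErr := tx.Rollback(ctx); rbErr != nil {
			return fmt.Errorf("upserting token exchanger: %w (rollback failed: %v)", err, rbErr)
		}
		_ = set.RemoveTokenExchanger(issuer)
		return fmt.Errorf("upserting token exchanger: %w", err)
	}
	// The config write itself is elided here; commit once both steps succeeded.
	return tx.Commit(ctx)
}
```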
diff --git a/central/auth/datastore/datastore_impl_test.go b/central/auth/datastore/datastore_impl_test.go index c851bee44cdba..6b85ed42dec92 100644 --- a/central/auth/datastore/datastore_impl_test.go +++ b/central/auth/datastore/datastore_impl_test.go @@ -4,6 +4,7 @@ package datastore import ( "context" + "errors" "fmt" "testing" @@ -448,3 +449,65 @@ func (s *datastorePostgresTestSuite) TestDeclarativeUpserts() { }) } } + +// TestUpsertTokenExchangerFailureRollsBackTransaction verifies that when UpsertTokenExchanger +// fails, the database transaction is properly rolled back and no data is persisted. +// This test was added to prevent connection leaks where the transaction was not being +// rolled back when the token exchanger creation failed (e.g., OIDC provider errors). +func (s *datastorePostgresTestSuite) TestUpsertTokenExchangerFailureRollsBackTransaction() { + controller := gomock.NewController(s.T()) + defer controller.Finish() + + authStore := store.New(s.pool.DB) + + // Mock UpsertTokenExchanger to return an error (simulating OIDC provider failure) + tokenExchangerError := errors.New("creating OIDC provider: 404 Not Found: NoSuchBucket") + mockSet := mocks.NewMockTokenExchangerSet(controller) + mockSet.EXPECT().GetTokenExchanger(gomock.Any()).Return(nil, false).AnyTimes() + mockSet.EXPECT().UpsertTokenExchanger(gomock.Any(), gomock.Any()).Return(tokenExchangerError).Times(1) + // Expect rollback operations to be called + mockSet.EXPECT().RemoveTokenExchanger(gomock.Any()).Return(nil).Times(1) + + authDataStore := New(authStore, s.roleDataStore, mockSet) + + testConfigID := uuid.NewV4().String() + testIssuerURL := "https://storage.googleapis.com/test-bucket" + + config := &storage.AuthMachineToMachineConfig{ + Id: testConfigID, + Type: storage.AuthMachineToMachineConfig_GENERIC, + TokenExpirationDuration: "5m", + Mappings: []*storage.AuthMachineToMachineConfig_Mapping{ + { + Key: "sub", + ValueExpression: "test-value", + Role: testRole1, + }, + }, + Issuer: testIssuerURL, + } + + // Attempt to upsert - should fail + result, err := authDataStore.UpsertAuthM2MConfig(s.ctx, config) + s.Error(err, "UpsertAuthM2MConfig should return an error when UpsertTokenExchanger fails") + s.Nil(result, "Result should be nil when upsert fails") + s.Contains(err.Error(), "NoSuchBucket", "Error should contain the original error message") + + // Verify NO config was persisted (transaction was rolled back) + // Use a fresh datastore with a permissive mock to read from the database + readMockSet := mocks.NewMockTokenExchangerSet(controller) + readMockSet.EXPECT().GetTokenExchanger(gomock.Any()).Return(nil, false).AnyTimes() + readMockSet.EXPECT().UpsertTokenExchanger(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + + readDataStore := New(authStore, s.roleDataStore, readMockSet) + + var foundConfig *storage.AuthMachineToMachineConfig + err = readDataStore.ForEachAuthM2MConfig(s.ctx, func(obj *storage.AuthMachineToMachineConfig) error { + if obj.GetId() == testConfigID || obj.GetIssuer() == testIssuerURL { + foundConfig = obj + } + return nil + }) + s.NoError(err) + s.Nil(foundConfig, "Config should NOT be persisted when UpsertTokenExchanger fails - transaction should have been rolled back") +} From 5e1371f3e8f1003efdb6d67098083dd983774e89 Mon Sep 17 00:00:00 2001 From: Vlad Bologa Date: Wed, 28 Jan 2026 20:03:18 +0100 Subject: [PATCH 063/232] ROX-32838: Document Console Plugin in Operator CSV (#18694) --- .../rhacs-operator.clusterserviceversion.yaml | 94 ++++++++++--------- 
.../rhacs-operator.clusterserviceversion.yaml | 3 + .../rhacs-operator.clusterserviceversion.yaml | 3 + 3 files changed, 55 insertions(+), 45 deletions(-) diff --git a/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml b/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml index b81b775108383..66fc06d0c03fc 100644 --- a/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml +++ b/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml @@ -1857,51 +1857,55 @@ spec: \ back to Central. These services allow users to enforce policies and monitor\ \ your OpenShift and Kubernetes clusters. Secured Cluster Services come as two\ \ Deployments (Sensor and Admission Controller) and one DaemonSet (Collector).\n\ - \n### Central Services Explained\n\n| Service | Deployment\ - \ Type | Description |\n| :------------------------------- | :--------------\ - \ | :-------------- |\n| Central | Deployment |\ - \ Users interact with Red Hat Advanced Cluster Security through the user interface\ - \ or APIs on Central. Central also sends notifications for violations and interacts\ - \ with integrations. |\n| Central DB | Deployment |\ - \ Central DB is a PostgreSQL-based persistent storage for the data collected and\ - \ managed by Central. |\n| Scanner | Deployment \ - \ | Scanner is a Red Hat developed and certified image scanner. Scanner analyzes\ - \ and reports vulnerabilities for images. Scanner uses HPA to scale the number\ - \ of replicas based on workload. |\n| Scanner DB | Deployment\ - \ | Scanner DB is a cache for vulnerability definitions to serve vulnerability\ - \ scanning use cases throughout the software development life cycle. |\n\n###\ - \ Secured Cluster Services Explained\n\n| Service | Deployment\ - \ Type | Description |\n| :------------------------------- | :--------------\ - \ | :-------------- |\n| Sensor | Deployment |\ - \ Sensor analyzes and monitors Kubernetes in secured clusters. |\n| Collector\ - \ | DaemonSet | Analyzes and monitors container activity\ - \ on Kubernetes nodes.|\n| Admission Controller | Deployment \ - \ | ValidatingWebhookConfiguration for enforcing policies in the deploy lifecycle.\ - \ |\n\n### Central Custom Resource\n\nCentral Services is the configuration template\ - \ for RHACS Central deployment. For all customization options, please visit the\ - \ RHACS documentation.\n\n### SecuredCluster Custom Resource\n\nSecuredCluster\ - \ is the configuration template for the RHACS Secured Cluster services.\n\n####\ - \ Installation Prerequisites\n\nBefore deploying a SecuredCluster resource, you\ - \ need to create a cluster init bundle secret.\n\n- **Through the RHACS UI:**\ - \ To create a cluster init bundle secret through the RHACS UI, navigate to `Platform\ - \ Configuration > Clusters`, and then click `Manage Tokens` in the top-right corner.\ - \ Select `Cluster Init Bundle`, and click `Generate Bundle`. 
Select `Download\ - \ Kubernetes secrets file`, and store the file under a name of your choice (for\ - \ example, `cluster-init-secrets.yaml`).\n- **Through the `roxctl` CLI:** To create\ - \ a cluster init bundle secret through the `roxctl` command-line interface, run\ - \ `roxctl central init-bundles generate --output-secrets `.\ - \ Choose any `name` and `file name` that you like.\n\nRun `oc project` and check\ - \ that it reports the correct namespace where you intend to deploy SecuredCluster.\ - \ In case you want to install SecuredCluster to a different namespace, select\ - \ it by running `oc project `.\nThen, run `oc create -f init-bundle.yaml`.\ - \ If you have chosen a name other than `init-bundle.yaml`, specify that file name\ - \ instead.\n\n#### Required Fields\n\nThe following attributes are required to\ - \ be specified. For all customization options, please visit the RHACS documentation.\n\ - \n| Parameter | Description |\n| :----------------- | :--------------\ - \ |\n| `clusterName` | The name given to this secured cluster. The cluster\ - \ will appear with this name in RHACS user interface. |\n| `centralEndpoint` \ - \ | This field should specify the address of the Central endpoint, including the\ - \ port number. `centralEndpoint` may be omitted if this SecuredCluster Custom\ + \n**Console Plugin:** RHACS provides a dynamic plugin that displays vulnerability\ + \ management information in the OpenShift web console. Install a SecuredCluster\ + \ to deploy the plugin. Enable the plugin by selecting *Operators* > *Installed\ + \ Operators* or by modifying the console Operator configuration.\n**Important:**\ + \ The Console Plugin requires OpenShift 4.19 or later.\n\n### Central Services\ + \ Explained\n\n| Service | Deployment Type | Description\ + \ |\n| :------------------------------- | :-------------- | :--------------\ + \ |\n| Central | Deployment | Users interact with\ + \ Red Hat Advanced Cluster Security through the user interface or APIs on Central.\ + \ Central also sends notifications for violations and interacts with integrations.\ + \ |\n| Central DB | Deployment | Central DB is a PostgreSQL-based\ + \ persistent storage for the data collected and managed by Central. |\n| Scanner\ + \ | Deployment | Scanner is a Red Hat developed\ + \ and certified image scanner. Scanner analyzes and reports vulnerabilities for\ + \ images. Scanner uses HPA to scale the number of replicas based on workload.\ + \ |\n| Scanner DB | Deployment | Scanner DB is a cache\ + \ for vulnerability definitions to serve vulnerability scanning use cases throughout\ + \ the software development life cycle. |\n\n### Secured Cluster Services Explained\n\ + \n| Service | Deployment Type | Description |\n|\ + \ :------------------------------- | :-------------- | :-------------- |\n| Sensor\ + \ | Deployment | Sensor analyzes and monitors Kubernetes\ + \ in secured clusters. |\n| Collector | DaemonSet \ + \ | Analyzes and monitors container activity on Kubernetes nodes.|\n| Admission\ + \ Controller | Deployment | ValidatingWebhookConfiguration for\ + \ enforcing policies in the deploy lifecycle. 
|\n\n### Central Custom Resource\n\ + \nCentral Services is the configuration template for RHACS Central deployment.\ + \ For all customization options, please visit the RHACS documentation.\n\n###\ + \ SecuredCluster Custom Resource\n\nSecuredCluster is the configuration template\ + \ for the RHACS Secured Cluster services.\n\n#### Installation Prerequisites\n\ + \nBefore deploying a SecuredCluster resource, you need to create a cluster init\ + \ bundle secret.\n\n- **Through the RHACS UI:** To create a cluster init bundle\ + \ secret through the RHACS UI, navigate to `Platform Configuration > Clusters`,\ + \ and then click `Manage Tokens` in the top-right corner. Select `Cluster Init\ + \ Bundle`, and click `Generate Bundle`. Select `Download Kubernetes secrets file`,\ + \ and store the file under a name of your choice (for example, `cluster-init-secrets.yaml`).\n\ + - **Through the `roxctl` CLI:** To create a cluster init bundle secret through\ + \ the `roxctl` command-line interface, run `roxctl central init-bundles generate\ + \ --output-secrets `. Choose any `name` and `file name` that\ + \ you like.\n\nRun `oc project` and check that it reports the correct namespace\ + \ where you intend to deploy SecuredCluster. In case you want to install SecuredCluster\ + \ to a different namespace, select it by running `oc project `.\nThen,\ + \ run `oc create -f init-bundle.yaml`. If you have chosen a name other than `init-bundle.yaml`,\ + \ specify that file name instead.\n\n#### Required Fields\n\nThe following attributes\ + \ are required to be specified. For all customization options, please visit the\ + \ RHACS documentation.\n\n| Parameter | Description |\n| :-----------------\ + \ | :-------------- |\n| `clusterName` | The name given to this secured cluster.\ + \ The cluster will appear with this name in RHACS user interface. |\n| `centralEndpoint`\ + \ | This field should specify the address of the Central endpoint, including\ + \ the port number. `centralEndpoint` may be omitted if this SecuredCluster Custom\ \ Resource is in the same cluster and namespace as Central. |\n" displayName: Advanced Cluster Security for Kubernetes icon: diff --git a/operator/config/manifests/bases/rhacs-operator.clusterserviceversion.yaml b/operator/config/manifests/bases/rhacs-operator.clusterserviceversion.yaml index 1bc1d81c05820..10dd4a701b801 100644 --- a/operator/config/manifests/bases/rhacs-operator.clusterserviceversion.yaml +++ b/operator/config/manifests/bases/rhacs-operator.clusterserviceversion.yaml @@ -1586,6 +1586,9 @@ spec: 2. **Secured Cluster Services** - Secured cluster services are placed on each cluster you manage and report back to Central. These services allow users to enforce policies and monitor your OpenShift and Kubernetes clusters. Secured Cluster Services come as two Deployments (Sensor and Admission Controller) and one DaemonSet (Collector). + **Console Plugin:** RHACS provides a dynamic plugin that displays vulnerability management information in the OpenShift web console. Install a SecuredCluster to deploy the plugin. Enable the plugin by selecting *Operators* > *Installed Operators* or by modifying the console Operator configuration. + **Important:** The Console Plugin requires OpenShift 4.19 or later. 
+ ### Central Services Explained | Service | Deployment Type | Description | diff --git a/operator/config/ui-metadata/bases/rhacs-operator.clusterserviceversion.yaml b/operator/config/ui-metadata/bases/rhacs-operator.clusterserviceversion.yaml index e6317c7cefaa7..b2e3e9ecfbe45 100644 --- a/operator/config/ui-metadata/bases/rhacs-operator.clusterserviceversion.yaml +++ b/operator/config/ui-metadata/bases/rhacs-operator.clusterserviceversion.yaml @@ -64,6 +64,9 @@ spec: 2. **Secured Cluster Services** - Secured cluster services are placed on each cluster you manage and report back to Central. These services allow users to enforce policies and monitor your OpenShift and Kubernetes clusters. Secured Cluster Services come as two Deployments (Sensor and Admission Controller) and one DaemonSet (Collector). + **Console Plugin:** RHACS provides a dynamic plugin that displays vulnerability management information in the OpenShift web console. Install a SecuredCluster to deploy the plugin. Enable the plugin by selecting *Operators* > *Installed Operators* or by modifying the console Operator configuration. + **Important:** The Console Plugin requires OpenShift 4.19 or later. + ### Central Services Explained | Service | Deployment Type | Description | From 52151781544378b3f9b5cbe6f0c27310bfcc8f38 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C3=ABl?= Date: Wed, 28 Jan 2026 20:43:08 +0100 Subject: [PATCH 064/232] perf: don't prune RBAC objects if internal token API was not used (#18729) --- .../internaltokens/service/service_impl.go | 4 + central/pruning/pruning.go | 23 +++++ central/pruning/pruning_test.go | 87 +++++++++++++++++++ 3 files changed, 114 insertions(+) diff --git a/central/auth/internaltokens/service/service_impl.go b/central/auth/internaltokens/service/service_impl.go index f8057a44bd625..3eaa93e8aec81 100644 --- a/central/auth/internaltokens/service/service_impl.go +++ b/central/auth/internaltokens/service/service_impl.go @@ -7,6 +7,7 @@ import ( "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" "github.com/pkg/errors" + "github.com/stackrox/rox/central/pruning" v1 "github.com/stackrox/rox/generated/internalapi/central/v1" "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/auth/tokens" @@ -77,6 +78,9 @@ func (s *serviceImpl) GenerateTokenForPermissionsAndScope( ctx context.Context, req *v1.GenerateTokenForPermissionsAndScopeRequest, ) (*v1.GenerateTokenForPermissionsAndScopeResponse, error) { + // Enable dynamic RBAC pruning. + pruning.EnableDynamicRBACPruning() + // Calculate expiry first so we can set it on the RBAC objects. expiresAt, err := s.getExpiresAt(ctx, req) if err != nil { diff --git a/central/pruning/pruning.go b/central/pruning/pruning.go index 02f9db3ae6054..614117ae0ae86 100644 --- a/central/pruning/pruning.go +++ b/central/pruning/pruning.go @@ -2,6 +2,8 @@ package pruning import ( "context" + "sync/atomic" + "testing" "time" "github.com/pkg/errors" @@ -79,8 +81,24 @@ var ( pruneInterval = env.PruneInterval.DurationSetting() orphanWindow = env.PruneOrphanedWindow.DurationSetting() + + // dynamicRBACPruningEnabled controls whether expired dynamic RBAC objects should be pruned. + // It is disabled by default and enabled on the first call to the internaltoken API service. + dynamicRBACPruningEnabled atomic.Bool ) +// EnableDynamicRBACPruning enables pruning of expired dynamic RBAC objects. +// This should be called on requests to the internaltoken API service. 
+func EnableDynamicRBACPruning() { + dynamicRBACPruningEnabled.Store(true) +} + +// disableDynamicRBACPruningForTest disables pruning of expired dynamic RBAC objects. +// This is for testing purposes only. +func disableDynamicRBACPruningForTest(*testing.T) { + dynamicRBACPruningEnabled.Store(false) +} + // GarbageCollector implements a generic garbage collection mechanism. type GarbageCollector interface { Start() @@ -1176,6 +1194,11 @@ func isExpired(traits *storage.Traits, now time.Time) bool { // that have an expiry timestamp in the past and have IMPERATIVE origin. // These objects are created dynamically by the internal token API for sensors. func (g *garbageCollectorImpl) removeExpiredDynamicRBACObjects() { + // Check if dynamic RBAC pruning is enabled. + if !dynamicRBACPruningEnabled.Load() { + return + } + defer metrics.SetPruningDuration(time.Now(), "DynamicRBACObjects") now := time.Now() diff --git a/central/pruning/pruning_test.go b/central/pruning/pruning_test.go index 76ad57f4944b1..517fa32fb0db3 100644 --- a/central/pruning/pruning_test.go +++ b/central/pruning/pruning_test.go @@ -2331,6 +2331,8 @@ func (s *PruningTestSuite) TestPruneOrphanedNodeCVEs() { } func (s *PruningTestSuite) TestRemoveExpiredDynamicRBACObjects() { + EnableDynamicRBACPruning() + now := time.Now() yesterday := now.Add(-24 * time.Hour) tomorrow := now.Add(24 * time.Hour) @@ -2615,6 +2617,91 @@ func (s *PruningTestSuite) TestRemoveExpiredDynamicRBACObjects() { } } +func (s *PruningTestSuite) TestRemoveExpiredDynamicRBACObjects_WhenDisabled() { + disableDynamicRBACPruningForTest(s.T()) + + now := time.Now() + yesterday := now.Add(-24 * time.Hour) + + // Create classic (non-expiring) access scope and permission set for roles to reference. + classicPS := &storage.PermissionSet{ + Id: uuid.NewV4().String(), + Name: "classic-ps-disabled", + ResourceToAccess: map[string]storage.Access{"Cluster": storage.Access_READ_ACCESS}, + Traits: nil, + } + classicAS := &storage.SimpleAccessScope{ + Id: uuid.NewV4().String(), + Name: "classic-as-disabled", + Rules: &storage.SimpleAccessScope_Rules{}, + Traits: nil, + } + + roleStore := roleDataStore.GetTestPostgresDataStore(s.T(), s.pool) + + // Add classic permission set and access scope for roles to reference. + s.Require().NoError(roleStore.AddPermissionSet(pruningCtx, classicPS)) + s.Require().NoError(roleStore.AddAccessScope(pruningCtx, classicAS)) + s.T().Cleanup(func() { + _ = roleStore.RemovePermissionSet(pruningCtx, classicPS.GetId()) + _ = roleStore.RemoveAccessScope(pruningCtx, classicAS.GetId()) + }) + + // Create expired RBAC objects. 
+ expiredRole := &storage.Role{ + Name: "expired-role-disabled", + PermissionSetId: classicPS.GetId(), + AccessScopeId: classicAS.GetId(), + Traits: &storage.Traits{ + ExpiresAt: timestamppb.New(yesterday), + }, + } + expiredPS := &storage.PermissionSet{ + Id: uuid.NewV4().String(), + Name: "expired-ps-disabled", + Traits: &storage.Traits{ + ExpiresAt: timestamppb.New(yesterday), + }, + } + expiredAS := &storage.SimpleAccessScope{ + Id: uuid.NewV4().String(), + Name: "expired-as-disabled", + Rules: &storage.SimpleAccessScope_Rules{}, + Traits: &storage.Traits{ + ExpiresAt: timestamppb.New(yesterday), + }, + } + + s.Require().NoError(roleStore.AddRole(pruningCtx, expiredRole)) + s.Require().NoError(roleStore.AddPermissionSet(pruningCtx, expiredPS)) + s.Require().NoError(roleStore.AddAccessScope(pruningCtx, expiredAS)) + s.T().Cleanup(func() { + _ = roleStore.RemoveRole(pruningCtx, expiredRole.GetName()) + _ = roleStore.RemovePermissionSet(pruningCtx, expiredPS.GetId()) + _ = roleStore.RemoveAccessScope(pruningCtx, expiredAS.GetId()) + }) + + gc := &garbageCollectorImpl{ + roleStore: roleStore, + } + + // Call pruning - it should be a no-op since pruning is disabled. + gc.removeExpiredDynamicRBACObjects() + + // Verify that none of the expired objects were deleted. + _, ok, err := roleStore.GetRole(pruningCtx, expiredRole.GetName()) + s.NoError(err) + s.True(ok, "expired role should still exist when pruning is disabled") + + _, ok, err = roleStore.GetPermissionSet(pruningCtx, expiredPS.GetId()) + s.NoError(err) + s.True(ok, "expired permission set should still exist when pruning is disabled") + + _, ok, err = roleStore.GetAccessScope(pruningCtx, expiredAS.GetId()) + s.NoError(err) + s.True(ok, "expired access scope should still exist when pruning is disabled") +} + func (s *PruningTestSuite) addSomePods(podDS podDatastore.DataStore, clusterID string, numberPods int) { for i := 0; i < numberPods; i++ { pod := &storage.Pod{ From 19e187fbbfad58f1194caec210c7c4a0d6236ffa Mon Sep 17 00:00:00 2001 From: Stephan Hesselmann Date: Wed, 28 Jan 2026 20:44:00 +0100 Subject: [PATCH 065/232] perf: parallelize subject access review (#18727) --- sensor/common/centralproxy/authorizer.go | 29 ++++++++++++++----- sensor/common/centralproxy/authorizer_test.go | 7 +++-- sensor/common/centralproxy/handler.go | 9 +++++- 3 files changed, 34 insertions(+), 11 deletions(-) diff --git a/sensor/common/centralproxy/authorizer.go b/sensor/common/centralproxy/authorizer.go index 6141544e0a042..ad106dc8a8dba 100644 --- a/sensor/common/centralproxy/authorizer.go +++ b/sensor/common/centralproxy/authorizer.go @@ -11,6 +11,7 @@ import ( "github.com/stackrox/rox/pkg/grpc/authn" pkghttputil "github.com/stackrox/rox/pkg/httputil" "github.com/stackrox/rox/pkg/telemetry/phonehome" + "golang.org/x/sync/errgroup" authenticationv1 "k8s.io/api/authentication/v1" authv1 "k8s.io/api/authorization/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -154,6 +155,8 @@ func (a *k8sAuthorizer) validateToken(ctx context.Context, token string) (*authe // access scope limited to the namespace. // - FullClusterAccessScope ("*"): SubjectAccessReview for all namespaces and rox token // with cluster-wide access scope. +// +// SAR checks are performed in parallel to reduce latency. func (a *k8sAuthorizer) authorize(ctx context.Context, userInfo *authenticationv1.UserInfo, r *http.Request) error { namespace := r.Header.Get(stackroxNamespaceHeader) // Skip authorization if the namespace header is empty or not set. 
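The authorize() change in the next hunk fans the individual SubjectAccessReview calls out to goroutines with errgroup.WithContext, so the first error or denial cancels the shared context and the remaining checks can return early. A minimal, self-contained sketch of that fan-out pattern (with a hypothetical check() helper standing in for the real SubjectAccessReview call, and assuming Go 1.22+ per-iteration loop variables) looks roughly like this:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// check stands in for a single SubjectAccessReview-style call.
// If another check has already failed and cancelled the context, it returns early.
func check(ctx context.Context, verb, resource string) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		// Pretend the review allowed the request.
		return nil
	}
}

func authorizeAll(ctx context.Context, verbs, resources []string) error {
	// groupCtx is cancelled as soon as any goroutine returns a non-nil error.
	g, groupCtx := errgroup.WithContext(ctx)
	for _, resource := range resources {
		for _, verb := range verbs {
			g.Go(func() error {
				return check(groupCtx, verb, resource)
			})
		}
	}
	// Wait blocks until all goroutines finish and returns the first error seen.
	return g.Wait()
}

func main() {
	err := authorizeAll(context.Background(), []string{"get", "list"}, []string{"pods", "deployments"})
	fmt.Println("authorized:", err == nil)
}
```

Because errgroup.WithContext cancels the derived context on the first failure, checks that have not yet issued their API call can bail out instead of sending further requests, which is where the latency win comes from.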
@@ -161,18 +164,30 @@ func (a *k8sAuthorizer) authorize(ctx context.Context, userInfo *authenticationv return nil } + // Use errgroup with context cancellation to short-circuit on first error/denial. + g, groupCtx := errgroup.WithContext(ctx) + for _, resource := range a.resourcesToCheck { for _, verb := range a.verbsToCheck { - allowed, err := a.performSubjectAccessReview(ctx, userInfo, verb, namespace, resource) - if err != nil { - return pkghttputil.Errorf(http.StatusInternalServerError, "checking %s permission for %s: %v", verb, resource.Resource, err) - } - if !allowed { - return formatForbiddenErr(userInfo.Username, verb, resource.Resource, resource.Group, namespace) - } + // Capture loop variables for the goroutine. + resource := resource + + g.Go(func() error { + allowed, err := a.performSubjectAccessReview(groupCtx, userInfo, verb, namespace, resource) + if err != nil { + return pkghttputil.Errorf(http.StatusInternalServerError, "checking %s permission for %s: %v", verb, resource.Resource, err) + } + if !allowed { + return formatForbiddenErr(userInfo.Username, verb, resource.Resource, resource.Group, namespace) + } + return nil + }) } } + if err := g.Wait(); err != nil { + return err //nolint:wrapcheck + } return nil } diff --git a/sensor/common/centralproxy/authorizer_test.go b/sensor/common/centralproxy/authorizer_test.go index d0307a08c1140..f1e44cea3ad16 100644 --- a/sensor/common/centralproxy/authorizer_test.go +++ b/sensor/common/centralproxy/authorizer_test.go @@ -196,7 +196,8 @@ func TestK8sAuthorizer_MissingPermission_Namespace(t *testing.T) { err := authorizer.authorize(context.Background(), userInfo, req) assert.Error(t, err) - assert.Contains(t, err.Error(), `user "limited-user" lacks LIST permission for resource "pods.core" in namespace "my-namespace"`) + // With parallel execution, any resource could fail first - check for the general pattern. + assert.Contains(t, err.Error(), `user "limited-user" lacks LIST permission for resource`) } func TestK8sAuthorizer_MissingPermission_ClusterWide(t *testing.T) { @@ -231,7 +232,8 @@ func TestK8sAuthorizer_MissingPermission_ClusterWide(t *testing.T) { err := authorizer.authorize(context.Background(), userInfo, req) assert.Error(t, err) - assert.Contains(t, err.Error(), `user "namespace-admin" lacks cluster-wide LIST permission for resource "pods.core"`) + // With parallel execution, any resource could fail first - check for the general pattern. 
+ assert.Contains(t, err.Error(), `user "namespace-admin" lacks cluster-wide LIST permission for resource`) } func TestK8sAuthorizer_SubjectAccessReviewError(t *testing.T) { @@ -254,7 +256,6 @@ func TestK8sAuthorizer_SubjectAccessReviewError(t *testing.T) { err := authorizer.authorize(context.Background(), userInfo, req) assert.Error(t, err) - assert.Contains(t, err.Error(), "checking get permission") assert.Contains(t, err.Error(), "API server unavailable") } diff --git a/sensor/common/centralproxy/handler.go b/sensor/common/centralproxy/handler.go index a59d6df632771..f9348a7923c04 100644 --- a/sensor/common/centralproxy/handler.go +++ b/sensor/common/centralproxy/handler.go @@ -22,7 +22,9 @@ import ( ) var ( - log = logging.LoggerForModule() + log = logging.LoggerForModule() + k8sClientQPS = 50.0 + k8sClientBurst = 100 _ common.Notifiable = (*Handler)(nil) _ common.CentralGRPCConnAware = (*Handler)(nil) @@ -74,6 +76,11 @@ func NewProxyHandler(centralEndpoint string, centralCertificates []*x509.Certifi if err != nil { return nil, errors.Wrap(err, "getting in-cluster config") } + // Set QPS and Burst to avoid client-side throttling. + // The default k8s client-go values (QPS=5, Burst=10) are quite conservative + // and can cause "client-side throttling, not priority and fairness" warnings. + restConfig.QPS = float32(k8sClientQPS) + restConfig.Burst = k8sClientBurst retryablehttp.ConfigureRESTConfig(restConfig) k8sClient, err := kubernetes.NewForConfig(restConfig) From 73d13ab17c1af6b835dadd2079d358636671e157 Mon Sep 17 00:00:00 2001 From: Marcin Owsiany Date: Wed, 28 Jan 2026 21:07:00 +0100 Subject: [PATCH 066/232] chore(deps): revert bump wheel from 0.45.1 to 0.46.2 (#18735) --- operator/bundle_helpers/requirements-build.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/operator/bundle_helpers/requirements-build.txt b/operator/bundle_helpers/requirements-build.txt index f9aa46337a0dc..5f7bb0f4c1dc5 100644 --- a/operator/bundle_helpers/requirements-build.txt +++ b/operator/bundle_helpers/requirements-build.txt @@ -74,9 +74,9 @@ flit-core==3.10.1 \ --hash=sha256:66e5b87874a0d6e39691f0e22f09306736b633548670ad3c09ec9db03c5662f7 \ --hash=sha256:cb31a76e8b31ad3351bb89e531f64ef2b05d1e65bd939183250bf81ddf4922a8 # via -r requirements-build.in -wheel==0.46.2 \ - --hash=sha256:33ae60725d69eaa249bc1982e739943c23b34b58d51f1cb6253453773aca6e65 \ - --hash=sha256:3d79e48fde9847618a5a181f3cc35764c349c752e2fe911e65fa17faab9809b0 +wheel==0.45.1 \ + --hash=sha256:661e1abd9198507b1409a20c02106d9670b2576e916d58f520316666abca6729 \ + --hash=sha256:708e7481cc80179af0e556bbf0cc00b8444c7321e2700b8d8580231d13017248 # via -r requirements-build.in # The following packages are considered to be unsafe in a requirements file: From 816a3911e0f5787f2017b64851afa64b795c10f6 Mon Sep 17 00:00:00 2001 From: David Vail Date: Wed, 28 Jan 2026 15:42:07 -0500 Subject: [PATCH 067/232] fix(ui): Increase readability of server error messages in plugin (#18736) --- .../platform/src/utils/responseErrorUtils.ts | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/ui/apps/platform/src/utils/responseErrorUtils.ts b/ui/apps/platform/src/utils/responseErrorUtils.ts index 8cd46e2f3624e..a22ed9b8a5532 100644 --- a/ui/apps/platform/src/utils/responseErrorUtils.ts +++ b/ui/apps/platform/src/utils/responseErrorUtils.ts @@ -8,6 +8,17 @@ function isAxiosError(error: Error): error is AxiosError<{ message?: string }> { ); } +const commonStatusCodeNameMap = { + 401: 'Unauthorized', + 403: 
'Forbidden', + 404: 'Not Found', + 500: 'Internal Server Error', + 501: 'Not Implemented', + 502: 'Bad Gateway', + 503: 'Service Unavailable', + 504: 'Gateway Timeout', +} as const; + /* * Given argument of promise-catch method or try-catch block for an axios call, * return error message. @@ -23,7 +34,11 @@ export function getAxiosErrorMessage(error: unknown): string { 'result' in error.networkError && typeof error.networkError.result === 'string' ) { - return `${error.networkError.message} [${error.networkError.result}]`; + // Display a user-friendly error message for common HTTP status codes, falling back to + // the error name for less common codes + const name = + commonStatusCodeNameMap[error.networkError.statusCode] ?? error.networkError.name; + return `${name}: ${error.networkError.result}`; } if (isAxiosError(error)) { From de381c617f41146109cb72170f6de309fda8d415 Mon Sep 17 00:00:00 2001 From: Brad Rogers <61400697+bradr5@users.noreply.github.com> Date: Wed, 28 Jan 2026 14:59:30 -0600 Subject: [PATCH 068/232] ROX-31574: Vm4vm bug bash fixes (#18700) --- .../workloadCves/WorkloadCves.selectors.ts | 2 +- .../VirtualMachineCvesOverviewPage.tsx | 2 +- .../Overview/VirtualMachinesCvesTable.tsx | 91 +++++++------------ .../VirtualMachinePageComponents.tsx | 56 +++--------- .../VirtualMachinePageHeader.tsx | 3 + .../VirtualMachinePageVulnerabilities.tsx | 72 +++++---------- .../components/SeverityCountLabels.tsx | 2 +- 7 files changed, 74 insertions(+), 154 deletions(-) diff --git a/ui/apps/platform/cypress/integration/vulnerabilities/workloadCves/WorkloadCves.selectors.ts b/ui/apps/platform/cypress/integration/vulnerabilities/workloadCves/WorkloadCves.selectors.ts index f5cde8aebcdb1..9ef5847d04591 100644 --- a/ui/apps/platform/cypress/integration/vulnerabilities/workloadCves/WorkloadCves.selectors.ts +++ b/ui/apps/platform/cypress/integration/vulnerabilities/workloadCves/WorkloadCves.selectors.ts @@ -77,7 +77,7 @@ export const selectors = { nonZeroImageSeverityCounts: 'td[data-label="Images by severity"] *[aria-label$="severity"i]:not([aria-label^="0"])', nonZeroCveSeverityCount: (severity) => - `span[aria-label*="${severity.toLowerCase()} severity cve count across this"]`, + `span[aria-label*="${severity.toLowerCase()} severity CVE count across this"]`, nonZeroImageSeverityCount: (severity) => `span[aria-label*="with ${severity.toLowerCase()} severity"]`, hiddenSeverityCount: `span[aria-label$="severity is hidden by the applied filter"]`, diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/Overview/VirtualMachineCvesOverviewPage.tsx b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/Overview/VirtualMachineCvesOverviewPage.tsx index 6b36c77ffa95b..8e0842108020b 100644 --- a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/Overview/VirtualMachineCvesOverviewPage.tsx +++ b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/Overview/VirtualMachineCvesOverviewPage.tsx @@ -13,7 +13,7 @@ function VirtualMachineCvesOverviewPage() { - Virtual Machine Vulnerabilities + Virtual machine vulnerabilities Prioritize and remediate observed CVEs across virtual machines diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/Overview/VirtualMachinesCvesTable.tsx b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/Overview/VirtualMachinesCvesTable.tsx index 3658bb24b9c1d..4437d7a24aa14 100644 --- 
a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/Overview/VirtualMachinesCvesTable.tsx +++ b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/Overview/VirtualMachinesCvesTable.tsx @@ -1,19 +1,10 @@ import { useCallback } from 'react'; import { Link } from 'react-router-dom-v5-compat'; -import { - Flex, - Pagination, - Skeleton, - Split, - SplitItem, - Title, - pluralize, -} from '@patternfly/react-core'; -import { Table, Tbody, Td, Th, Thead, Tr } from '@patternfly/react-table'; +import { Flex, FlexItem, Pagination } from '@patternfly/react-core'; +import { InnerScrollContainer, Table, Tbody, Td, Th, Thead, Tr } from '@patternfly/react-table'; import ColumnManagementButton from 'Components/ColumnManagementButton'; import DateDistance from 'Components/DateDistance'; -import { DynamicTableLabel } from 'Components/DynamicIcon'; import TbodyUnified from 'Components/TableStateTemplates/TbodyUnified'; import { generateVisibilityForColumns, @@ -26,7 +17,6 @@ import useURLSearch from 'hooks/useURLSearch'; import useURLSort from 'hooks/useURLSort'; import { listVirtualMachines } from 'services/VirtualMachineService'; import { getTableUIState } from 'utils/getTableUIState'; -import { getHasSearchApplied } from 'utils/searchUtils'; import { getVirtualMachineScannedComponentsCount, @@ -85,7 +75,6 @@ function VirtualMachinesCvesTable() { const managedColumnState = useManagedColumns('VirtualMachinesCvesTable', defaultColumns); const { page, perPage, setPage, setPerPage } = useURLPagination(DEFAULT_VM_PAGE_SIZE); const { searchFilter, setSearchFilter } = useURLSearch(); - const isFiltered = getHasSearchApplied(searchFilter); const { sortOption, getSortParams } = useURLSort({ sortFields, defaultSortOption, @@ -110,50 +99,35 @@ function VirtualMachinesCvesTable() { return ( <> - { - setSearchFilter(newFilter); - setPage(1, 'replace'); - }} - /> -

- - - - - {!isLoading ? ( - `${pluralize(data?.totalCount ?? 0, 'result')} found` - ) : ( - <Skeleton screenreaderText="Loading virtual machine count" /> - )} - - {isFiltered && } - - - - - - - setPage(newPage)} - onPerPageSelect={(_, newPerPage) => { - setPerPage(newPerPage); - }} - /> - - + + + { + setSearchFilter(newFilter); + setPage(1, 'replace'); + }} + /> + + + setPage(newPage)} + onPerPageSelect={(_, newPerPage) => { + setPerPage(newPerPage); + }} + /> + + Virtual machine @@ -271,7 +246,7 @@ function VirtualMachinesCvesTable() { )} />
-
+ ); } diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageComponents.tsx b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageComponents.tsx index 6b9021bc34bb2..e7b755add9ecd 100644 --- a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageComponents.tsx +++ b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageComponents.tsx @@ -1,17 +1,11 @@ import { useMemo } from 'react'; import { - Flex, PageSection, Pagination, - Skeleton, - Split, - SplitItem, - Title, Toolbar, ToolbarContent, ToolbarGroup, ToolbarItem, - pluralize, } from '@patternfly/react-core'; import CompoundSearchFilter from 'Components/CompoundSearchFilter/components/CompoundSearchFilter'; @@ -22,13 +16,11 @@ import type { } from 'Components/CompoundSearchFilter/types'; import SearchFilterSelectInclusive from 'Components/CompoundSearchFilter/components/SearchFilterSelectInclusive'; import { updateSearchFilter } from 'Components/CompoundSearchFilter/utils/utils'; -import { DynamicTableLabel } from 'Components/DynamicIcon'; import type { UseURLPaginationResult } from 'hooks/useURLPagination'; import type { UseUrlSearchReturn } from 'hooks/useURLSearch'; import type { UseURLSortResult } from 'hooks/useURLSort'; import type { VirtualMachine } from 'services/VirtualMachineService'; import { getTableUIState } from 'utils/getTableUIState'; -import { getHasSearchApplied } from 'utils/searchUtils'; import { applyVirtualMachineComponentsTableFilters, @@ -73,8 +65,6 @@ function VirtualMachinePageComponents({ const { page, perPage, setPage, setPerPage } = urlPagination; const { sortOption, getSortParams } = urlSorting; - const isFiltered = getHasSearchApplied(searchFilter); - const virtualMachineComponentsTableData = useMemo( () => getVirtualMachineComponentsTableData(virtualMachine), [virtualMachine] @@ -162,38 +152,20 @@ function VirtualMachinePageComponents({ -
- - - - - {!isLoadingVirtualMachine ? ( - `${pluralize(filteredVirtualMachineComponentsTableData.length, 'result')} found` - ) : ( - <Skeleton screenreaderText="Loading virtual machine vulnerability count" /> - )} - - {isFiltered && } - - - - setPage(newPage)} - onPerPageSelect={(_, newPerPage) => { - setPerPage(newPerPage); - }} - /> - - - -
+ setPage(newPage)} + onPerPageSelect={(_, newPerPage) => { + setPerPage(newPerPage); + }} + /> +
); } diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageHeader.tsx b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageHeader.tsx index a05445314851b..df3a888ef2161 100644 --- a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageHeader.tsx +++ b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageHeader.tsx @@ -57,6 +57,9 @@ function VirtualMachinePageHeader({ {virtualMachine.scan?.scanTime && ( )} + {virtualMachine?.facts?.guestOS && ( + + )}
); diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageVulnerabilities.tsx b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageVulnerabilities.tsx index 27aff86290a52..304d222a1baf7 100644 --- a/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageVulnerabilities.tsx +++ b/ui/apps/platform/src/Containers/Vulnerabilities/VirtualMachineCves/VirtualMachine/VirtualMachinePageVulnerabilities.tsx @@ -1,16 +1,6 @@ import { useMemo } from 'react'; -import { - Flex, - PageSection, - Pagination, - Skeleton, - Split, - SplitItem, - Title, - pluralize, -} from '@patternfly/react-core'; +import { Flex, PageSection, Pagination } from '@patternfly/react-core'; -import { DynamicTableLabel } from 'Components/DynamicIcon'; import ColumnManagementButton from 'Components/ColumnManagementButton'; import type { UseURLPaginationResult } from 'hooks/useURLPagination'; import type { UseUrlSearchReturn } from 'hooks/useURLSearch'; @@ -19,7 +9,6 @@ import { useManagedColumns } from 'hooks/useManagedColumns'; import type { VirtualMachine } from 'services/VirtualMachineService'; import { getTableUIState } from 'utils/getTableUIState'; -import { getHasSearchApplied } from 'utils/searchUtils'; import { applyVirtualMachineCveTableFilters, applyVirtualMachineCveTableSort, @@ -75,7 +64,6 @@ function VirtualMachinePageVulnerabilities({ const querySearchFilter = parseQuerySearchFilter(searchFilter); const hiddenStatuses = getHiddenStatuses(querySearchFilter); const hiddenSeverities = getHiddenSeverities(querySearchFilter); - const isFiltered = getHasSearchApplied(searchFilter); const managedColumnState = useManagedColumns(tableId, defaultColumns); @@ -157,45 +145,27 @@ function VirtualMachinePageVulnerabilities({ )} />
-
- - - - - {!isLoadingVirtualMachine ? ( - `${pluralize(filteredVirtualMachineTableData.length, 'result')} found` - ) : ( - <Skeleton screenreaderText="Loading virtual machine vulnerability count" /> - )} - - {isFiltered && } - - - - - - - setPage(newPage)} - onPerPageSelect={(_, newPerPage) => { - setPerPage(newPerPage); - }} - /> - - - + -
+ setPage(newPage)} + onPerPageSelect={(_, newPerPage) => { + setPerPage(newPerPage); + }} + /> + + ); } diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/components/SeverityCountLabels.tsx b/ui/apps/platform/src/Containers/Vulnerabilities/components/SeverityCountLabels.tsx index efbd15fdc6306..66cb1d1aa2941 100644 --- a/ui/apps/platform/src/Containers/Vulnerabilities/components/SeverityCountLabels.tsx +++ b/ui/apps/platform/src/Containers/Vulnerabilities/components/SeverityCountLabels.tsx @@ -23,7 +23,7 @@ function getTooltipContent(severity: string, severityCount?: number, entity?: st return `${capitalize(severity)} severity is hidden by the applied filter`; } if (entity) { - return `${severityCount} ${severity} severity cve count across this ${entity}`; + return `${severityCount} ${severity} severity CVE count across this ${entity}`; } return `Image count with ${severity} severity`; } From ac5653cba18eafdd9549990ecc227d29bde478a5 Mon Sep 17 00:00:00 2001 From: Vlad Bologa Date: Wed, 28 Jan 2026 23:47:14 +0100 Subject: [PATCH 069/232] ROX-32630: Enable ConsolePlugin (#18605) --- CHANGELOG.md | 1 + .../manifests/rhacs-operator.clusterserviceversion.yaml | 1 + .../bases/rhacs-operator.clusterserviceversion.yaml | 1 + .../bases/rhacs-operator.clusterserviceversion.yaml | 1 + pkg/features/list.go | 2 +- sensor/common/sensor/sensor.go | 2 +- sensor/kubernetes/listener/resources/clusteroperator.go | 9 +++++++++ tests/e2e/lib.sh | 3 +++ 8 files changed, 18 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 61c9e1aa6da8d..9f267c6769d6e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ Changes should still be described appropriately in JIRA/doc input pages, for inc - ROX-30094, ROX-30610, ROX-30740: Add new namespaces to Layered Products default config regex. - ROX-31960, ROX-32449: include and exclude filters for custom metrics. - ROX-30641: Added a new policy criteria "Days Since CVE Fix Was Available". +- ROX-32630: The OpenShift console plugin integrates the ACS vulnerability management view into OpenShift console. It is enabled by default for operator-deployed secured clusters. - Tech preview: operator-based installation available for community StackRox build. More information in [a separate README file](operator/install/README.md). - ROX-30585, ROX-30196 (Tech Preview): Added file activity monitoring, including new policy criteria for deployment or node file activity. 
diff --git a/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml b/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml index 66fc06d0c03fc..41743609ee1f5 100644 --- a/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml +++ b/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml @@ -13,6 +13,7 @@ metadata: : \"my-cluster\"\n }\n }\n]" capabilities: Seamless Upgrades categories: Security + console.openshift.io/plugins: '["advanced-cluster-security"]' containerImage: controller:latest createdAt: '' description: Red Hat Advanced Cluster Security (RHACS) operator provisions the diff --git a/operator/config/manifests/bases/rhacs-operator.clusterserviceversion.yaml b/operator/config/manifests/bases/rhacs-operator.clusterserviceversion.yaml index 10dd4a701b801..967ab805dcf3f 100644 --- a/operator/config/manifests/bases/rhacs-operator.clusterserviceversion.yaml +++ b/operator/config/manifests/bases/rhacs-operator.clusterserviceversion.yaml @@ -4,6 +4,7 @@ metadata: annotations: capabilities: Seamless Upgrades categories: Security + console.openshift.io/plugins: '["advanced-cluster-security"]' containerImage: controller:latest createdAt: "1999-12-31T23:59:59Z" description: Red Hat Advanced Cluster Security (RHACS) operator provisions the diff --git a/operator/config/ui-metadata/bases/rhacs-operator.clusterserviceversion.yaml b/operator/config/ui-metadata/bases/rhacs-operator.clusterserviceversion.yaml index b2e3e9ecfbe45..996390bf38243 100644 --- a/operator/config/ui-metadata/bases/rhacs-operator.clusterserviceversion.yaml +++ b/operator/config/ui-metadata/bases/rhacs-operator.clusterserviceversion.yaml @@ -4,6 +4,7 @@ metadata: annotations: capabilities: Seamless Upgrades categories: Security + console.openshift.io/plugins: '["advanced-cluster-security"]' containerImage: controller:latest createdAt: "1999-12-31T23:59:59Z" description: Red Hat Advanced Cluster Security (RHACS) operator provisions the diff --git a/pkg/features/list.go b/pkg/features/list.go index 61658416e8d06..1bf3a2d4e7823 100644 --- a/pkg/features/list.go +++ b/pkg/features/list.go @@ -120,7 +120,7 @@ var ( DelegatedBaseImageScanning = registerFeature("Enable delegated base image scanning to secured clusters", "ROX_DELEGATED_BASE_IMAGE_SCANNING") // OCPConsoleIntegration enables the /proxy/central API in Sensor that forwards requests to Central. - OCPConsoleIntegration = registerFeature("Enable features related to support the integration of ACS into the OpenShift console", "ROX_OCP_CONSOLE_INTEGRATION") + OCPConsoleIntegration = registerFeature("Enable features related to support the integration of ACS into the OpenShift console", "ROX_OCP_CONSOLE_INTEGRATION", enabled) // SensorInternalPubSub enables the internal pubsub system in sensor SensorInternalPubSub = registerFeature("Enables the internal PubSub system in Sensor", "ROX_SENSOR_PUBSUB", enabled) diff --git a/sensor/common/sensor/sensor.go b/sensor/common/sensor/sensor.go index acdcefd9ade93..8debefab721d7 100644 --- a/sensor/common/sensor/sensor.go +++ b/sensor/common/sensor/sensor.go @@ -247,7 +247,7 @@ func (s *Sensor) Start() { // Enable proxy endpoint for forwarding requests to Central on OpenShift. // The proxy is served on a dedicated HTTPS server with a service CA signed certificate. 
- if features.OCPConsoleIntegration.Enabled() && env.OpenshiftAPI.Setting() != "" { + if features.OCPConsoleIntegration.Enabled() && env.OpenshiftAPI.BooleanSetting() { handler, err := centralproxy.NewProxyHandler(s.centralEndpoint, centralCertificates, s.clusterID) if err != nil { utils.Should(errors.Wrap(err, "creating central proxy handler")) diff --git a/sensor/kubernetes/listener/resources/clusteroperator.go b/sensor/kubernetes/listener/resources/clusteroperator.go index 31356702aecf5..76046067d78d0 100644 --- a/sensor/kubernetes/listener/resources/clusteroperator.go +++ b/sensor/kubernetes/listener/resources/clusteroperator.go @@ -3,6 +3,7 @@ package resources import ( v1 "github.com/openshift/api/config/v1" "github.com/stackrox/rox/generated/internalapi/central" + "github.com/stackrox/rox/pkg/pods" "github.com/stackrox/rox/sensor/kubernetes/eventpipeline/component" "github.com/stackrox/rox/sensor/kubernetes/orchestratornamespaces" ) @@ -38,8 +39,16 @@ func (c *clusterOperatorDispatcher) ProcessEvent(obj, _ interface{}, _ central.R namespace: openshift-machine-api resource: machines */ + sensorNamespace := pods.GetPodNamespace() for _, obj := range clusterOperator.Status.RelatedObjects { if obj.Resource == "namespaces" { + // Skip sensor's own namespace to avoid marking StackRox components as orchestrator components. + // This can happen when a ConsolePlugin references the sensor namespace, causing the + // console-operator to add it to its relatedObjects. + if obj.Name == sensorNamespace { + log.Debugf("Skipping sensor namespace %s from orchestrator namespace map", obj.Name) + continue + } log.Debugf("Adding namespace %s to orchestrator namespace map", obj.Name) c.orchestratorNamespaces.AddNamespace(obj.Name) } diff --git a/tests/e2e/lib.sh b/tests/e2e/lib.sh index e373787fd7fe3..2b088a5689dd4 100755 --- a/tests/e2e/lib.sh +++ b/tests/e2e/lib.sh @@ -990,6 +990,9 @@ remove_existing_stackrox_resources() { if echo "${k8s_api_resources}" | grep -q "^securitycontextconstraints\.security\.openshift\.io$"; then resource_types="${resource_types},SecurityContextConstraints" fi + if echo "${k8s_api_resources}" | grep -q "^consoleplugins\.console\.openshift\.io$"; then + global_resource_types="${global_resource_types},consoleplugins.console.openshift.io" + fi if echo "${k8s_api_resources}" | grep -q "^podsecuritypolicies\.policy$"; then psps_supported=true global_resource_types="${global_resource_types},psp" From 78ce053f5f5ac4e3f9a4cddb4ce9bbf5e1f6c425 Mon Sep 17 00:00:00 2001 From: Brad Rogers <61400697+bradr5@users.noreply.github.com> Date: Wed, 28 Jan 2026 19:05:18 -0600 Subject: [PATCH 070/232] fix(ui): vulnerability breadcrumbs (#18741) --- .../WorkloadCves/WorkloadCvesPage.tsx | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/WorkloadCvesPage.tsx b/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/WorkloadCvesPage.tsx index 345cdf697513e..53eacbaf69db1 100644 --- a/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/WorkloadCvesPage.tsx +++ b/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/WorkloadCvesPage.tsx @@ -17,6 +17,7 @@ import ScannerV4IntegrationBanner from 'Components/ScannerV4IntegrationBanner'; import useFeatureFlags from 'hooks/useFeatureFlags'; import type { IsFeatureFlagEnabled } from 'hooks/useFeatureFlags'; import usePermissions from 'hooks/usePermissions'; +import { getQueryString } from 'utils/queryStringUtils'; import { 
getUrlQueryStringForSearchFilter } from 'utils/searchUtils'; import type { NonEmptyArray } from 'utils/type.utils'; import type { VulnerabilityState } from 'types/cve.proto'; @@ -30,7 +31,7 @@ import { WorkloadCveViewContext } from './WorkloadCveViewContext'; import type { WorkloadCveView } from './WorkloadCveViewContext'; import type { QuerySearchFilter, WorkloadEntityTab } from '../types'; -import { getOverviewPagePath, getWorkloadEntityPagePath } from '../utils/searchUtils'; +import { getWorkloadEntityPagePath } from '../utils/searchUtils'; export const userWorkloadViewId = 'user-workloads'; export const platformViewId = 'platform'; @@ -40,28 +41,22 @@ export const imagesWithoutCvesViewId = 'images-without-cves'; function getUrlBuilder(viewId: string): WorkloadCveView['urlBuilder'] { let urlRoot = ''; - let cveBase: 'Workload' | 'Node' | 'Platform' = 'Workload'; switch (viewId) { case userWorkloadViewId: urlRoot = vulnerabilitiesUserWorkloadsPath; - cveBase = 'Workload'; break; case platformViewId: urlRoot = vulnerabilitiesPlatformPath; - cveBase = 'Platform'; break; case allImagesViewId: urlRoot = vulnerabilitiesAllImagesPath; - cveBase = 'Workload'; break; case inactiveImagesViewId: urlRoot = vulnerabilitiesInactiveImagesPath; - cveBase = 'Workload'; break; case imagesWithoutCvesViewId: urlRoot = vulnerabilitiesImagesWithoutCvesPath; - cveBase = 'Workload'; break; default: // TODO Handle user-defined views, or error @@ -75,19 +70,15 @@ function getUrlBuilder(viewId: string): WorkloadCveView['urlBuilder'] { return { vulnMgmtBase: getAbsoluteUrl, cveList: (vulnerabilityState: VulnerabilityState) => - getAbsoluteUrl(getOverviewPagePath(cveBase, { vulnerabilityState, entityTab: 'CVE' })), + `${urlRoot}${getQueryString({ vulnerabilityState, entityTab: 'CVE' })}`, cveDetails: (cve: string, vulnerabilityState: VulnerabilityState) => getAbsoluteUrl(getWorkloadEntityPagePath('CVE', cve, vulnerabilityState)), imageList: (vulnerabilityState: VulnerabilityState) => - getAbsoluteUrl( - getOverviewPagePath(cveBase, { vulnerabilityState, entityTab: 'Image' }) - ), + `${urlRoot}${getQueryString({ vulnerabilityState, entityTab: 'Image' })}`, imageDetails: (id: string, vulnerabilityState: VulnerabilityState) => getAbsoluteUrl(getWorkloadEntityPagePath('Image', id, vulnerabilityState)), workloadList: (vulnerabilityState: VulnerabilityState) => - getAbsoluteUrl( - getOverviewPagePath(cveBase, { vulnerabilityState, entityTab: 'Deployment' }) - ), + `${urlRoot}${getQueryString({ vulnerabilityState, entityTab: 'Deployment' })}`, workloadDetails: ( workload: { id: string; From 05f5768b061357e710c6d1afc760591c8b0a4c0c Mon Sep 17 00:00:00 2001 From: Marcin Owsiany Date: Thu, 29 Jan 2026 13:51:47 +0100 Subject: [PATCH 071/232] chore(operator): cleanups (#18750) --- .github/workflows/fixxxer.yaml | 3 +++ .../config/default/kustomization.yaml | 7 ++----- operator/README.md | 15 +++++++++++---- .../flavors/development_build/kustomization.yaml | 7 ++----- .../config/flavors/opensource/kustomization.yaml | 7 ++----- operator/install/README.md | 4 ++-- 6 files changed, 22 insertions(+), 21 deletions(-) diff --git a/.github/workflows/fixxxer.yaml b/.github/workflows/fixxxer.yaml index 94ec15650b54d..f562e9ebe7eba 100644 --- a/.github/workflows/fixxxer.yaml +++ b/.github/workflows/fixxxer.yaml @@ -7,6 +7,9 @@ on: permissions: contents: write +env: + ROX_PRODUCT_BRANDING: RHACS_BRANDING + jobs: pr_commented: name: Run PR Fixxxer diff --git a/config-controller/config/default/kustomization.yaml 
b/config-controller/config/default/kustomization.yaml index 44ca30fe6a803..3afeeb23b0fae 100644 --- a/config-controller/config/default/kustomization.yaml +++ b/config-controller/config/default/kustomization.yaml @@ -1,11 +1,8 @@ # Adds namespace to all resources. namespace: config-controller-system -# Value of this field is prepended to the -# names of all resources, e.g. a deployment named -# "wordpress" becomes "alices-wordpress". -# Note that it should also match with the prefix (text before '-') of the namespace -# field above. +# Value of this field is prepended to the names of all resources. +# Note that it should also match with the prefix (text before '-system') of the namespace field above. namePrefix: config-controller- # Labels to add to all resources and selectors. diff --git a/operator/README.md b/operator/README.md index 7855ac223b4ab..273efc6748165 100644 --- a/operator/README.md +++ b/operator/README.md @@ -133,7 +133,13 @@ The recommended approach is the following. $ make undeploy uninstall ``` -### Bundling +### Building and using the operator bundle + +*Note:* currently creating a bundle is only supported using the RH ACS branding (ROX-11744). +You need to have the following set before running most targets mentioned in this section. +```bash +$ export ROX_PRODUCT_BRANDING=RHACS_BRANDING +``` ```bash # Refresh bundle metadata. Make sure to check the diff and commit it. @@ -159,19 +165,20 @@ $ make bundle-test $ make bundle-test-image ``` -### Launch the operator on the cluster with OLM and the bundle +#### Launching the operator on the cluster with OLM and the bundle ```bash # 0. Get the operator-sdk program. $ make operator-sdk -# 1. Install OLM. +# 1. Install OLM, unless running on OpenShift. $ make olm-install # 2. Create a namespace for testing bundle. $ kubectl create ns bundle-test # 2. Create image pull secrets. +# You can skip this and the next patch step when using cluster from infra.rox.systems. # If the inner magic does not work, just provide --docker-username and --docker-password with your DockerHub creds. $ kubectl -n bundle-test create secret docker-registry my-opm-image-pull-secrets \ --docker-server=https://quay.io/v2/ \ @@ -283,7 +290,7 @@ Now the latest version (based off of `make tag`) can be installed like so: ROX_PRODUCT_BRANDING=RHACS_BRANDING make deploy-via-olm ``` -This installs the operator into the `rhacs-operator-system` namespace. +This installs the operator into the `stackrox-operator-system` namespace. This can be overridden with a `TEST_NAMESPACE` argument. The version can be overridden with a `VERSION` argument. diff --git a/operator/config/flavors/development_build/kustomization.yaml b/operator/config/flavors/development_build/kustomization.yaml index 8b4912bf36879..30fa503ff29db 100644 --- a/operator/config/flavors/development_build/kustomization.yaml +++ b/operator/config/flavors/development_build/kustomization.yaml @@ -1,11 +1,8 @@ # Adds namespace to all resources. namespace: rhacs-operator-system -# Value of this field is prepended to the -# names of all resources, e.g. a deployment named -# "wordpress" becomes "alices-wordpress". -# Note that it should also match with the prefix (text before '-') of the namespace -# field above. +# Value of this field is prepended to the names of all resources. +# Note that it should also match with the prefix (text before '-system') of the namespace field above. namePrefix: rhacs-operator- # Labels to add to all resources and selectors. 
diff --git a/operator/config/flavors/opensource/kustomization.yaml b/operator/config/flavors/opensource/kustomization.yaml index 23a08b1957b9d..9f187bf137186 100644 --- a/operator/config/flavors/opensource/kustomization.yaml +++ b/operator/config/flavors/opensource/kustomization.yaml @@ -1,11 +1,8 @@ # Adds namespace to all resources. namespace: stackrox-operator-system -# Value of this field is prepended to the -# names of all resources, e.g. a deployment named -# "wordpress" becomes "alices-wordpress". -# Note that it should also match with the prefix (text before '-') of the namespace -# field above. +# Value of this field is prepended to the names of all resources. +# Note that it should also match with the prefix (text before '-system') of the namespace field above. namePrefix: stackrox-operator- # Labels to add to all resources and selectors. diff --git a/operator/install/README.md b/operator/install/README.md index 6911f515765cc..b37270ac85902 100644 --- a/operator/install/README.md +++ b/operator/install/README.md @@ -21,7 +21,7 @@ kubectl rollout status deployment -n stackrox-operator-system stackrox-operator- ## Where to go from here? -Once the operator is running, to actually deploy StackRox you need to create a `Central` and a `SecuredCluster` custom resource. +Once the operator is running, to actually deploy StackRox you need to create a `Central` and/or a `SecuredCluster` custom resource. Please have a look at the [samples](../config/samples) directory. Before applying the `SecuredCluster` CR you need to retrieve from central and apply on the cluster an init bundle or cluster registration secret. @@ -39,7 +39,7 @@ You may encounter a few references to RH ACS when using the operator in places s - the `UserAgent` header used by the operator controller when talking to the kube API server - central web UI when generating init bundles or cluster registration secrets -We hope to clean these up in the next release. +These will be cleaned up in a future release. ## How was this manifest created? 
From f091a9a42a68e3d2e962a34772a016944ea8267d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 29 Jan 2026 14:04:41 +0100 Subject: [PATCH 072/232] chore(deps): bump github.com/coreos/go-systemd/v22 from 22.6.0 to 22.7.0 (#18714) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 32077fba84acb..65bfed181c227 100644 --- a/go.mod +++ b/go.mod @@ -42,7 +42,7 @@ require ( github.com/cockroachdb/pebble/v2 v2.1.4 github.com/containers/image/v5 v5.36.2 github.com/coreos/go-oidc/v3 v3.17.0 - github.com/coreos/go-systemd/v22 v22.6.0 + github.com/coreos/go-systemd/v22 v22.7.0 github.com/dave/jennifer v1.7.1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/distribution/reference v0.6.0 diff --git a/go.sum b/go.sum index 7858ca10ba5b1..b2fd06160c1d1 100644 --- a/go.sum +++ b/go.sum @@ -454,8 +454,8 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo= -github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU= +github.com/coreos/go-systemd/v22 v22.7.0 h1:LAEzFkke61DFROc7zNLX/WA2i5J8gYqe0rSj9KI28KA= +github.com/coreos/go-systemd/v22 v22.7.0/go.mod h1:xNUYtjHu2EDXbsxz1i41wouACIwT7Ybq9o0BQhMwD0w= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo= From 74fb9e83efc016c3cbc65d00d7bf0b2a3ac4616c Mon Sep 17 00:00:00 2001 From: Marcin Owsiany Date: Thu, 29 Jan 2026 15:24:02 +0100 Subject: [PATCH 073/232] feat(operator): major.minor community operator image (#18751) --- operator/install/README.md | 2 +- operator/install/manifest.yaml | 20 ++++++++++++-------- scripts/ci/lib.sh | 13 +++++++++++++ 3 files changed, 26 insertions(+), 9 deletions(-) diff --git a/operator/install/README.md b/operator/install/README.md index b37270ac85902..661b05921eafc 100644 --- a/operator/install/README.md +++ b/operator/install/README.md @@ -44,6 +44,6 @@ These will be cleaned up in a future release. ## How was this manifest created? ```shell -BUILD_TAG=4.10.0 ROX_PRODUCT_BRANDING=STACKROX_BRANDING make -C operator/ build-installer +BUILD_TAG=4.10 ROX_PRODUCT_BRANDING=STACKROX_BRANDING make -C operator/ build-installer cp operator/dist/install.yaml operator/install/manifest.yaml ``` diff --git a/operator/install/manifest.yaml b/operator/install/manifest.yaml index 363bcc35cbe5f..880371f119f4b 100644 --- a/operator/install/manifest.yaml +++ b/operator/install/manifest.yaml @@ -2052,8 +2052,8 @@ spec: description: |- Can be specified as "Enabled" or "Disabled". If this field is not specified, the following defaulting takes place: - * for new installations, Scanner V4 is enabled starting with ACS 4.8; - * for upgrades to 4.8 from previous releases, Scanner V4 is disabled. 
+ * for upgrades to 4.8 from previous releases, the default is: Disabled; + * for new installations starting with ACS 4.8, the default is: Enabled. enum: - Default - Enabled @@ -2239,8 +2239,8 @@ spec: enforcement: description: |- Set to Disabled to disable policy enforcement for the admission controller. This is not recommended. - On new deployments starting with version 4.9, defaults to Enabled. - On old deployments, defaults to Enabled if at least one of listenOnCreates or listenOnUpdates is true. + On upgrades to 4.9 from previous releases, defaults to Enabled only if at least one of listenOnCreates or listenOnUpdates is true. + On new deployments starting with version 4.9, the default is: Enabled. enum: - Enabled - Disabled @@ -3845,10 +3845,14 @@ spec: type: object scannerComponent: description: |- - If you want to enable the Scanner V4 component set this to "AutoSense" + If you want to enable the Scanner V4 component set this to "AutoSense". + A value of "AutoSense" means that Scanner V4 should be installed, + unless there is a Central resource in the same namespace. + In that case typically a central Scanner V4 will be deployed as a component of Central. + A value of "Disabled" means that Scanner V4 should not be installed. If this field is not specified or set to "Default", the following defaulting takes place: - * for new installations, Scanner V4 is enabled starting with ACS 4.8; - * for upgrades to 4.8 from previous releases, Scanner V4 is disabled. + * for upgrades to 4.8 from previous releases, the default is: Disabled; + * for new installations starting with ACS 4.8, the default is: AutoSense. enum: - Default - AutoSense @@ -4599,7 +4603,7 @@ spec: resourceFieldRef: containerName: manager resource: limits.memory - image: quay.io/stackrox-io/stackrox-operator:4.10.0 + image: quay.io/stackrox-io/stackrox-operator:4.10 livenessProbe: httpGet: path: /healthz diff --git a/scripts/ci/lib.sh b/scripts/ci/lib.sh index c4bb6682f913e..eb11a6c2fb63c 100755 --- a/scripts/ci/lib.sh +++ b/scripts/ci/lib.sh @@ -366,6 +366,13 @@ push_operator_image() { docker tag "${registry}/stackrox-operator:${tag}" "${registry}/stackrox-operator:latest-${arch}" _push_operator_image "$registry" "latest" "$arch" fi + + if [[ $tag =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then + # For release builds, also push a major.minor tag, see operator/install/README.md + local major_minor="${tag%.*}" + docker tag "${registry}/stackrox-operator:${tag}" "${registry}/stackrox-operator:${major_minor}-${arch}" + _push_operator_image "$registry" "$major_minor" "$arch" + fi } push_scanner_image_manifest_lists() { @@ -453,6 +460,12 @@ push_operator_manifest_lists() { retry 5 true \ "$SCRIPTS_ROOT/scripts/ci/push-as-multiarch-manifest-list.sh" "${registry}/stackrox-operator:latest" "$architectures" | cat fi + if [[ $tag =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then + # For release builds, also push a major.minor tag, see operator/install/README.md + local major_minor="${tag%.*}" + retry 5 true \ + "$SCRIPTS_ROOT/scripts/ci/push-as-multiarch-manifest-list.sh" "${registry}/stackrox-operator:${major_minor}" "$architectures" | cat + fi } registry_rw_login() { From c9ecb16a17198cc25638e89f8239828357bbf86a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 29 Jan 2026 16:42:16 +0100 Subject: [PATCH 074/232] chore(deps): bump cloud.google.com/go/storage from 1.59.1 to 1.59.2 (#18746) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] 
<49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 65bfed181c227..48a95f626affd 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( cloud.google.com/go/compute/metadata v0.9.0 cloud.google.com/go/containeranalysis v0.14.2 cloud.google.com/go/securitycenter v1.38.1 - cloud.google.com/go/storage v1.59.1 + cloud.google.com/go/storage v1.59.2 dario.cat/mergo v1.0.2 github.com/Azure/azure-sdk-for-go-extensions v0.3.0 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0 diff --git a/go.sum b/go.sum index b2fd06160c1d1..77f021ff5dd42 100644 --- a/go.sum +++ b/go.sum @@ -73,8 +73,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.59.1 h1:DXAZLcTimtiXdGqDSnebROVPd9QvRsFVVlptz02Wk58= -cloud.google.com/go/storage v1.59.1/go.mod h1:cMWbtM+anpC74gn6qjLh+exqYcfmB9Hqe5z6adx+CLI= +cloud.google.com/go/storage v1.59.2 h1:gmOAuG1opU8YvycMNpP+DvHfT9BfzzK5Cy+arP+Nocw= +cloud.google.com/go/storage v1.59.2/go.mod h1:cMWbtM+anpC74gn6qjLh+exqYcfmB9Hqe5z6adx+CLI= cloud.google.com/go/trace v1.11.7 h1:kDNDX8JkaAG3R2nq1lIdkb7FCSi1rCmsEtKVsty7p+U= cloud.google.com/go/trace v1.11.7/go.mod h1:TNn9d5V3fQVf6s4SCveVMIBS2LJUqo73GACmq/Tky0s= dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= From 9316dade73c420be3a337cc6eb9a29415a67cd33 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 29 Jan 2026 16:42:44 +0100 Subject: [PATCH 075/232] chore(deps): bump google.golang.org/api from 0.262.0 to 0.263.0 (#18715) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 48a95f626affd..72f5323ce5240 100644 --- a/go.mod +++ b/go.mod @@ -150,7 +150,7 @@ require ( golang.org/x/time v0.14.0 golang.org/x/tools v0.41.0 golang.stackrox.io/grpc-http1 v0.5.1 - google.golang.org/api v0.262.0 + google.golang.org/api v0.263.0 google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b google.golang.org/grpc v1.78.0 @@ -504,7 +504,7 @@ require ( go.yaml.in/yaml/v2 v2.4.3 // indirect golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20260120174246-409b4a993575 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d // indirect gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.1 // indirect diff --git a/go.sum b/go.sum index 77f021ff5dd42..bd54662e8860e 100644 --- a/go.sum +++ b/go.sum @@ -2176,8 +2176,8 @@ google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdr google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= 
-google.golang.org/api v0.262.0 h1:4B+3u8He2GwyN8St3Jhnd3XRHlIvc//sBmgHSp78oNY= -google.golang.org/api v0.262.0/go.mod h1:jNwmH8BgUBJ/VrUG6/lIl9YiildyLd09r9ZLHiQ6cGI= +google.golang.org/api v0.263.0 h1:UFs7qn8gInIdtk1ZA6eXRXp5JDAnS4x9VRsRVCeKdbk= +google.golang.org/api v0.263.0/go.mod h1:fAU1xtNNisHgOF5JooAs8rRaTkl2rT3uaoNGo9NS3R8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2253,8 +2253,8 @@ google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 h1:GvESR9BIyHUahIb google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:yJ2HH4EHEDTd3JiLmhds6NkJ17ITVYOdV3m3VKOnws0= google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b h1:uA40e2M6fYRBf0+8uN5mLlqUtV192iiksiICIBkYJ1E= google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:Xa7le7qx2vmqB/SzWUBa7KdMjpdpAHlh5QCSnjessQk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260120174246-409b4a993575 h1:vzOYHDZEHIsPYYnaSYo60AqHkJronSu0rzTz/s4quL0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260120174246-409b4a993575/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d h1:xXzuihhT3gL/ntduUZwHECzAn57E8dA6l8SOtYWdD8Q= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= From 7dc812fe697a425dd7f9a7fdbacc9d2b01f045e7 Mon Sep 17 00:00:00 2001 From: Tomasz Janiszewski Date: Thu, 29 Jan 2026 16:04:23 +0000 Subject: [PATCH 076/232] chore(tests): Use b.Loop (#17491) Signed-off-by: Tomasz Janiszewski --- .../aggregation/aggregation_test.go | 3 +- .../datastore/datastore_bench_test.go | 4 +- .../deploytime/default_policies_bench_test.go | 4 +- .../resolvers/image_scan_benchmark_test.go | 16 ++-- .../resolvers/node_scan_benchmark_test.go | 12 +-- .../entity/datastore/datastore_bench_test.go | 12 +-- .../entity/networktree/manager_bench_test.go | 4 +- central/networkpolicies/graph/bench_test.go | 4 +- .../node/datastore/datastore_bench_test.go | 6 +- .../processindicator/datastore/bench_test.go | 28 ++---- .../processindicator/pruner/pruner_test.go | 2 +- .../requestmgr/manager_impl_benchmark_test.go | 2 +- .../compliance_checks_bench_test.go | 11 +-- pkg/booleanpolicy/default_policies_test.go | 6 +- pkg/concurrency/key_fence_test.go | 2 +- .../sortedkeys/sorted_keys_test.go | 2 +- pkg/concurrency/value_stream_bench_test.go | 91 ++++++------------- pkg/cryptoutils/fingerprint_test.go | 8 +- pkg/grpc/codec_test.go | 4 +- pkg/grpc/ratelimit/ratelimit_impl_test.go | 2 +- pkg/process/filter/filter_bench_test.go | 3 +- pkg/protoutils/clone_bench_test.go | 8 +- pkg/search/postgres/common_test.go | 4 +- pkg/signatures/factory_test.go | 2 +- scale/tests/compliance_test.go | 4 +- scale/tests/dashboard_test.go | 3 +- scale/tests/policies_test.go | 3 +- sensor/debugger/k8s/trace_test.go | 3 +- .../resolver/resolver_bench_test.go | 4 +- .../resources/deployment_store_bench_test.go | 8 +- .../networkpolicy_store_bench_test.go | 6 +- 
.../resources/rbac/store_impl_test.go | 10 +- sensor/tests/pipeline/bench_test.go | 4 +- 33 files changed, 115 insertions(+), 170 deletions(-) diff --git a/central/compliance/aggregation/aggregation_test.go b/central/compliance/aggregation/aggregation_test.go index c4db4dc168cd1..c79a61e96d080 100644 --- a/central/compliance/aggregation/aggregation_test.go +++ b/central/compliance/aggregation/aggregation_test.go @@ -584,11 +584,10 @@ func mockBenchmarkRunResult() *storage.ComplianceRunResults { func BenchmarkAggregatedResults(b *testing.B) { result := mockBenchmarkRunResult() - b.ResetTimer() a := &aggregatorImpl{ standards: mockStandardsRepo(b), } - for i := 0; i < b.N; i++ { + for b.Loop() { a.getAggregatedResults(nil, storage.ComplianceAggregation_CHECK, []*storage.ComplianceRunResults{result}, &mask{}) } } diff --git a/central/deployment/datastore/datastore_bench_test.go b/central/deployment/datastore/datastore_bench_test.go index a12a8c10bab0f..f02eb38d99276 100644 --- a/central/deployment/datastore/datastore_bench_test.go +++ b/central/deployment/datastore/datastore_bench_test.go @@ -32,7 +32,7 @@ func BenchmarkSearchAllDeployments(b *testing.B) { } b.Run("SearchRetrievalList", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { deployments, err := deploymentsDatastore.SearchListDeployments(ctx, search2.EmptyQuery()) assert.NoError(b, err) assert.Len(b, deployments, numDeployments) @@ -40,7 +40,7 @@ func BenchmarkSearchAllDeployments(b *testing.B) { }) b.Run("SearchRetrievalFull", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { deployments, err := deploymentsDatastore.SearchRawDeployments(ctx, search2.EmptyQuery()) assert.NoError(b, err) assert.Len(b, deployments, numDeployments) diff --git a/central/detection/deploytime/default_policies_bench_test.go b/central/detection/deploytime/default_policies_bench_test.go index e47b70fc171bc..c8b8d485f0b46 100644 --- a/central/detection/deploytime/default_policies_bench_test.go +++ b/central/detection/deploytime/default_policies_bench_test.go @@ -13,7 +13,6 @@ import ( ) func BenchmarkDefaultPolicies(b *testing.B) { - b.StopTimer() policySet = detection.NewPolicySet(nil) @@ -31,8 +30,7 @@ func BenchmarkDefaultPolicies(b *testing.B) { dep := fixtures.GetDeployment() images := fixtures.DeploymentImages() - b.StartTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { _, err := detection.Detect(deploytime.DetectionContext{}, booleanpolicy.EnhancedDeployment{ Deployment: dep, Images: images, diff --git a/central/graphql/resolvers/image_scan_benchmark_test.go b/central/graphql/resolvers/image_scan_benchmark_test.go index 0d02d0fd3da74..9df4b2e2d616b 100644 --- a/central/graphql/resolvers/image_scan_benchmark_test.go +++ b/central/graphql/resolvers/image_scan_benchmark_test.go @@ -91,7 +91,7 @@ func BenchmarkImageResolver(b *testing.B) { } b.Run("GetImageComponentsInImageScanResolver", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { response := schema.Exec(ctx, imageWithScanQuery, "getImages", @@ -106,7 +106,7 @@ func BenchmarkImageResolver(b *testing.B) { }) b.Run("GetImageComponentsWithoutImageScanResolver", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { response := schema.Exec(ctx, imageWithoutScanQuery, "getImages", @@ -121,7 +121,7 @@ func BenchmarkImageResolver(b *testing.B) { }) b.Run("GetImageComponentsDerivedFieldsWithImageScanResolver", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { response := schema.Exec(ctx, imageWithScanLongQuery, "getImages", 
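The loop rewrites in this patch follow the testing.B.Loop idiom added in Go 1.24: the classic `for i := 0; i < b.N; i++` loops become `for b.Loop()`, the explicit b.ResetTimer/b.StopTimer calls around setup disappear because work done before the loop is excluded from the measurement, and benchmarks that still need an index keep a manual counter. A small illustrative sketch of the idiom (names are made up, not taken from the repository), assuming the module already targets Go 1.24 or newer:

```go
package bench

import "testing"

// buildInput is untimed setup; b.Loop only measures the loop body below it.
func buildInput() []int {
	data := make([]int, 1024)
	for i := range data {
		data[i] = i
	}
	return data
}

func BenchmarkSum(b *testing.B) {
	data := buildInput()

	// Previously: b.ResetTimer(); for i := 0; i < b.N; i++ { ... }
	for b.Loop() {
		total := 0
		for _, v := range data {
			total += v
		}
		_ = total
	}
}

// When the body needs an iteration counter, keep a manual index alongside b.Loop,
// mirroring the `for i := 0; b.Loop(); i++` form used elsewhere in this patch.
func BenchmarkIndexed(b *testing.B) {
	items := buildInput()
	for i := 0; b.Loop(); i++ {
		_ = items[i%len(items)]
	}
}
```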
@@ -136,7 +136,7 @@ func BenchmarkImageResolver(b *testing.B) { }) b.Run("GetImageComponentsDerivedWithoutImageScanResolver", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { response := schema.Exec(ctx, imageWithoutScanLongQuery, "getImages", @@ -151,7 +151,7 @@ func BenchmarkImageResolver(b *testing.B) { }) b.Run("GetImageOnly", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { response := schema.Exec(ctx, imageOnlyQuery, "getImages", @@ -166,7 +166,7 @@ func BenchmarkImageResolver(b *testing.B) { }) b.Run("GetImageWithCounts", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { response := schema.Exec(ctx, imageWithCountsQuery, "getImages", @@ -181,7 +181,7 @@ func BenchmarkImageResolver(b *testing.B) { }) b.Run("GetImageScanTimeTopLevel", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { response := schema.Exec(ctx, imageWithTopLevelScanTimeQuery, "getImages", @@ -196,7 +196,7 @@ func BenchmarkImageResolver(b *testing.B) { }) b.Run("GetImageScanTimeNested", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { response := schema.Exec(ctx, imageWithNestedScanTimeQuery, "getImages", diff --git a/central/graphql/resolvers/node_scan_benchmark_test.go b/central/graphql/resolvers/node_scan_benchmark_test.go index 7ac5bb4eab9d1..f58034ca3d6d9 100644 --- a/central/graphql/resolvers/node_scan_benchmark_test.go +++ b/central/graphql/resolvers/node_scan_benchmark_test.go @@ -73,7 +73,7 @@ func BenchmarkNodeResolver(b *testing.B) { } b.Run("GetNodeComponentsInNodeScanResolver", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { response := schema.Exec(ctx, nodeWithScanQuery, "getNodes", @@ -88,7 +88,7 @@ func BenchmarkNodeResolver(b *testing.B) { }) b.Run("GetNodeComponentsWithoutNodeScanResolver", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { response := schema.Exec(ctx, nodeWithoutScanQuery, "getNodes", @@ -103,7 +103,7 @@ func BenchmarkNodeResolver(b *testing.B) { }) b.Run("GetNodeComponentsDerivedFieldsWithNodeScanResolver", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { response := schema.Exec(ctx, nodeWithScanLongQuery, "getNodes", @@ -118,7 +118,7 @@ func BenchmarkNodeResolver(b *testing.B) { }) b.Run("GetNodeComponentsDerivedWithoutNodeScanResolver", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { response := schema.Exec(ctx, nodeWithoutScanLongQuery, "getNodes", @@ -133,7 +133,7 @@ func BenchmarkNodeResolver(b *testing.B) { }) b.Run("GetNodeOnly", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { response := schema.Exec(ctx, nodeOnlyQuery, "getNodes", @@ -148,7 +148,7 @@ func BenchmarkNodeResolver(b *testing.B) { }) b.Run("GetNodeWithCounts", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { response := schema.Exec(ctx, nodeWithCountsQuery, "getNodes", diff --git a/central/networkgraph/entity/datastore/datastore_bench_test.go b/central/networkgraph/entity/datastore/datastore_bench_test.go index 3a79285aaf6f2..12114c5e40052 100644 --- a/central/networkgraph/entity/datastore/datastore_bench_test.go +++ b/central/networkgraph/entity/datastore/datastore_bench_test.go @@ -46,9 +46,7 @@ func BenchmarkNetEntityCreates(b *testing.B) { entities, err := testutils.GenRandomExtSrcNetworkEntity(pkgNet.IPv4, b.N, testconsts.Cluster1) require.NoError(b, err) - b.ResetTimer() - - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { require.NoError(b, ds.CreateExternalNetworkEntity(globalAccessCtx, entities[i], 
true)) } } @@ -68,9 +66,7 @@ func BenchmarkNetEntityCreateBatch(b *testing.B) { entities, err := testutils.GenRandomExtSrcNetworkEntity(pkgNet.IPv4, 10000, testconsts.Cluster1) require.NoError(b, err) - b.ResetTimer() - - for i := 0; i < b.N; i++ { + for b.Loop() { _, err = ds.CreateExtNetworkEntitiesForCluster(globalAccessCtx, testconsts.Cluster1, entities...) require.NoError(b, err) @@ -98,9 +94,7 @@ func BenchmarkNetEntityUpdates(b *testing.B) { require.NoError(b, ds.CreateExternalNetworkEntity(globalAccessCtx, e, true)) } - b.ResetTimer() - - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { require.NoError(b, ds.UpdateExternalNetworkEntity(globalAccessCtx, entities[i], true)) } } diff --git a/central/networkgraph/entity/networktree/manager_bench_test.go b/central/networkgraph/entity/networktree/manager_bench_test.go index 8c6f3e1bba18f..5b56cec55d96b 100644 --- a/central/networkgraph/entity/networktree/manager_bench_test.go +++ b/central/networkgraph/entity/networktree/manager_bench_test.go @@ -38,14 +38,14 @@ func BenchmarkCreateNetworkTree(b *testing.B) { // hence resulting in comparison with every node for each new entry. b.Run("initialize", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { mgr := initialize(b, entitiesByCluster) require.NotNil(b, mgr) } }) b.Run("insertIntoNetworkTree", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { b.StopTimer() mgr := initialize(b, entitiesByCluster) t := mgr.CreateNetworkTree(context.Background(), "c2") diff --git a/central/networkpolicies/graph/bench_test.go b/central/networkpolicies/graph/bench_test.go index 6c074797e35d6..9f1eed10788a9 100644 --- a/central/networkpolicies/graph/bench_test.go +++ b/central/networkpolicies/graph/bench_test.go @@ -102,8 +102,8 @@ func benchmarkEvaluateCluster(b *testing.B, numDeployments, numNetworkPolicies, applyPolicies(networkPolicies, deployments, numPoliciesApplyTo) matchIngressRules(networkPolicies, deployments, ingressMatches) matchEgressRules(networkPolicies, deployments, egressMatches) - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { m.GetGraph("", nil, deployments, nil, networkPolicies, false) } } diff --git a/central/node/datastore/datastore_bench_test.go b/central/node/datastore/datastore_bench_test.go index 2598a6fabdd4c..def4cb8ce4086 100644 --- a/central/node/datastore/datastore_bench_test.go +++ b/central/node/datastore/datastore_bench_test.go @@ -39,14 +39,14 @@ func BenchmarkNodes(b *testing.B) { // Stored node is read because it contains new scan. 
b.Run("upsertNodeWithOldScan", func(b *testing.B) { fakeNode.Scan.ScanTime.Seconds = fakeNode.GetScan().GetScanTime().GetSeconds() - 500 - for i := 0; i < b.N; i++ { + for b.Loop() { require.NoError(b, nodeDS.UpsertNode(ctx, fakeNode)) } }) b.Run("upsertNodeWithNewScan", func(b *testing.B) { fakeNode.Scan.ScanTime.Seconds = fakeNode.GetScan().GetScanTime().GetSeconds() + 500 - for i := 0; i < b.N; i++ { + for b.Loop() { require.NoError(b, nodeDS.UpsertNode(ctx, fakeNode)) } }) @@ -64,7 +64,7 @@ func BenchmarkNodes(b *testing.B) { }) b.Run("deleteForClusters", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { idx := i % len(nodes) err := nodeDS.DeleteAllNodesForCluster(ctx, nodes[idx].GetClusterId()) require.NoError(b, err) diff --git a/central/processindicator/datastore/bench_test.go b/central/processindicator/datastore/bench_test.go index f1a63a93633cc..2dd0ee9495eef 100644 --- a/central/processindicator/datastore/bench_test.go +++ b/central/processindicator/datastore/bench_test.go @@ -32,8 +32,8 @@ func BenchmarkAddIndicator(b *testing.B) { datastore := New(db, store, plopStore, nil) ctx := sac.WithAllAccess(context.Background()) - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { err := datastore.AddProcessIndicators(ctx, indicators...) require.NoError(b, err) } @@ -131,8 +131,7 @@ func BenchmarkProcessIndicators(b *testing.B) { // Search benchmarks - non-destructive, can run multiple times b.Run("Search/ByDeployment1", func(b *testing.B) { query := search.NewQueryBuilder().AddExactMatches(search.DeploymentID, fixtureconsts.Deployment1).ProtoQuery() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { results, err := datastore.SearchRawProcessIndicators(ctx, query) require.NoError(b, err) require.True(b, len(results) > 0) @@ -141,8 +140,7 @@ func BenchmarkProcessIndicators(b *testing.B) { b.Run("Search/ByDeployment2", func(b *testing.B) { query := search.NewQueryBuilder().AddExactMatches(search.DeploymentID, fixtureconsts.Deployment2).ProtoQuery() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { results, err := datastore.SearchRawProcessIndicators(ctx, query) require.NoError(b, err) require.True(b, len(results) > 0) @@ -151,8 +149,7 @@ func BenchmarkProcessIndicators(b *testing.B) { b.Run("Search/ByDeployment3", func(b *testing.B) { query := search.NewQueryBuilder().AddExactMatches(search.DeploymentID, fixtureconsts.Deployment3).ProtoQuery() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { results, err := datastore.SearchRawProcessIndicators(ctx, query) require.NoError(b, err) require.True(b, len(results) > 0) @@ -161,8 +158,7 @@ func BenchmarkProcessIndicators(b *testing.B) { b.Run("Search/ByD1PodID1", func(b *testing.B) { query := search.NewQueryBuilder().AddExactMatches(search.PodUID, d1PodID1).ProtoQuery() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { results, err := datastore.SearchRawProcessIndicators(ctx, query) require.NoError(b, err) require.True(b, len(results) > 0) @@ -171,8 +167,7 @@ func BenchmarkProcessIndicators(b *testing.B) { b.Run("Search/ByD2PodID1", func(b *testing.B) { query := search.NewQueryBuilder().AddExactMatches(search.PodUID, d2PodID1).ProtoQuery() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { results, err := datastore.SearchRawProcessIndicators(ctx, query) require.NoError(b, err) require.True(b, len(results) > 0) @@ -181,8 +176,7 @@ func BenchmarkProcessIndicators(b *testing.B) { b.Run("Search/ByD3PodID1", func(b *testing.B) { query := 
search.NewQueryBuilder().AddExactMatches(search.PodUID, d3PodID1).ProtoQuery() - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { results, err := datastore.SearchRawProcessIndicators(ctx, query) require.NoError(b, err) require.True(b, len(results) > 0) @@ -217,8 +211,7 @@ func BenchmarkProcessIndicators(b *testing.B) { } b.Run("Delete/ByDeployment1", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { b.StopTimer() // Re-add before each iteration err := datastore.AddProcessIndicators(ctx, d1Results...) @@ -232,8 +225,7 @@ func BenchmarkProcessIndicators(b *testing.B) { }) b.Run("Delete/ByD1PodID2", func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { b.StopTimer() // Re-add before each iteration err := datastore.AddProcessIndicators(ctx, d1PodID2Results...) diff --git a/central/processindicator/pruner/pruner_test.go b/central/processindicator/pruner/pruner_test.go index 23e56a3c23f03..69ec6b2b7a171 100644 --- a/central/processindicator/pruner/pruner_test.go +++ b/central/processindicator/pruner/pruner_test.go @@ -82,7 +82,7 @@ func BenchmarkRabbitMQPruning(b *testing.B) { for i := 0; i < 1000000; i++ { processes = append(processes, processToIDAndArgs(rabbitMQBeamSMPProcess())) } - for i := 0; i < b.N; i++ { + for b.Loop() { pruner := NewFactory(1, time.Second).StartPruning() pruner.Prune(processes) pruner.Finish() diff --git a/central/vulnmgmt/vulnerabilityrequest/manager/requestmgr/manager_impl_benchmark_test.go b/central/vulnmgmt/vulnerabilityrequest/manager/requestmgr/manager_impl_benchmark_test.go index 5c89980af44ce..cbd8f3918e9fb 100644 --- a/central/vulnmgmt/vulnerabilityrequest/manager/requestmgr/manager_impl_benchmark_test.go +++ b/central/vulnmgmt/vulnerabilityrequest/manager/requestmgr/manager_impl_benchmark_test.go @@ -32,7 +32,7 @@ func BenchmarkCreate(b *testing.B) { manager := New(nil, vulnReqDataStore, pendingCache, nil, nil, nil, nil, nil, nil, nil) b.Run("create", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { tag := fmt.Sprintf("%d", rand.Intn(numExistingReqs)) req := fixtures.GetImageScopeDeferralRequest("registry", "remote", tag, "cve-2023-xyz") req.Id = "" diff --git a/compliance/collection/compliance_checks/compliance_checks_bench_test.go b/compliance/collection/compliance_checks/compliance_checks_bench_test.go index dcf2e0a3d5d4f..765412602dbce 100644 --- a/compliance/collection/compliance_checks/compliance_checks_bench_test.go +++ b/compliance/collection/compliance_checks/compliance_checks_bench_test.go @@ -27,8 +27,8 @@ func BenchmarkRunChecks(b *testing.B) { conf := &sensor.MsgToCompliance_ScrapeConfig{ ContainerRuntime: storage.ContainerRuntime_DOCKER_CONTAINER_RUNTIME, } - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { getCheckResults(run, conf, data) } } @@ -47,8 +47,7 @@ func BenchmarkCompressResults(b *testing.B) { } results := getCheckResults(run, conf, data) - b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { _, err := cutils.CompressResults(results) if err != nil { panic(err) @@ -68,8 +67,8 @@ func BenchmarkChecksAndCompression(b *testing.B) { conf := &sensor.MsgToCompliance_ScrapeConfig{ ContainerRuntime: storage.ContainerRuntime_DOCKER_CONTAINER_RUNTIME, } - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { results := getCheckResults(run, conf, data) _, err := cutils.CompressResults(results) if err != nil { diff --git a/pkg/booleanpolicy/default_policies_test.go b/pkg/booleanpolicy/default_policies_test.go index 
19289373e242e..ad8356f49150b 100644 --- a/pkg/booleanpolicy/default_policies_test.go +++ b/pkg/booleanpolicy/default_policies_test.go @@ -3956,7 +3956,7 @@ func BenchmarkProcessPolicies(b *testing.B) { require.NoError(b, err) b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { for _, dep := range []*storage.Deployment{privilegedDep, nonPrivilegedDep} { for _, key := range []string{aptGetKey, aptGet2Key, curlKey, bashKey} { _, err := m.MatchDeploymentWithProcess(nil, enhancedDeployment(dep, images), indicators[dep.GetId()][key], processesNotInBaseline[dep.GetId()].Contains(key)) @@ -3977,7 +3977,7 @@ func BenchmarkProcessPolicies(b *testing.B) { b.Run(fmt.Sprintf("benchmark caching: %s/%s", dep.GetId(), key), func(b *testing.B) { var resNoCaching Violations b.Run("no caching", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { var err error resNoCaching, err = m.MatchDeploymentWithProcess(nil, enhancedDeployment(privilegedDep, images), indicator, notInBaseline) require.NoError(b, err) @@ -3987,7 +3987,7 @@ func BenchmarkProcessPolicies(b *testing.B) { var resWithCaching Violations b.Run("with caching", func(b *testing.B) { var cache CacheReceptacle - for i := 0; i < b.N; i++ { + for b.Loop() { var err error resWithCaching, err = m.MatchDeploymentWithProcess(&cache, enhancedDeployment(privilegedDep, images), indicator, notInBaseline) require.NoError(b, err) diff --git a/pkg/concurrency/key_fence_test.go b/pkg/concurrency/key_fence_test.go index 0f4d458b0dbda..9d96187b30fb3 100644 --- a/pkg/concurrency/key_fence_test.go +++ b/pkg/concurrency/key_fence_test.go @@ -9,7 +9,7 @@ import ( ) func BenchmarkKeyFence(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { counters := []int{0, 0} keyFence := NewKeyFence() diff --git a/pkg/concurrency/sortedkeys/sorted_keys_test.go b/pkg/concurrency/sortedkeys/sorted_keys_test.go index 3e2f52c83b833..919445a913982 100644 --- a/pkg/concurrency/sortedkeys/sorted_keys_test.go +++ b/pkg/concurrency/sortedkeys/sorted_keys_test.go @@ -43,7 +43,7 @@ func TestSortedKeys(t *testing.T) { } func BenchmarkSortedKeys(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { var sk SortedKeys sk, _ = sk.Insert([]byte("4key4")) sk, _ = sk.Insert([]byte("key1")) diff --git a/pkg/concurrency/value_stream_bench_test.go b/pkg/concurrency/value_stream_bench_test.go index 7c48e3e18620a..01b66462ad5a0 100644 --- a/pkg/concurrency/value_stream_bench_test.go +++ b/pkg/concurrency/value_stream_bench_test.go @@ -10,31 +10,29 @@ import ( ) func BenchmarkValueStreamWrite(b *testing.B) { - b.StopTimer() vs := NewValueStream(0) - b.StartTimer() - for i := 0; i < b.N; i++ { - vs.Push(i) + for b.Loop() { + vs.Push(1) } } func BenchmarkBufChanWrite(b *testing.B) { - b.StopTimer() - c := make(chan int, b.N) + c := make(chan struct{}, b.N) - b.StartTimer() - for i := 0; i < b.N; i++ { - c <- i + for b.Loop() { + c <- struct{}{} } } func BenchmarkBuf1ChanWrite(b *testing.B) { - b.StopTimer() - c := make(chan int, 1) + c := make(chan struct{}, 1) + b.Cleanup(func() { + close(c) + }) // Read from channel in a tight loop go func() { @@ -44,18 +42,17 @@ func BenchmarkBuf1ChanWrite(b *testing.B) { } }() - b.StartTimer() - for i := 0; i < b.N; i++ { - c <- i + for b.Loop() { + c <- struct{}{} } - b.StopTimer() - close(c) } func BenchmarkUnbufChanWrite(b *testing.B) { - b.StopTimer() - c := make(chan int) + c := make(chan struct{}) + b.Cleanup(func() { + close(c) + }) // Read from channel in a tight loop go func() { @@ -65,41 +62,33 @@ func 
BenchmarkUnbufChanWrite(b *testing.B) { } }() - b.StartTimer() - for i := 0; i < b.N; i++ { - c <- i + for b.Loop() { + c <- struct{}{} } - b.StopTimer() - close(c) } func BenchmarkSliceAppend(b *testing.B) { - b.StopTimer() - var slice []int + var slice []struct{} - b.StartTimer() - for i := 0; i < b.N; i++ { - slice = append(slice, i) //nolint:staticcheck // SA4010 slice append without reading is intended + for b.Loop() { + slice = append(slice, struct{}{}) //nolint:staticcheck // SA4010 slice append without reading is intended } } func BenchmarkSliceAppendWithMutex(b *testing.B) { - b.StopTimer() - var slice []int + var slice []struct{} var mutex sync.Mutex - b.StartTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { WithLock(&mutex, func() { - slice = append(slice, i) + slice = append(slice, struct{}{}) }) } } func BenchmarkValueStreamRead(b *testing.B) { - b.StopTimer() vs := NewValueStream(0) it := vs.Iterator(true) @@ -111,17 +100,14 @@ func BenchmarkValueStreamRead(b *testing.B) { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(b.N)*time.Millisecond) defer cancel() - b.StartTimer() - var err error - for i := 0; i < b.N && err == nil; i++ { + for b.Loop() && err == nil { it, err = it.Next(ctx) } require.NoError(b, err) } func BenchmarkValueStreamReadAsync(b *testing.B) { - b.StopTimer() vs := NewValueStream(0) it := vs.Iterator(true) @@ -135,17 +121,14 @@ func BenchmarkValueStreamReadAsync(b *testing.B) { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(b.N)*time.Millisecond) defer cancel() - b.StartTimer() - var err error - for i := 0; i < b.N && err == nil; i++ { + for b.Loop() && err == nil { it, err = it.Next(ctx) } require.NoError(b, err) } func BenchmarkBufChanRead(b *testing.B) { - b.StopTimer() c := make(chan int, b.N) for i := 0; i < b.N; i++ { @@ -155,10 +138,8 @@ func BenchmarkBufChanRead(b *testing.B) { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(b.N)*time.Millisecond) defer cancel() - b.StartTimer() - var err error - for i := 0; i < b.N && err == nil; i++ { + for b.Loop() && err == nil { select { case <-c: case <-ctx.Done(): @@ -169,7 +150,6 @@ func BenchmarkBufChanRead(b *testing.B) { } func BenchmarkBuf1ChanRead(b *testing.B) { - b.StopTimer() c := make(chan int, 1) @@ -183,10 +163,8 @@ func BenchmarkBuf1ChanRead(b *testing.B) { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(b.N)*time.Millisecond) defer cancel() - b.StartTimer() - var err error - for i := 0; i < b.N && err == nil; i++ { + for b.Loop() && err == nil { select { case <-c: case <-ctx.Done(): @@ -196,7 +174,6 @@ func BenchmarkBuf1ChanRead(b *testing.B) { } func BenchmarkUnbufChanRead(b *testing.B) { - b.StopTimer() c := make(chan int) @@ -210,10 +187,8 @@ func BenchmarkUnbufChanRead(b *testing.B) { ctx, cancel := context.WithTimeout(context.Background(), time.Duration(b.N)*time.Millisecond) defer cancel() - b.StartTimer() - var err error - for i := 0; i < b.N && err == nil; i++ { + for b.Loop() && err == nil { select { case <-c: case <-ctx.Done(): @@ -223,23 +198,19 @@ func BenchmarkUnbufChanRead(b *testing.B) { } func BenchmarkSliceRead(b *testing.B) { - b.StopTimer() slice := make([]int, 0, b.N) for i := 0; i < b.N; i++ { slice = append(slice, i) } - b.StartTimer() - - for i := 0; i < b.N; i++ { + for b.Loop() { slice = slice[1:] } require.Empty(b, slice) } func BenchmarkSliceReadWithMutex(b *testing.B) { - b.StopTimer() slice := make([]int, 0, b.N) for i := 0; i < b.N; i++ { @@ -247,9 +218,7 @@ func 
BenchmarkSliceReadWithMutex(b *testing.B) { } var mutex sync.Mutex - b.StartTimer() - - for i := 0; i < b.N; i++ { + for b.Loop() { WithLock(&mutex, func() { slice = slice[1:] }) diff --git a/pkg/cryptoutils/fingerprint_test.go b/pkg/cryptoutils/fingerprint_test.go index f9c31aa917160..fa6d64684ac9d 100644 --- a/pkg/cryptoutils/fingerprint_test.go +++ b/pkg/cryptoutils/fingerprint_test.go @@ -15,25 +15,25 @@ func BenchmarkCertFingerprintChoices(b *testing.B) { b.Fatalf("Expected %d bytes of randomness but got %d with error %v", len(fakeCert), n, err) } b.Run("SHA-1", func(b *testing.B) { - for n := 0; n < b.N; n++ { + for b.Loop() { sum := sha1.Sum(fakeCert) _ = formatID(sum[:]) } }) b.Run("SHA-256", func(b *testing.B) { - for n := 0; n < b.N; n++ { + for b.Loop() { sum := sha256.Sum256(fakeCert) _ = formatID(sum[:]) } }) b.Run("SHA-512", func(b *testing.B) { - for n := 0; n < b.N; n++ { + for b.Loop() { sum := sha512.Sum512(fakeCert) _ = formatID(sum[:]) } }) b.Run("SHA-512_256", func(b *testing.B) { - for n := 0; n < b.N; n++ { + for b.Loop() { sum := sha512.Sum512_256(fakeCert) _ = formatID(sum[:]) } diff --git a/pkg/grpc/codec_test.go b/pkg/grpc/codec_test.go index 1f74031716f2e..6bdd7e8d12c67 100644 --- a/pkg/grpc/codec_test.go +++ b/pkg/grpc/codec_test.go @@ -122,7 +122,7 @@ func BenchmarkProtoUnmarshal(b *testing.B) { b.Run("small", func(b *testing.B) { b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { _, _ = svc.SuppressCVEs(context.Background(), &request) } }) @@ -132,7 +132,7 @@ func BenchmarkProtoUnmarshal(b *testing.B) { request.Cves = append(request.Cves, fmt.Sprintf("CVE-%d", i)) } b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { _, _ = svc.SuppressCVEs(context.Background(), &request) } }) diff --git a/pkg/grpc/ratelimit/ratelimit_impl_test.go b/pkg/grpc/ratelimit/ratelimit_impl_test.go index ad80e8dd8033a..fee617b877c07 100644 --- a/pkg/grpc/ratelimit/ratelimit_impl_test.go +++ b/pkg/grpc/ratelimit/ratelimit_impl_test.go @@ -168,7 +168,7 @@ func BenchmarkRateLimiter(b *testing.B) { for _, tt := range tests { b.Run(tt.name, func(b *testing.B) { l := NewRateLimiter(tt.maxPerSec, tt.maxThrottleDuration) - for i := 0; i < b.N; i++ { + for b.Loop() { _ = l.Limit(context.Background()) } }) diff --git a/pkg/process/filter/filter_bench_test.go b/pkg/process/filter/filter_bench_test.go index 2a4aa83eefdd2..4ef360ec80d58 100644 --- a/pkg/process/filter/filter_bench_test.go +++ b/pkg/process/filter/filter_bench_test.go @@ -26,8 +26,7 @@ func BenchmarkAdd(b *testing.B) { } } - b.ResetTimer() - for i := 0; i < b.N; i++ { + for i := 0; b.Loop(); i++ { filter.Add(indicators[i%len(indicators)]) } } diff --git a/pkg/protoutils/clone_bench_test.go b/pkg/protoutils/clone_bench_test.go index 396a4cfbc6a81..031a4d458db01 100644 --- a/pkg/protoutils/clone_bench_test.go +++ b/pkg/protoutils/clone_bench_test.go @@ -10,8 +10,8 @@ import ( // This benchmark uses reflection func BenchmarkGolangProtoClone(b *testing.B) { st := fixtures.GetImage() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { _ = proto.Clone(st) } } @@ -19,8 +19,8 @@ func BenchmarkGolangProtoClone(b *testing.B) { // This benchmark uses autogenerated clones func BenchmarkAutogeneratedClone(b *testing.B) { st := fixtures.GetImage() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { _ = st.CloneVT() } } diff --git a/pkg/search/postgres/common_test.go b/pkg/search/postgres/common_test.go index 1b22f178e1acf..5514f4692974c 100644 --- a/pkg/search/postgres/common_test.go +++ 
b/pkg/search/postgres/common_test.go @@ -58,12 +58,12 @@ func TestReplaceVars(t *testing.T) { func BenchmarkReplaceVars(b *testing.B) { veryLongString := strings.Repeat("$$ ", 1000) b.Run("short", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { replaceVars("$$ $$ $$ $$ $$ $$ $$ $$ $$ $$ $$") } }) b.Run("long", func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { replaceVars(veryLongString) } }) diff --git a/pkg/signatures/factory_test.go b/pkg/signatures/factory_test.go index 1ce6c28afab53..f6802aa55559d 100644 --- a/pkg/signatures/factory_test.go +++ b/pkg/signatures/factory_test.go @@ -131,7 +131,7 @@ func BenchmarkVerifyAgainstSignatureIntegrations(b *testing.B) { } func benchmarkVerifyAgainstSignatureIntegrations(integrations []*storage.SignatureIntegration, img *storage.Image, b *testing.B) { - for n := 0; n < b.N; n++ { + for b.Loop() { VerifyAgainstSignatureIntegrations(context.Background(), integrations, img) } } diff --git a/scale/tests/compliance_test.go b/scale/tests/compliance_test.go index 8afe6c3b2e247..effb985de4dec 100644 --- a/scale/tests/compliance_test.go +++ b/scale/tests/compliance_test.go @@ -193,8 +193,8 @@ func BenchmarkCompliance(b *testing.B) { log.Info("completed compliance run, beginning benchmark") // Build the queries we are going to run complianceQueries := makeComplianceQueries(complianceRuns) - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { // Run all queries asynchronously and wait for each to finish loadComplianceResults(envVars, client, complianceQueries) } diff --git a/scale/tests/dashboard_test.go b/scale/tests/dashboard_test.go index dd1c15e72a257..9628d8ba252e2 100644 --- a/scale/tests/dashboard_test.go +++ b/scale/tests/dashboard_test.go @@ -60,8 +60,7 @@ func BenchmarkDashboard(b *testing.B) { alertService := v1.NewAlertServiceClient(connection) deploymentService := v1.NewDeploymentServiceClient(connection) - b.ResetTimer() - for n := 0; n < b.N; n++ { + for b.Loop() { wg := concurrency.NewWaitGroup(0) asyncWithWaitGroup(getAlertsSummaryByCluster(alertService), &wg) asyncWithWaitGroup(getDeploymentsWithProcessInfo(deploymentService), &wg) diff --git a/scale/tests/policies_test.go b/scale/tests/policies_test.go index 2c2b341f7c503..acad78a19ed32 100644 --- a/scale/tests/policies_test.go +++ b/scale/tests/policies_test.go @@ -80,8 +80,7 @@ func BenchmarkDryRunPolicies(b *testing.B) { log.Fatal(err) } - b.ResetTimer() - for n := 0; n < b.N; n++ { + for b.Loop() { jobChan := make(chan string, len(defPolicies)) wg := concurrency.NewWaitGroup(0) // Consumer of submitted jobs. 
diff --git a/sensor/debugger/k8s/trace_test.go b/sensor/debugger/k8s/trace_test.go index 14ea716a81903..71b1e3fb30231 100644 --- a/sensor/debugger/k8s/trace_test.go +++ b/sensor/debugger/k8s/trace_test.go @@ -13,9 +13,8 @@ func BenchmarkWrite(b *testing.B) { data := []byte("abc") writer, err := NewTraceWriter(path.Join(dir, "test")) assert.NoError(b, err) - b.ResetTimer() - for n := 0; n < b.N; n++ { + for b.Loop() { _, err := writer.Write(data) assert.NoError(b, err) } diff --git a/sensor/kubernetes/eventpipeline/resolver/resolver_bench_test.go b/sensor/kubernetes/eventpipeline/resolver/resolver_bench_test.go index e76ab8c1ec71c..b43aa432df703 100644 --- a/sensor/kubernetes/eventpipeline/resolver/resolver_bench_test.go +++ b/sensor/kubernetes/eventpipeline/resolver/resolver_bench_test.go @@ -74,7 +74,7 @@ func BenchmarkProcessDeploymentReferences(b *testing.B) { b.Setenv(features.SensorInternalPubSub.EnvVar(), fmt.Sprintf("%t", value)) for _, bc := range cases { b.Run(fmt.Sprintf("Benchmark with %d events and %d deployments per event and %q is %t", bc.numEvents, bc.numDeployments, features.SensorInternalPubSub.EnvVar(), value), func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { b.StopTimer() doneSignal := concurrency.NewSignal() setupMocks(b, &doneSignal, value) @@ -98,7 +98,7 @@ func BenchmarkProcessRandomDeploymentReferences(b *testing.B) { b.Setenv(features.SensorInternalPubSub.EnvVar(), fmt.Sprintf("%t", value)) for _, bc := range cases { b.Run(fmt.Sprintf("Benchmark with %d events and %d random deployments per event and %q is %t", bc.numEvents, bc.numDeployments, features.SensorInternalPubSub.EnvVar(), value), func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { b.StopTimer() doneSignal := concurrency.NewSignal() setupMocks(b, &doneSignal, value) diff --git a/sensor/kubernetes/listener/resources/deployment_store_bench_test.go b/sensor/kubernetes/listener/resources/deployment_store_bench_test.go index 9167633a538c3..b74b7cb7cc0f8 100644 --- a/sensor/kubernetes/listener/resources/deployment_store_bench_test.go +++ b/sensor/kubernetes/listener/resources/deployment_store_bench_test.go @@ -29,7 +29,7 @@ type namespaceAndSelector struct { // 10k updates without meaningful change. This is to test that // we don't do useless clones if the object is the same. func BenchmarkBuildDeployments_NoChange(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { b.StopTimer() benchStore = newDeploymentStore() deployment1 := createDeploymentWrap() @@ -51,7 +51,7 @@ func BenchmarkBuildDeployments_NoChange(b *testing.B) { // 10k meaningful updates, which should result in a new deployment // object. 
func BenchmarkBuildDeployments_Change(b *testing.B) { - for n := 0; n < b.N; n++ { + for b.Loop() { b.StopTimer() benchStore = newDeploymentStore() deployment1 := createDeploymentWrap() @@ -76,7 +76,7 @@ func BenchmarkBuildDeployments_Change(b *testing.B) { func BenchmarkDeleteAllDeployments(b *testing.B) { for _, numDeployments := range []int{1000, 5000, 10_000, 25_000} { b.Run(fmt.Sprintf("num_deployments: %d", numDeployments), func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { b.StopTimer() benchStore = newDeploymentStore() for i := 0; i < 1000; i++ { @@ -90,7 +90,7 @@ func BenchmarkDeleteAllDeployments(b *testing.B) { } func BenchmarkFindDeploymentIDsByLabels(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { b.StopTimer() benchStore = newDeploymentStore() for i := 0; i < 1000; i++ { diff --git a/sensor/kubernetes/listener/resources/networkpolicy_store_bench_test.go b/sensor/kubernetes/listener/resources/networkpolicy_store_bench_test.go index 28731af7dfc7a..8cecf8f7581dc 100644 --- a/sensor/kubernetes/listener/resources/networkpolicy_store_bench_test.go +++ b/sensor/kubernetes/listener/resources/networkpolicy_store_bench_test.go @@ -94,7 +94,7 @@ func BenchmarkFind(b *testing.B) { s := newNetworkPoliciesStore() populateStore(s, int64(math.Pow(10, float64(scale))), scale, allLabels16, allValues16) b.Run(fmt.Sprintf("K=%d-N=10^%d", numLabels, scale), func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { _ = s.Find(defaultNS, selectors[labelIdx]) } }) @@ -115,7 +115,7 @@ func BenchmarkUpsert_Update(b *testing.B) { } newPolicy := newNPDummy(oldPolicy.GetId(), defaultNS, selectors[labelIdx]) b.Run(fmt.Sprintf("L=%d-N=10^%d", numLabels, scale), func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { s.Upsert(newPolicy) } }) @@ -129,7 +129,7 @@ func BenchmarkUpsert_Add(b *testing.B) { s := newNetworkPoliciesStore() populateStore(s, int64(math.Pow(10, float64(scale))), scale, allLabels16, allValues16) b.Run(fmt.Sprintf("L=%d-N=10^%d", numLabels, scale), func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { np := newNPDummy(uuid.NewV4().String(), getRandom(namespaces), selectors[labelIdx]) s.Upsert(np) } diff --git a/sensor/kubernetes/listener/resources/rbac/store_impl_test.go b/sensor/kubernetes/listener/resources/rbac/store_impl_test.go index 92fa254f5bf74..fbae1b637c06f 100644 --- a/sensor/kubernetes/listener/resources/rbac/store_impl_test.go +++ b/sensor/kubernetes/listener/resources/rbac/store_impl_test.go @@ -609,13 +609,13 @@ func generateStore(counts storeObjectCounts) Store { } func BenchmarkRBACStoreUpsertTime(b *testing.B) { - for n := 0; n < b.N; n++ { + for b.Loop() { generateStore(storeObjectCounts{roles: 1000, bindings: 10_000, namespaces: 10}) } } func runRBACBenchmarkGetPermissionLevelForDeployment(b *testing.B, store Store, keepCache bool) { - for n := 0; n < b.N; n++ { + for b.Loop() { store.GetPermissionLevelForDeployment( &storage.Deployment{ServiceAccount: "default-subject", Namespace: "namespace0"}) if !keepCache { @@ -660,7 +660,7 @@ func BenchmarkRBACStoreAssignPermissionLevelToDeployment(b *testing.B) { } func BenchmarkRBACUpsertExistingBinding(b *testing.B) { - b.StopTimer() + store := NewStore() binding := &v1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ @@ -673,8 +673,8 @@ func BenchmarkRBACUpsertExistingBinding(b *testing.B) { }, } store.UpsertBinding(binding) - b.StartTimer() - for n := 0; n < b.N; n++ { + + for b.Loop() { store.UpsertBinding(binding) } } diff --git 
a/sensor/tests/pipeline/bench_test.go b/sensor/tests/pipeline/bench_test.go index 91a04db784fba..284348ba6b5b0 100644 --- a/sensor/tests/pipeline/bench_test.go +++ b/sensor/tests/pipeline/bench_test.go @@ -71,7 +71,6 @@ func randString(n int) string { } func Benchmark_Pipeline(b *testing.B) { - b.StopTimer() setupOnce.Do(func() { fakeClient = k8s.MakeFakeClient() @@ -92,8 +91,7 @@ func Benchmark_Pipeline(b *testing.B) { setupSensor(fakeCentral, fakeClient) }) - b.StartTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { testNamespace := randString(10) _, err := fakeClient.Kubernetes().CoreV1().Namespaces().Create(context.Background(), &core.Namespace{ ObjectMeta: metav1.ObjectMeta{ From a9a8ef4327c8b2d9e15ee7d0183bcec794dfd738 Mon Sep 17 00:00:00 2001 From: Mauro Ezequiel Moltrasio Date: Thu, 29 Jan 2026 17:52:06 +0100 Subject: [PATCH 077/232] chore(fact): konflux required bits (#18603) Co-authored-by: Tom Martensen --- .github/workflows/release-ci.yaml | 13 ++++ .../workflows/scan-image-vulnerabilities.yaml | 2 + .github/workflows/style.yaml | 16 ++--- .tekton/operator-bundle-pipeline.yaml | 18 ++++++ .tekton/retag-fact.yaml | 59 +++++++++++++++++++ .tekton/retag-pipeline.yaml | 10 ++-- FACT_VERSION | 2 +- scripts/ci/bats/lib_release_version_test.bats | 20 +++++++ scripts/ci/lib.sh | 11 ++++ 9 files changed, 138 insertions(+), 13 deletions(-) create mode 100644 .tekton/retag-fact.yaml diff --git a/.github/workflows/release-ci.yaml b/.github/workflows/release-ci.yaml index 1ec4ae0e1fbe8..42459aed8e2d1 100644 --- a/.github/workflows/release-ci.yaml +++ b/.github/workflows/release-ci.yaml @@ -70,6 +70,19 @@ jobs: scripts/ci/lib.sh \ check_collector_version + check-fact-version: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v6 + with: + fetch-depth: 0 + ref: ${{ github.ref_name }} + - name: Check fact version + run: | + scripts/ci/lib.sh \ + check_fact_version + publish-helm-charts: needs: [build, check-scanner-version, check-collector-version, check-is-release] if: needs.check-is-release.outputs.is_release == 'true' diff --git a/.github/workflows/scan-image-vulnerabilities.yaml b/.github/workflows/scan-image-vulnerabilities.yaml index 1f022e587d7fb..7fd100e9b11c1 100644 --- a/.github/workflows/scan-image-vulnerabilities.yaml +++ b/.github/workflows/scan-image-vulnerabilities.yaml @@ -59,6 +59,7 @@ jobs: [ "central-db", "collector", + "fact", "main", "roxctl", "scanner", @@ -114,6 +115,7 @@ jobs: [ "release-central-db", "release-collector", + "release-fact", "release-main", "release-roxctl", "release-scanner", diff --git a/.github/workflows/style.yaml b/.github/workflows/style.yaml index c6f183a8739af..4991ff69d19d0 100644 --- a/.github/workflows/style.yaml +++ b/.github/workflows/style.yaml @@ -162,14 +162,14 @@ jobs: - name: Check Cache golangci-lint run: make golangci-lint-cache-status - check-collector-and-scanner-images-exist: - # This job ensures that COLLECTOR_VERSION or SCANNER_VERSION files cannot be updated to a version for which the - # image was not successfully built on Konflux (suffix "-fast"). It also verifies that GHA-built image is there (no - # suffix) so that the failure also happens in this job. + check-dependent-images-exist: + # This job ensures that COLLECTOR_VERSION, FACT_VERSION or SCANNER_VERSION files cannot be updated to a version + # for which the image was not successfully built on Konflux (suffix "-fast"). It also verifies that GHA-built image + # is there (no suffix) so that the failure also happens in this job. 
runs-on: ubuntu-latest strategy: matrix: - image: ["collector", "scanner", "scanner-slim", "scanner-db", "scanner-db-slim"] + image: ["collector", "fact", "scanner", "scanner-slim", "scanner-db", "scanner-db-slim"] steps: - name: Checkout repo uses: actions/checkout@v6 @@ -178,11 +178,13 @@ jobs: with: gcp-account: ${{ secrets.GCP_SERVICE_ACCOUNT_STACKROX_CI }} - - name: Get image tag from COLLECTOR|SCANNER_VERSION file + - name: Get image tag from COLLECTOR|FACT|SCANNER_VERSION file id: image-tag run: | if [[ "${{ matrix.image }}" == "collector" ]]; then makefile_target="collector-tag" + elif [[ "${{ matrix.image }}" == "fact" ]]; then + makefile_target="fact-tag" else makefile_target="scanner-tag" fi @@ -271,7 +273,7 @@ jobs: - misc-checks - style-check - golangci-lint - - check-collector-and-scanner-images-exist + - check-dependent-images-exist - github-actions-lint - github-actions-shellcheck - openshift-ci-lint diff --git a/.tekton/operator-bundle-pipeline.yaml b/.tekton/operator-bundle-pipeline.yaml index 228a65276b860..2194f0f6023c5 100644 --- a/.tekton/operator-bundle-pipeline.yaml +++ b/.tekton/operator-bundle-pipeline.yaml @@ -201,6 +201,15 @@ spec: type: string default: "registry.redhat.io/advanced-cluster-security/rhacs-collector-rhel8" + - name: fact-image-build-repo + description: Repository where the (unreleased) fact image is pushed by its build pipeline. + type: string + default: "quay.io/rhacs-eng/release-fact" + - name: fact-image-catalog-repo + description: Repository within the Red Hat Container Catalog where the fact image is pushed to during the release. + type: string + default: "registry.redhat.io/advanced-cluster-security/rhacs-fact-rhel8" + - name: roxctl-image-build-repo description: Repository where the (unreleased) roxctl image is pushed by its build pipeline. 
type: string @@ -434,6 +443,14 @@ spec: # This timeout must be the same as the pipeline timeout in `collector-retag.yaml` timeout: 40m + - name: wait-for-fact-image + params: + - name: IMAGE + value: "$(params.fact-image-build-repo):$(tasks.determine-image-tag.results.IMAGE_TAG)" + taskRef: *wait-for-image-ref + # This timeout must be the same as the pipeline timeout in `retag-fact.yaml` + timeout: 40m + - name: wait-for-roxctl-image params: - name: IMAGE @@ -479,6 +496,7 @@ spec: - RELATED_IMAGE_SCANNER_V4=$(params.scanner-v4-image-catalog-repo)@$(tasks.wait-for-scanner-v4-image.results.IMAGE_DIGEST) - RELATED_IMAGE_SCANNER_V4_DB=$(params.scanner-v4-db-image-catalog-repo)@$(tasks.wait-for-scanner-v4-db-image.results.IMAGE_DIGEST) - RELATED_IMAGE_COLLECTOR=$(params.collector-image-catalog-repo)@$(tasks.wait-for-collector-image.results.IMAGE_DIGEST) + - RELATED_IMAGE_FACT=$(params.fact-image-catalog-repo)@$(tasks.wait-for-fact-image.results.IMAGE_DIGEST) - RELATED_IMAGE_ROXCTL=$(params.roxctl-image-catalog-repo)@$(tasks.wait-for-roxctl-image.results.IMAGE_DIGEST) - RELATED_IMAGE_CENTRAL_DB=$(params.central-db-image-catalog-repo)@$(tasks.wait-for-central-db-image.results.IMAGE_DIGEST) - name: SOURCE_ARTIFACT diff --git a/.tekton/retag-fact.yaml b/.tekton/retag-fact.yaml new file mode 100644 index 0000000000000..6b5e23daae706 --- /dev/null +++ b/.tekton/retag-fact.yaml @@ -0,0 +1,59 @@ +apiVersion: tekton.dev/v1 +kind: PipelineRun + +metadata: + annotations: + build.appstudio.openshift.io/repo: https://github.com/stackrox/stackrox?rev={{revision}} + build.appstudio.redhat.com/commit_sha: '{{revision}}' + build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}' + build.appstudio.redhat.com/target_branch: '{{target_branch}}' + pipelinesascode.tekton.dev/max-keep-runs: "500" + # TODO(ROX-21073): re-enable for all PR branches + pipelinesascode.tekton.dev/on-cel-expression: | + ( + event == "push" && target_branch.matches("^(master|release-.*|refs/tags/.*)$") + ) || ( + event == "pull_request" && ( + target_branch.startsWith("release-") || + source_branch.matches("(konflux|renovate|appstudio|rhtap)") || + (has(body.pull_request) && has(body.pull_request.labels) && body.pull_request.labels.exists(l, l.name == "konflux-build")) + ) && body.action != "ready_for_review" + ) + # The empty `on-label` annotation is a workaround to make sure the pipeline gets triggered when the label gets first + # added to the PR. See the Slack tread linked from ROX-30580. + pipelinesascode.tekton.dev/on-label: "[]" + labels: + appstudio.openshift.io/application: acs + name: retag-fact + namespace: rh-acs-tenant + +spec: + + params: + - name: git-url + value: '{{source_url}}' + - name: revision + value: '{{revision}}' + - name: input-image-repo + value: quay.io/rhacs-eng/release-fact + - name: input-image-tag-makefile-target + value: fact-tag + - name: output-image-repo + value: quay.io/rhacs-eng/release-fact + + pipelineRef: + name: retag-pipeline + + taskRunTemplate: + serviceAccountName: build-pipeline-fact + + timeouts: + tasks: 30m + # Reserve time for final tasks to run. 
+ finally: 10m + pipeline: 40m + + workspaces: + - name: git-auth + secret: + secretName: '{{ git_auth_secret }}' diff --git a/.tekton/retag-pipeline.yaml b/.tekton/retag-pipeline.yaml index 71cd2995cde74..9124aa506d5a4 100644 --- a/.tekton/retag-pipeline.yaml +++ b/.tekton/retag-pipeline.yaml @@ -35,7 +35,7 @@ spec: - name: name value: post-bigquery-metrics - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c3b069313d4de5d67dc662e31cbcdde02e134be5fe7dcdb5bceeb670f89fcf52 + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c925228a20e1a87c93bccbe66444436ad0bdc88e2a20cbd576e7bad1686525af - name: kind value: task resolver: bundles @@ -136,7 +136,7 @@ spec: - name: name value: determine-image-tag - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c3b069313d4de5d67dc662e31cbcdde02e134be5fe7dcdb5bceeb670f89fcf52 + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c925228a20e1a87c93bccbe66444436ad0bdc88e2a20cbd576e7bad1686525af - name: kind value: task resolver: bundles @@ -154,7 +154,7 @@ spec: - name: name value: determine-dependency-image-tag - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c3b069313d4de5d67dc662e31cbcdde02e134be5fe7dcdb5bceeb670f89fcf52 + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c925228a20e1a87c93bccbe66444436ad0bdc88e2a20cbd576e7bad1686525af - name: kind value: task resolver: bundles @@ -170,7 +170,7 @@ spec: - name: name value: wait-for-image - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c3b069313d4de5d67dc662e31cbcdde02e134be5fe7dcdb5bceeb670f89fcf52 + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c925228a20e1a87c93bccbe66444436ad0bdc88e2a20cbd576e7bad1686525af - name: kind value: task resolver: bundles @@ -195,7 +195,7 @@ spec: - name: name value: retag-image - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c3b069313d4de5d67dc662e31cbcdde02e134be5fe7dcdb5bceeb670f89fcf52 + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c925228a20e1a87c93bccbe66444436ad0bdc88e2a20cbd576e7bad1686525af - name: kind value: task resolver: bundles diff --git a/FACT_VERSION b/FACT_VERSION index 2ac86b8890de7..ee8685c26aa9f 100644 --- a/FACT_VERSION +++ b/FACT_VERSION @@ -1 +1 @@ -0.2.x +0.2.x-5-gcf5e7419fa diff --git a/scripts/ci/bats/lib_release_version_test.bats b/scripts/ci/bats/lib_release_version_test.bats index 7cb57a68b2da3..07a602fb32faa 100644 --- a/scripts/ci/bats/lib_release_version_test.bats +++ b/scripts/ci/bats/lib_release_version_test.bats @@ -111,6 +111,26 @@ function make() { assert_success } +@test "spots fact tag is a master commit" { + declare -A tags=( [fact-tag]="0.2.x-23-g8a2e05d0ec") + run check_fact_version + assert_failure + assert_output --partial 'Fact tag does not look like a release tag' +} + +@test "spots fact tag is a release candidate" { + declare -A tags=( [fact-tag]="0.2.1-rc.1") + run check_fact_version + assert_failure + assert_output --partial 'Fact tag does not look like a release tag' +} + +@test "spots fact tag is a release" { + declare -A tags=( [fact-tag]="0.2.1") + run check_fact_version + assert_success +} + @test "spots scanner tag is a master commit" { declare -A tags=( [scanner-tag]="3.45.x-12-g8a2e05d0ec") run check_scanner_version diff --git a/scripts/ci/lib.sh b/scripts/ci/lib.sh index eb11a6c2fb63c..7a2ece08a8c33 100755 --- a/scripts/ci/lib.sh +++ b/scripts/ci/lib.sh @@ -543,6 +543,8 @@ push_matching_collector_scanner_images() { scanner_version="$(make --quiet --no-print-directory scanner-tag)" local 
collector_version collector_version="$(make --quiet --no-print-directory collector-tag)" + local fact_version + fact_version="$(make --quiet --no-print-directory fact-tag)" registry_rw_login "${registry}" @@ -552,6 +554,8 @@ push_matching_collector_scanner_images() { _retag "${registry}/scanner-db-slim:${scanner_version}" "${registry}/scanner-db-slim:${main_tag}-${arch}" _retag "${registry}/collector:${collector_version}" "${registry}/collector:${main_tag}-${arch}" + + _retag "${registry}/fact:${fact_version}" "${registry}/fact:${main_tag}-${arch}" } poll_for_system_test_images() { @@ -1055,6 +1059,13 @@ check_collector_version() { fi } +check_fact_version() { + if ! is_release_version "$(make --quiet --no-print-directory fact-tag)"; then + echo "::error::Fact tag does not look like a release tag. Please update FACT_VERSION file before releasing." + exit 1 + fi +} + publish_cli() { if [[ "$#" -ne 1 ]]; then die "missing arg. usage: publish_cli " From b930f1469033165a910f7495319e006737c140ed Mon Sep 17 00:00:00 2001 From: Marcin Owsiany Date: Thu, 29 Jan 2026 21:08:24 +0100 Subject: [PATCH 078/232] docs(operator): mention the operator in top level README.md (#18755) --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index 6d647e0be8dde..fbae0276d6729 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,7 @@ - [Deploying StackRox](#deploying-stackrox) - [Quick Installation using Helm](#quick-installation-using-helm) - [Manual Installation using Helm](#manual-installation-using-helm) + - [Installation using the Operator](#installation-using-the-operator) - [Installation via Scripts](#installation-via-scripts) - [Kubernetes Distributions (EKS, AKS, GKE)](#kubernetes-distributions-eks-aks-gke) - [OpenShift](#openshift) @@ -213,6 +214,11 @@ To further customize your Helm installation consult these documents: +### Installation using the Operator + +As of release 4.10 it's possible to [install StackRox using the operator](operator/install). +We encourage you to try this out and give us your feedback. + ### Installation via Scripts The `deploy` script will: From e80a7e3f2cef4a17b03c6334841bdbc353d6fb80 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Luis=20Valero=20Mart=C3=ADn?= Date: Thu, 29 Jan 2026 21:31:36 +0100 Subject: [PATCH 079/232] ROX-32867: Changelog Compliance v1 deprecation entry (#18756) --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9f267c6769d6e..1047217411b23 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,10 @@ Changes should still be described appropriately in JIRA/doc input pages, for inc ### Deprecated Features - ROX-32851: The `roxctl netpol generate`, `roxctl netpol connectivity map`, and `roxctl netpol connectivity diff` commands are deprecated because they rely on the unmaintained NP-Guard library and will be removed in a future release. +- ROX-32867: The Compliance V1 feature has been deprecated, and it is planned to be removed in a future release. This includes: + - The Compliance Dashboard + - The Compliance V1 API endpoints + - The Compliance Configuration Management Board ### Technical Changes - ROX-30769: Update Node.js requirement for ui folder to 22.13.0 From 6a70f55f4e1d8fda47988cae0c994092709567ab Mon Sep 17 00:00:00 2001 From: "J. 
Victor Martins" Date: Thu, 29 Jan 2026 21:30:28 -0800 Subject: [PATCH 080/232] ROX-32860: Fix base image layer index comparison (#18749) --- central/baseimage/matcher/matcher.go | 2 +- central/baseimage/matcher/matcher_impl.go | 11 +- .../baseimage/matcher/matcher_impl_test.go | 24 +- central/baseimage/matcher/mocks/matcher.go | 4 +- .../image_components_v2_postgres_test.go | 2 +- central/graphql/resolvers/test_utils.go | 2 + .../datastore/store/common/v2/parts_test.go | 4 + .../datastore/store/common/v2/split_v2.go | 6 +- .../datastore/store/common/parts_test.go | 4 + .../imagev2/datastore/store/common/split.go | 6 +- generated/api/v1/image_service.swagger.json | 5 + .../api/v1/vuln_mgmt_service.swagger.json | 5 + generated/storage/image.pb.go | 19 +- generated/storage/image_vtproto.pb.go | 50 ++++ pkg/images/enricher/enricher.go | 2 +- pkg/images/enricher/enricher_impl.go | 96 +++++++- pkg/images/enricher/enricher_impl_test.go | 214 +++++++++++++++++- pkg/images/enricher/enricher_v2.go | 2 +- pkg/images/enricher/enricher_v2_impl.go | 3 +- pkg/images/enricher/enricher_v2_impl_test.go | 18 +- proto/storage/image.proto | 3 + proto/storage/proto.lock | 5 + 22 files changed, 444 insertions(+), 43 deletions(-) diff --git a/central/baseimage/matcher/matcher.go b/central/baseimage/matcher/matcher.go index a46315dd2e308..8f5bc09e3a104 100644 --- a/central/baseimage/matcher/matcher.go +++ b/central/baseimage/matcher/matcher.go @@ -8,5 +8,5 @@ import ( //go:generate mockgen-wrapper type Matcher interface { - MatchWithBaseImages(ctx context.Context, layers []string) ([]*storage.BaseImageInfo, error) + MatchWithBaseImages(ctx context.Context, layers []string) ([]*storage.BaseImage, error) } diff --git a/central/baseimage/matcher/matcher_impl.go b/central/baseimage/matcher/matcher_impl.go index 96a3f7a2032d9..93d12bf6c8079 100644 --- a/central/baseimage/matcher/matcher_impl.go +++ b/central/baseimage/matcher/matcher_impl.go @@ -28,7 +28,7 @@ func New( } } -func (m matcherImpl) MatchWithBaseImages(ctx context.Context, layers []string) ([]*storage.BaseImageInfo, error) { +func (m matcherImpl) MatchWithBaseImages(ctx context.Context, layers []string) ([]*storage.BaseImage, error) { start := time.Now() defer func() { @@ -45,7 +45,7 @@ func (m matcherImpl) MatchWithBaseImages(ctx context.Context, layers []string) ( if err != nil { return nil, fmt.Errorf("listing candidates for layer %s: %w", firstLayer, err) } - var baseImages []*storage.BaseImageInfo + var baseImages []*storage.BaseImage for _, c := range candidates { candidateLayers := c.GetLayers() slices.SortFunc(candidateLayers, func(a, b *storage.BaseImageLayer) int { @@ -63,12 +63,7 @@ func (m matcherImpl) MatchWithBaseImages(ctx context.Context, layers []string) ( } if match { - baseImages = append(baseImages, &storage.BaseImageInfo{ - BaseImageId: c.GetId(), - BaseImageFullName: fmt.Sprintf("%s:%s", c.GetRepository(), c.GetTag()), - BaseImageDigest: c.GetManifestDigest(), - Created: c.GetCreated(), - }) + baseImages = append(baseImages, c) } } return baseImages, nil diff --git a/central/baseimage/matcher/matcher_impl_test.go b/central/baseimage/matcher/matcher_impl_test.go index f208c2bcfeef2..06b1398b2150d 100644 --- a/central/baseimage/matcher/matcher_impl_test.go +++ b/central/baseimage/matcher/matcher_impl_test.go @@ -27,7 +27,7 @@ func TestMatchWithBaseImages(t *testing.T) { desc string imgLayers []string mockSetup func() - expected []*storage.BaseImageInfo + expected []*storage.BaseImage }{ { desc: "Match found: Base image layers are 
returned out of order", @@ -49,12 +49,13 @@ func TestMatchWithBaseImages(t *testing.T) { }, }, nil) }, - expected: []*storage.BaseImageInfo{ + expected: []*storage.BaseImage{ { - BaseImageId: "base-1", - BaseImageFullName: "rhel:8", - BaseImageDigest: "sha-base", - Created: testCreatedTime, + Id: "base-1", + Repository: "rhel", + Tag: "8", + ManifestDigest: "sha-base", + Created: testCreatedTime, }, }, }, @@ -99,8 +100,8 @@ func TestMatchWithBaseImages(t *testing.T) { }, }, nil) }, - expected: []*storage.BaseImageInfo{ - {BaseImageId: "match"}, + expected: []*storage.BaseImage{ + {Id: "match"}, }, }, { @@ -134,9 +135,10 @@ func TestMatchWithBaseImages(t *testing.T) { } else { require.Equal(t, len(tc.expected), len(actual)) for i := range tc.expected { - assert.Equal(t, tc.expected[i].GetBaseImageId(), actual[i].GetBaseImageId()) - if tc.expected[i].GetBaseImageFullName() != "" { - assert.Equal(t, tc.expected[i].GetBaseImageFullName(), actual[i].GetBaseImageFullName()) + assert.Equal(t, tc.expected[i].GetId(), actual[i].GetId()) + if tc.expected[i].GetRepository() != "" { + assert.Equal(t, tc.expected[i].GetRepository(), actual[i].GetRepository()) + assert.Equal(t, tc.expected[i].GetTag(), actual[i].GetTag()) } if tc.expected[i].GetCreated() != nil { assert.Equal(t, tc.expected[i].GetCreated(), actual[i].GetCreated()) diff --git a/central/baseimage/matcher/mocks/matcher.go b/central/baseimage/matcher/mocks/matcher.go index fb015f20f03e7..567d6287b7d72 100644 --- a/central/baseimage/matcher/mocks/matcher.go +++ b/central/baseimage/matcher/mocks/matcher.go @@ -42,10 +42,10 @@ func (m *MockMatcher) EXPECT() *MockMatcherMockRecorder { } // MatchWithBaseImages mocks base method. -func (m *MockMatcher) MatchWithBaseImages(ctx context.Context, layers []string) ([]*storage.BaseImageInfo, error) { +func (m *MockMatcher) MatchWithBaseImages(ctx context.Context, layers []string) ([]*storage.BaseImage, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MatchWithBaseImages", ctx, layers) - ret0, _ := ret[0].([]*storage.BaseImageInfo) + ret0, _ := ret[0].([]*storage.BaseImage) ret1, _ := ret[1].(error) return ret0, ret1 } diff --git a/central/graphql/resolvers/image_components_v2_postgres_test.go b/central/graphql/resolvers/image_components_v2_postgres_test.go index d31eab72f83f2..2fde535cf68a9 100644 --- a/central/graphql/resolvers/image_components_v2_postgres_test.go +++ b/central/graphql/resolvers/image_components_v2_postgres_test.go @@ -128,7 +128,7 @@ func (s *GraphQLImageComponentV2TestSuite) TestImageComponents() { } inBaseImageLayerMap := map[string]bool{ s.componentIDMap[comp11]: false, - s.componentIDMap[comp12]: true, + s.componentIDMap[comp12]: false, // comp1 on image2 has no LayerIndex, so we can't determine origin s.componentIDMap[comp21]: false, s.componentIDMap[comp31]: false, s.componentIDMap[comp32]: true, diff --git a/central/graphql/resolvers/test_utils.go b/central/graphql/resolvers/test_utils.go index d8ffb0e60db62..08ecb07643efa 100644 --- a/central/graphql/resolvers/test_utils.go +++ b/central/graphql/resolvers/test_utils.go @@ -238,12 +238,14 @@ func testImages() []*storage.Image { BaseImageFullName: "busybox:latest", BaseImageDigest: "sha256:alpine312", Created: t3, + MaxLayerIndex: 10, }, { BaseImageId: "base-sha3", BaseImageFullName: "alpine:3.12", BaseImageDigest: "sha256:busybox1", Created: t3, + MaxLayerIndex: 10, }, }, }, diff --git a/central/image/datastore/store/common/v2/parts_test.go b/central/image/datastore/store/common/v2/parts_test.go index 
a67fbc13d45b5..71aefdbfef53b 100644 --- a/central/image/datastore/store/common/v2/parts_test.go +++ b/central/image/datastore/store/common/v2/parts_test.go @@ -162,11 +162,13 @@ func TestSplitAndMergeImage(t *testing.T) { BaseImageId: "some-id", BaseImageFullName: "registry.example.com/ns/base:tag", BaseImageDigest: "sha256:...", + MaxLayerIndex: 3, }, { BaseImageId: "another-id", BaseImageFullName: "registry.example.com/ns/other:tag", BaseImageDigest: "sha256:...", + MaxLayerIndex: 3, }, }, Metadata: &storage.ImageMetadata{ @@ -302,11 +304,13 @@ func TestSplitAndMergeImage(t *testing.T) { BaseImageId: "some-id", BaseImageFullName: "registry.example.com/ns/base:tag", BaseImageDigest: "sha256:...", + MaxLayerIndex: 3, }, { BaseImageId: "another-id", BaseImageFullName: "registry.example.com/ns/other:tag", BaseImageDigest: "sha256:...", + MaxLayerIndex: 3, }, }, Scan: &storage.ImageScan{ diff --git a/central/image/datastore/store/common/v2/split_v2.go b/central/image/datastore/store/common/v2/split_v2.go index 3d103025269ee..1489bf96cc346 100644 --- a/central/image/datastore/store/common/v2/split_v2.go +++ b/central/image/datastore/store/common/v2/split_v2.go @@ -98,8 +98,10 @@ func GenerateImageComponentV2(os string, image *storage.Image, index int, from * } ret.LayerType = storage.LayerType_APPLICATION - if len(image.GetBaseImageInfo()) > 0 { - ret.LayerType = storage.LayerType_BASE_IMAGE + if len(image.GetBaseImageInfo()) > 0 && from.GetHasLayerIndex() != nil { + if from.GetLayerIndex() <= image.GetBaseImageInfo()[0].GetMaxLayerIndex() { + ret.LayerType = storage.LayerType_BASE_IMAGE + } } return ret, nil } diff --git a/central/imagev2/datastore/store/common/parts_test.go b/central/imagev2/datastore/store/common/parts_test.go index c720ac0345818..3d8f59ac2f926 100644 --- a/central/imagev2/datastore/store/common/parts_test.go +++ b/central/imagev2/datastore/store/common/parts_test.go @@ -175,11 +175,13 @@ func TestSplitAndMergeImageV2(t *testing.T) { BaseImageId: "some-id", BaseImageFullName: "registry.example.com/ns/base:tag", BaseImageDigest: "sha256:...", + MaxLayerIndex: 3, }, { BaseImageId: "another-id", BaseImageFullName: "registry.example.com/ns/other:tag", BaseImageDigest: "sha256:...", + MaxLayerIndex: 3, }, }, Metadata: &storage.ImageMetadata{ @@ -323,11 +325,13 @@ func TestSplitAndMergeImageV2(t *testing.T) { { BaseImageId: "some-id", BaseImageFullName: "registry.example.com/ns/base:tag", + MaxLayerIndex: 3, BaseImageDigest: "sha256:...", }, { BaseImageId: "another-id", BaseImageFullName: "registry.example.com/ns/other:tag", + MaxLayerIndex: 3, BaseImageDigest: "sha256:...", }, }, diff --git a/central/imagev2/datastore/store/common/split.go b/central/imagev2/datastore/store/common/split.go index 11c5ebd383ddf..60d4829f87145 100644 --- a/central/imagev2/datastore/store/common/split.go +++ b/central/imagev2/datastore/store/common/split.go @@ -102,8 +102,10 @@ func GenerateImageComponentV2(os string, image *storage.ImageV2, index int, from } ret.LayerType = storage.LayerType_APPLICATION - if len(image.GetBaseImageInfo()) > 0 { - ret.LayerType = storage.LayerType_BASE_IMAGE + if len(image.GetBaseImageInfo()) > 0 && from.GetHasLayerIndex() != nil { + if from.GetLayerIndex() <= image.GetBaseImageInfo()[0].GetMaxLayerIndex() { + ret.LayerType = storage.LayerType_BASE_IMAGE + } } return ret, nil } diff --git a/generated/api/v1/image_service.swagger.json b/generated/api/v1/image_service.swagger.json index efd5c5e4e3c56..c294f0865c1f0 100644 --- 
a/generated/api/v1/image_service.swagger.json +++ b/generated/api/v1/image_service.swagger.json @@ -600,6 +600,11 @@ "created": { "type": "string", "format": "date-time" + }, + "maxLayerIndex": { + "type": "integer", + "format": "int32", + "description": "Index of the last base image layer, taking into account\n\"empty layers\" (aka. metadata history without SHA)." } } }, diff --git a/generated/api/v1/vuln_mgmt_service.swagger.json b/generated/api/v1/vuln_mgmt_service.swagger.json index b40e5656ed951..5655f69e2ed52 100644 --- a/generated/api/v1/vuln_mgmt_service.swagger.json +++ b/generated/api/v1/vuln_mgmt_service.swagger.json @@ -306,6 +306,11 @@ "created": { "type": "string", "format": "date-time" + }, + "maxLayerIndex": { + "type": "integer", + "format": "int32", + "description": "Index of the last base image layer, taking into account\n\"empty layers\" (aka. metadata history without SHA)." } } }, diff --git a/generated/storage/image.pb.go b/generated/storage/image.pb.go index bef9f84569e75..102c049cfda62 100644 --- a/generated/storage/image.pb.go +++ b/generated/storage/image.pb.go @@ -1879,8 +1879,11 @@ type BaseImageInfo struct { BaseImageFullName string `protobuf:"bytes,2,opt,name=base_image_full_name,json=baseImageFullName,proto3" json:"base_image_full_name,omitempty"` BaseImageDigest string `protobuf:"bytes,3,opt,name=base_image_digest,json=baseImageDigest,proto3" json:"base_image_digest,omitempty"` Created *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created,proto3" json:"created,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // Index of the last base image layer, taking into account + // "empty layers" (aka. metadata history without SHA). + MaxLayerIndex int32 `protobuf:"varint,5,opt,name=max_layer_index,json=maxLayerIndex,proto3" json:"max_layer_index,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *BaseImageInfo) Reset() { @@ -1941,6 +1944,13 @@ func (x *BaseImageInfo) GetCreated() *timestamppb.Timestamp { return nil } +func (x *BaseImageInfo) GetMaxLayerIndex() int32 { + if x != nil { + return x.MaxLayerIndex + } + return 0 +} + type EmbeddedImageScanComponent_Executable struct { state protoimpl.MessageState `protogen:"open.v1"` Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` @@ -2170,12 +2180,13 @@ const file_storage_image_proto_rawDesc = "" + "\vset_fixableJ\x04\b\t\x10\n" + "\"\"\n" + "\fWatchedImage\x12\x12\n" + - "\x04name\x18\x01 \x01(\tR\x04name\"\xc6\x01\n" + + "\x04name\x18\x01 \x01(\tR\x04name\"\xee\x01\n" + "\rBaseImageInfo\x12\"\n" + "\rbase_image_id\x18\x01 \x01(\tR\vbaseImageId\x12/\n" + "\x14base_image_full_name\x18\x02 \x01(\tR\x11baseImageFullName\x12*\n" + "\x11base_image_digest\x18\x03 \x01(\tR\x0fbaseImageDigest\x124\n" + - "\acreated\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\acreated*s\n" + + "\acreated\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\acreated\x12&\n" + + "\x0fmax_layer_index\x18\x05 \x01(\x05R\rmaxLayerIndex*s\n" + "\n" + "SourceType\x12\x06\n" + "\x02OS\x10\x00\x12\n" + diff --git a/generated/storage/image_vtproto.pb.go b/generated/storage/image_vtproto.pb.go index 733d77470c9f8..5424204c95f3a 100644 --- a/generated/storage/image_vtproto.pb.go +++ b/generated/storage/image_vtproto.pb.go @@ -647,6 +647,7 @@ func (m *BaseImageInfo) CloneVT() *BaseImageInfo { r.BaseImageFullName = m.BaseImageFullName r.BaseImageDigest = m.BaseImageDigest r.Created = 
(*timestamppb.Timestamp)((*timestamppb1.Timestamp)(m.Created).CloneVT()) + r.MaxLayerIndex = m.MaxLayerIndex if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -1700,6 +1701,9 @@ func (this *BaseImageInfo) EqualVT(that *BaseImageInfo) bool { if !(*timestamppb1.Timestamp)(this.Created).EqualVT((*timestamppb1.Timestamp)(that.Created)) { return false } + if this.MaxLayerIndex != that.MaxLayerIndex { + return false + } return string(this.unknownFields) == string(that.unknownFields) } @@ -3301,6 +3305,11 @@ func (m *BaseImageInfo) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.MaxLayerIndex != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MaxLayerIndex)) + i-- + dAtA[i] = 0x28 + } if m.Created != nil { size, err := (*timestamppb1.Timestamp)(m.Created).MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -4029,6 +4038,9 @@ func (m *BaseImageInfo) SizeVT() (n int) { l = (*timestamppb1.Timestamp)(m.Created).SizeVT() n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } + if m.MaxLayerIndex != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MaxLayerIndex)) + } n += len(m.unknownFields) return n } @@ -8070,6 +8082,25 @@ func (m *BaseImageInfo) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxLayerIndex", wireType) + } + m.MaxLayerIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxLayerIndex |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -12282,6 +12313,25 @@ func (m *BaseImageInfo) UnmarshalVTUnsafe(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxLayerIndex", wireType) + } + m.MaxLayerIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxLayerIndex |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) diff --git a/pkg/images/enricher/enricher.go b/pkg/images/enricher/enricher.go index da254d9f9c4ff..7f21c97754e01 100644 --- a/pkg/images/enricher/enricher.go +++ b/pkg/images/enricher/enricher.go @@ -145,7 +145,7 @@ type CVESuppressor interface { type ImageGetter func(ctx context.Context, id string) (*storage.Image, bool, error) // BaseImageGetter will be used to get base images of a given image -type BaseImageGetter func(ctx context.Context, layers []string) ([]*storage.BaseImageInfo, error) +type BaseImageGetter func(ctx context.Context, layers []string) ([]*storage.BaseImage, error) // SignatureIntegrationGetter will be used to retrieve all available signature integrations. 
type SignatureIntegrationGetter func(ctx context.Context) ([]*storage.SignatureIntegration, error) diff --git a/pkg/images/enricher/enricher_impl.go b/pkg/images/enricher/enricher_impl.go index ab7492499560a..3e8f8d791053e 100644 --- a/pkg/images/enricher/enricher_impl.go +++ b/pkg/images/enricher/enricher_impl.go @@ -29,6 +29,7 @@ import ( scannerTypes "github.com/stackrox/rox/pkg/scanners/types" "github.com/stackrox/rox/pkg/signatures" "github.com/stackrox/rox/pkg/sync" + pkgUtils "github.com/stackrox/rox/pkg/utils" scannerV1 "github.com/stackrox/scanner/generated/scanner/api/v1" "golang.org/x/time/rate" ) @@ -311,7 +312,7 @@ func (e *enricherImpl) EnrichImage(ctx context.Context, enrichContext Enrichment } if features.BaseImageDetection.Enabled() { - image.BaseImageInfo, err = e.baseImageGetter(ctx, image.GetMetadata().GetLayerShas()) + matchedBaseImages, err := e.baseImageGetter(ctx, image.GetMetadata().GetLayerShas()) if err != nil { log.Warnw("Matching image with base images", logging.FromContext(ctx), @@ -319,6 +320,7 @@ func (e *enricherImpl) EnrichImage(ctx context.Context, enrichContext Enrichment logging.Err(err), logging.String("request_image", image.GetName().GetFullName())) } + image.BaseImageInfo = toBaseImageInfos(image.GetMetadata(), matchedBaseImages) } return EnrichmentResult{ @@ -1060,3 +1062,95 @@ func FillScanStats(i *storage.Image) { } } } + +// toBaseImageInfos converts matched BaseImage objects to BaseImageInfo for storage in the image. +// It computes the max layer index based on the first base image's metadata. +func toBaseImageInfos(metadata *storage.ImageMetadata, baseImages []*storage.BaseImage) []*storage.BaseImageInfo { + if len(baseImages) == 0 { + return nil + } + + // Verify all base images have the same layer count. + // If not, this indicates a bug in the matcher (e.g., returning nested base images). + firstLayerCount := len(baseImages[0].GetLayers()) + for _, bi := range baseImages[1:] { + if len(bi.GetLayers()) != firstLayerCount { + pkgUtils.Should(errors.Errorf( + "base images have inconsistent layer counts: %s has %d layers, %s has %d layers", + baseImages[0].GetId(), firstLayerCount, bi.GetId(), len(bi.GetLayers()))) + break + } + } + + maxIndex, err := resolveLayerBoundary(metadata, firstLayerCount) + if err != nil { + log.Warnw("Failed to resolve base image layer boundary, ignoring base image matches", + logging.String("base_image_id", baseImages[0].GetId()), + logging.Err(err)) + return nil + } + + infos := make([]*storage.BaseImageInfo, 0, len(baseImages)) + for _, bi := range baseImages { + infos = append(infos, &storage.BaseImageInfo{ + BaseImageId: bi.GetId(), + BaseImageFullName: fmt.Sprintf("%s:%s", bi.GetRepository(), bi.GetTag()), + BaseImageDigest: bi.GetManifestDigest(), + Created: bi.GetCreated(), + MaxLayerIndex: int32(maxIndex), + }) + } + return infos +} + +// resolveLayerBoundary converts the base image's content layer count to the +// manifest index of its last layer. +// +// Components use manifest indices (which include empty layers), while base image +// layer counts exclude empty layers. This function bridges that gap by recounting +// the last layer index using empty layers when the image has V2 metadata. 
+func resolveLayerBoundary(metadata *storage.ImageMetadata, baseImageLayerCount int) (int, error) { + if baseImageLayerCount <= 0 { + return 0, errors.New("base image has no content layers") + } + + metadataLayers := metadata.GetV1().GetLayers() + layerIndex := baseImageLayerCount - 1 + + // If V1 layer metadata is available and V2 exists, account for empty + // layers by finding the actual manifest index. + if len(metadataLayers) > 0 && metadata.GetV2() != nil { + contentIndex := 0 + for i, l := range metadataLayers { + if l.GetEmpty() { + continue + } + if contentIndex == layerIndex { + return i, nil + } + contentIndex++ + } + return 0, fmt.Errorf( + "base image claims %d content layers but image only has %d non-empty layers", + baseImageLayerCount, contentIndex) + } + + // V1-only, or V2 without V1 layer metadata: content index equals manifest + // index directly. Use LayerShas for validation since it always contains + // content layers. + layerCount := len(metadata.GetLayerShas()) + if layerCount == 0 { + // This is not expected, but since an empty layer SHA list should never match, we + // attempt to check layer count based on V1 manifest layer count. + layerCount = len(metadataLayers) + } + if layerCount == 0 { + return 0, errors.New("invalid base image match: image has no layer metadata") + } + if layerIndex >= layerCount { + return 0, fmt.Errorf( + "invalid base image match: base image claims %d layers but image only has %d", + baseImageLayerCount, layerCount) + } + return layerIndex, nil +} diff --git a/pkg/images/enricher/enricher_impl_test.go b/pkg/images/enricher/enricher_impl_test.go index 6b6f5fcb17801..9035dc89b842f 100644 --- a/pkg/images/enricher/enricher_impl_test.go +++ b/pkg/images/enricher/enricher_impl_test.go @@ -1388,11 +1388,15 @@ func TestEnrichImageWithBaseImages(t *testing.T) { const expectedDigest = "sha256:abcdef123456" // CHANGE: Replace mockMatcher with a function closure - mockBaseImageGetter := func(ctx context.Context, layers []string) ([]*storage.BaseImageInfo, error) { - return []*storage.BaseImageInfo{ + mockBaseImageGetter := func(ctx context.Context, layers []string) ([]*storage.BaseImage, error) { + return []*storage.BaseImage{ { - BaseImageFullName: expectedName, - BaseImageDigest: expectedDigest, + Repository: "docker.io/library/alpine", + Tag: "3.18", + ManifestDigest: expectedDigest, + Layers: []*storage.BaseImageLayer{ + {LayerDigest: "sha1", Index: 0}, + }, }, }, nil } @@ -1421,6 +1425,12 @@ func TestEnrichImageWithBaseImages(t *testing.T) { Metadata: &storage.ImageMetadata{ LayerShas: []string{"sha1", "sha2"}, DataSource: &storage.DataSource{Id: "test-id"}, + V1: &storage.V1Metadata{ + Layers: []*storage.ImageLayer{ + {Instruction: "ADD"}, + {Instruction: "RUN"}, + }, + }, }, } @@ -1440,3 +1450,199 @@ func newEnricher(set *mocks.MockSet, mockReporter *reporterMocks.MockReporter) I emptyImageGetter, mockReporter, emptySignatureIntegrationGetter, nil) } + +func TestResolveLayerBoundary(t *testing.T) { + cases := map[string]struct { + metadata *storage.ImageMetadata + baseContentLayers int + expected int + expectError bool + }{ + "V2 present with empty layers": { + metadata: &storage.ImageMetadata{ + V1: &storage.V1Metadata{ + Layers: []*storage.ImageLayer{ + {Empty: true}, // 0: empty (ENV) + {Empty: false}, // 1: content layer 0 + {Empty: false}, // 2: content layer 1 + {Empty: true}, // 3: empty (LABEL) + {Empty: false}, // 4: content layer 2 + }, + }, + V2: &storage.V2Metadata{}, + }, + baseContentLayers: 2, // base has 2 content layers + 
expected: 2, // manifest index of last base layer + }, + "V2 present all content layers": { + metadata: &storage.ImageMetadata{ + V1: &storage.V1Metadata{ + Layers: []*storage.ImageLayer{ + {Empty: false}, // 0 + {Empty: false}, // 1 + {Empty: false}, // 2 + }, + }, + V2: &storage.V2Metadata{}, + }, + baseContentLayers: 2, + expected: 1, + }, + "V1 only - direct mapping": { + metadata: &storage.ImageMetadata{ + V1: &storage.V1Metadata{ + Layers: []*storage.ImageLayer{ + {}, // 0 + {}, // 1 + {}, // 2 + }, + }, + // No V2 + }, + baseContentLayers: 2, + expected: 1, + }, + "error: no content layers": { + metadata: &storage.ImageMetadata{ + V1: &storage.V1Metadata{ + Layers: []*storage.ImageLayer{{Empty: false}}, + }, + V2: &storage.V2Metadata{}, + }, + baseContentLayers: 0, + expectError: true, + }, + "error: not enough layers V1": { + metadata: &storage.ImageMetadata{ + V1: &storage.V1Metadata{ + Layers: []*storage.ImageLayer{{}}, + }, + }, + baseContentLayers: 5, + expectError: true, + }, + "error: not enough content layers V2": { + metadata: &storage.ImageMetadata{ + V1: &storage.V1Metadata{ + Layers: []*storage.ImageLayer{ + {Empty: true}, + {Empty: false}, + }, + }, + V2: &storage.V2Metadata{}, + }, + baseContentLayers: 5, + expectError: true, + }, + "V2 without V1 layers - uses LayerShas": { + metadata: &storage.ImageMetadata{ + V2: &storage.V2Metadata{}, + LayerShas: []string{"sha1", "sha2", "sha3"}, + // V1 is nil or has no layers + }, + baseContentLayers: 2, + expected: 1, + }, + "no layer metadata at all": { + metadata: &storage.ImageMetadata{}, + baseContentLayers: 2, + expectError: true, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + result, err := resolveLayerBoundary(c.metadata, c.baseContentLayers) + if c.expectError { + assert.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, c.expected, result) + } + }) + } +} + +func TestToBaseImageInfos(t *testing.T) { + cases := map[string]struct { + metadata *storage.ImageMetadata + baseImages []*storage.BaseImage + expected []*storage.BaseImageInfo + }{ + "computes max layer index with empty layers": { + metadata: &storage.ImageMetadata{ + V1: &storage.V1Metadata{ + Layers: []*storage.ImageLayer{ + {Empty: false}, // 0: content 0 + {Empty: true}, // 1: empty + {Empty: false}, // 2: content 1 + {Empty: false}, // 3: content 2 (app layer) + }, + }, + V2: &storage.V2Metadata{}, + }, + baseImages: []*storage.BaseImage{ + { + Id: "base-1", + Repository: "rhel", + Tag: "8", + ManifestDigest: "sha256:abc", + Layers: []*storage.BaseImageLayer{ + {LayerDigest: "layer-1", Index: 0}, + {LayerDigest: "layer-2", Index: 1}, + }, + }, + }, + expected: []*storage.BaseImageInfo{ + { + BaseImageId: "base-1", + BaseImageFullName: "rhel:8", + BaseImageDigest: "sha256:abc", + MaxLayerIndex: 2, // content layer 1 is at manifest index 2 + }, + }, + }, + "skips base image if boundary resolution fails": { + metadata: &storage.ImageMetadata{ + V1: &storage.V1Metadata{ + Layers: []*storage.ImageLayer{ + {Empty: false}, + }, + }, + V2: &storage.V2Metadata{}, + }, + baseImages: []*storage.BaseImage{ + { + Id: "bad-base", + Repository: "too-many", + Tag: "layers", + Layers: []*storage.BaseImageLayer{ + {}, {}, {}, {}, {}, // 5 layers but image only has 1 + }, + }, + }, + expected: nil, // skipped due to error + }, + "empty base images returns nil": { + metadata: &storage.ImageMetadata{}, + baseImages: nil, + expected: nil, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + result := 
toBaseImageInfos(c.metadata, c.baseImages) + if c.expected == nil { + assert.Empty(t, result) + } else { + require.Len(t, result, len(c.expected)) + for i, exp := range c.expected { + assert.Equal(t, exp.GetBaseImageId(), result[i].GetBaseImageId()) + assert.Equal(t, exp.GetBaseImageFullName(), result[i].GetBaseImageFullName()) + assert.Equal(t, exp.GetMaxLayerIndex(), result[i].GetMaxLayerIndex()) + } + } + }) + } +} diff --git a/pkg/images/enricher/enricher_v2.go b/pkg/images/enricher/enricher_v2.go index e564d7bd4f005..169ac47b396f1 100644 --- a/pkg/images/enricher/enricher_v2.go +++ b/pkg/images/enricher/enricher_v2.go @@ -35,7 +35,7 @@ type ImageEnricherV2 interface { type ImageGetterV2 func(ctx context.Context, id string) (*storage.ImageV2, bool, error) // BaseImageGetterV2 will be used to get base images of a given image -type BaseImageGetterV2 func(ctx context.Context, layers []string) ([]*storage.BaseImageInfo, error) +type BaseImageGetterV2 func(ctx context.Context, layers []string) ([]*storage.BaseImage, error) // NewV2 returns a new ImageEnricherV2 instance for the given subsystem. // (The subsystem is just used for Prometheus metrics.) diff --git a/pkg/images/enricher/enricher_v2_impl.go b/pkg/images/enricher/enricher_v2_impl.go index e339e7541f33c..c99d8f7076b10 100644 --- a/pkg/images/enricher/enricher_v2_impl.go +++ b/pkg/images/enricher/enricher_v2_impl.go @@ -296,7 +296,7 @@ func (e *enricherV2Impl) EnrichImage(ctx context.Context, enrichContext Enrichme e.cvesSuppressor.EnrichImageV2WithSuppressedCVEs(imageV2) if features.BaseImageDetection.Enabled() { - imageV2.BaseImageInfo, err = e.baseImageGetter(ctx, imageV2.GetMetadata().GetLayerShas()) + matchedBaseImages, err := e.baseImageGetter(ctx, imageV2.GetMetadata().GetLayerShas()) if err != nil { log.Warnw("Matching image with base images", logging.FromContext(ctx), @@ -304,6 +304,7 @@ func (e *enricherV2Impl) EnrichImage(ctx context.Context, enrichContext Enrichme logging.Err(err), logging.String("request_image", imageV2.GetName().GetFullName())) } + imageV2.BaseImageInfo = toBaseImageInfos(imageV2.GetMetadata(), matchedBaseImages) } if !errorList.Empty() { diff --git a/pkg/images/enricher/enricher_v2_impl_test.go b/pkg/images/enricher/enricher_v2_impl_test.go index 660d27d28be4f..cfa31cb4f0779 100644 --- a/pkg/images/enricher/enricher_v2_impl_test.go +++ b/pkg/images/enricher/enricher_v2_impl_test.go @@ -1250,11 +1250,15 @@ func TestEnrichImageWithBaseImagesV2(t *testing.T) { const expectedDigest = "sha256:abcdef123456" // CHANGE: Define the mock function instead of the gomock matcher - mockBaseImageGetter := func(ctx context.Context, layers []string) ([]*storage.BaseImageInfo, error) { - return []*storage.BaseImageInfo{ + mockBaseImageGetter := func(ctx context.Context, layers []string) ([]*storage.BaseImage, error) { + return []*storage.BaseImage{ { - BaseImageFullName: expectedName, - BaseImageDigest: expectedDigest, + Repository: "docker.io/library/alpine", + Tag: "3.18", + ManifestDigest: expectedDigest, + Layers: []*storage.BaseImageLayer{ + {LayerDigest: "sha1", Index: 0}, + }, }, }, nil } @@ -1285,6 +1289,12 @@ func TestEnrichImageWithBaseImagesV2(t *testing.T) { Metadata: &storage.ImageMetadata{ LayerShas: []string{"sha1", "sha2"}, DataSource: &storage.DataSource{Id: "test-id"}, + V1: &storage.V1Metadata{ + Layers: []*storage.ImageLayer{ + {Instruction: "ADD"}, + {Instruction: "RUN"}, + }, + }, }, } diff --git a/proto/storage/image.proto b/proto/storage/image.proto index a2104b1f43eda..6b81ad3ceb282 100644 --- 
a/proto/storage/image.proto +++ b/proto/storage/image.proto @@ -260,4 +260,7 @@ message BaseImageInfo { string base_image_full_name = 2; string base_image_digest = 3; google.protobuf.Timestamp created = 4; + // Index of the last base image layer, taking into account + // "empty layers" (aka. metadata history without SHA). + int32 max_layer_index = 5; } diff --git a/proto/storage/proto.lock b/proto/storage/proto.lock index a168c3d3e088c..0be9b354dc92e 100644 --- a/proto/storage/proto.lock +++ b/proto/storage/proto.lock @@ -9491,6 +9491,11 @@ "id": 4, "name": "created", "type": "google.protobuf.Timestamp" + }, + { + "id": 5, + "name": "max_layer_index", + "type": "int32" } ] } From abcca37d15ac36518b2bc25e8f67a727c9e618c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Luis=20Valero=20Mart=C3=ADn?= Date: Fri, 30 Jan 2026 09:26:02 +0100 Subject: [PATCH 081/232] ROX-28465: Disable pprof servers if continuous profiling is enabled (#18725) --- central/main.go | 4 ++++ scanner/cmd/scanner/main.go | 24 +++++++++++++++--------- sensor/common/sensor/sensor.go | 4 +++- 3 files changed, 22 insertions(+), 10 deletions(-) diff --git a/central/main.go b/central/main.go index b002a8d387a2c..0a9e85f9e1886 100644 --- a/central/main.go +++ b/central/main.go @@ -970,6 +970,10 @@ func customRoutes() (customRoutes []routes.CustomRoute) { } func debugRoutes() []routes.CustomRoute { + if env.ContinuousProfiling.BooleanSetting() { + return []routes.CustomRoute{} + } + customRoutes := make([]routes.CustomRoute, 0, len(routes.DebugRoutes)) for r, h := range routes.DebugRoutes { diff --git a/scanner/cmd/scanner/main.go b/scanner/cmd/scanner/main.go index 0c20a36975dfa..0cdacf1f26669 100644 --- a/scanner/cmd/scanner/main.go +++ b/scanner/cmd/scanner/main.go @@ -16,6 +16,7 @@ import ( "github.com/rs/zerolog/log" "github.com/stackrox/rox/pkg/buildinfo" "github.com/stackrox/rox/pkg/continuousprofiling" + "github.com/stackrox/rox/pkg/env" "github.com/stackrox/rox/pkg/features" "github.com/stackrox/rox/pkg/grpc" "github.com/stackrox/rox/pkg/grpc/authn" @@ -167,15 +168,20 @@ func createGRPCService(backends *Backends, cfg *config.Config) (grpc.API, error) } // Custom routes: debugging. - customRoutes := make([]routes.CustomRoute, 0, len(routes.DebugRoutes)+ - len(backends.HealthRoutes())) - for path, handler := range routes.DebugRoutes { - customRoutes = append(customRoutes, routes.CustomRoute{ - Route: path, - Authorizer: allow.Anonymous(), - ServerHandler: handler, - Compression: true, - }) + var customRoutes []routes.CustomRoute + if !env.ContinuousProfiling.BooleanSetting() { + customRoutes = make([]routes.CustomRoute, 0, len(routes.DebugRoutes)+ + len(backends.HealthRoutes())) + for path, handler := range routes.DebugRoutes { + customRoutes = append(customRoutes, routes.CustomRoute{ + Route: path, + Authorizer: allow.Anonymous(), + ServerHandler: handler, + Compression: true, + }) + } + } else { + customRoutes = make([]routes.CustomRoute, 0, len(backends.HealthRoutes())) } // Custom routes: health checking. 
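
The ROX-28465 change above applies the same pattern in Central and Scanner: when continuous profiling is enabled, the local pprof/debug routes are simply never registered. A minimal standalone sketch of that gate, using a hypothetical CONTINUOUS_PROFILING_ENABLED variable in place of the real env.ContinuousProfiling setting, might look like this:

package main

import (
	"log"
	"net/http"
	"net/http/pprof"
	"os"
	"strconv"
)

// continuousProfilingEnabled mimics an env.BooleanSetting-style lookup; the
// variable name is illustrative only, not the real StackRox setting.
func continuousProfilingEnabled() bool {
	v, err := strconv.ParseBool(os.Getenv("CONTINUOUS_PROFILING_ENABLED"))
	return err == nil && v
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	// Wire up the pprof debug routes only when continuous profiling is off,
	// mirroring the gating added in central/main.go and scanner/cmd/scanner/main.go.
	if !continuousProfilingEnabled() {
		mux.HandleFunc("/debug/pprof/", pprof.Index)
		mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
		mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
	}

	log.Fatal(http.ListenAndServe("localhost:6060", mux))
}

This is a sketch of the control flow only; the actual services build routes.CustomRoute slices and attach them to their gRPC/HTTP gateway rather than a bare ServeMux.
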
diff --git a/sensor/common/sensor/sensor.go b/sensor/common/sensor/sensor.go index 8debefab721d7..15d62f3183471 100644 --- a/sensor/common/sensor/sensor.go +++ b/sensor/common/sensor/sensor.go @@ -198,7 +198,9 @@ func (s *Sensor) Start() { } } s.imageService.SetClient(s.centralConnection) - s.profilingServer = s.startProfilingServer() + if !env.ContinuousProfiling.BooleanSetting() { + s.profilingServer = s.startProfilingServer() + } var centralReachable concurrency.Flag From eb006bd57605bb160d6c5bc6d5b1843bb123c59c Mon Sep 17 00:00:00 2001 From: Yi Li Date: Fri, 30 Jan 2026 02:26:33 -0600 Subject: [PATCH 082/232] ROX-32876: return base images with max layers (#18759) --- central/baseimage/matcher/matcher_impl.go | 17 +++++- .../baseimage/matcher/matcher_impl_test.go | 61 +++++++++++++++++++ 2 files changed, 77 insertions(+), 1 deletion(-) diff --git a/central/baseimage/matcher/matcher_impl.go b/central/baseimage/matcher/matcher_impl.go index 93d12bf6c8079..9b43bc5482dea 100644 --- a/central/baseimage/matcher/matcher_impl.go +++ b/central/baseimage/matcher/matcher_impl.go @@ -46,14 +46,19 @@ func (m matcherImpl) MatchWithBaseImages(ctx context.Context, layers []string) ( return nil, fmt.Errorf("listing candidates for layer %s: %w", firstLayer, err) } var baseImages []*storage.BaseImage + maxLayers := 0 + for _, c := range candidates { candidateLayers := c.GetLayers() slices.SortFunc(candidateLayers, func(a, b *storage.BaseImageLayer) int { return int(a.GetIndex() - b.GetIndex()) }) + + // base images should always have less layers than a target image if len(layers) <= len(candidateLayers) { continue } + match := true for i, l := range candidateLayers { if layers[i] != l.GetLayerDigest() { @@ -63,8 +68,18 @@ func (m matcherImpl) MatchWithBaseImages(ctx context.Context, layers []string) ( } if match { - baseImages = append(baseImages, c) + n := len(candidateLayers) + + if n > maxLayers { + // Found a better (longer) match: reset the slice and update max + maxLayers = n + baseImages = []*storage.BaseImage{c} + } else if n == maxLayers { + // Found another match of the same (maximum) length + baseImages = append(baseImages, c) + } } } + return baseImages, nil } diff --git a/central/baseimage/matcher/matcher_impl_test.go b/central/baseimage/matcher/matcher_impl_test.go index 06b1398b2150d..3cce996fdb865 100644 --- a/central/baseimage/matcher/matcher_impl_test.go +++ b/central/baseimage/matcher/matcher_impl_test.go @@ -121,6 +121,67 @@ func TestMatchWithBaseImages(t *testing.T) { }, nil) }, expected: nil, + }, { + desc: "Max layers only: Returns 3-layer match and ignores 2-layer match", + imgLayers: []string{"L1", "L2", "L3", "L4"}, + mockSetup: func() { + mockDS.EXPECT(). + ListCandidateBaseImages(gomock.Any(), "L1"). + Return([]*storage.BaseImage{ + { + Id: "short-match", // 2 layers + Layers: []*storage.BaseImageLayer{ + {LayerDigest: "L1", Index: 0}, + {LayerDigest: "L2", Index: 1}, + }, + }, + { + Id: "long-match", // 3 layers - the winner + Layers: []*storage.BaseImageLayer{ + {LayerDigest: "L1", Index: 0}, + {LayerDigest: "L2", Index: 1}, + {LayerDigest: "L3", Index: 2}, + }, + }, + }, nil) + }, + expected: []*storage.BaseImage{ + {Id: "long-match"}, + }, + }, + { + desc: "Max layers only: Returns multiple matches if they have same max length", + imgLayers: []string{"L1", "L2", "L3"}, + mockSetup: func() { + mockDS.EXPECT(). + ListCandidateBaseImages(gomock.Any(), "L1"). 
+ Return([]*storage.BaseImage{ + { + Id: "match-A", + Layers: []*storage.BaseImageLayer{ + {LayerDigest: "L1", Index: 0}, + {LayerDigest: "L2", Index: 1}, + }, + }, + { + Id: "match-B", + Layers: []*storage.BaseImageLayer{ + {LayerDigest: "L1", Index: 0}, + {LayerDigest: "L2", Index: 1}, + }, + }, + { + Id: "too-short", + Layers: []*storage.BaseImageLayer{ + {LayerDigest: "L1", Index: 0}, + }, + }, + }, nil) + }, + expected: []*storage.BaseImage{ + {Id: "match-A"}, + {Id: "match-B"}, + }, }, } From 500e3e9eee6fe628c0dd5c9352fa22d7ec8c3d24 Mon Sep 17 00:00:00 2001 From: Mauro Ezequiel Moltrasio Date: Fri, 30 Jan 2026 12:49:44 +0100 Subject: [PATCH 083/232] fix(ci): add missing dependency on check-fact-version (#18774) --- .github/workflows/release-ci.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/release-ci.yaml b/.github/workflows/release-ci.yaml index 42459aed8e2d1..f28143aa12eaf 100644 --- a/.github/workflows/release-ci.yaml +++ b/.github/workflows/release-ci.yaml @@ -84,7 +84,7 @@ jobs: check_fact_version publish-helm-charts: - needs: [build, check-scanner-version, check-collector-version, check-is-release] + needs: [build, check-scanner-version, check-collector-version, check-fact-version, check-is-release] if: needs.check-is-release.outputs.is_release == 'true' runs-on: ubuntu-latest steps: @@ -113,7 +113,7 @@ jobs: # Publish `roxagent` and `roxctl`. publish-cli: - needs: [build, check-scanner-version, check-collector-version, check-is-release] + needs: [build, check-scanner-version, check-collector-version, check-fact-version, check-is-release] if: needs.check-is-release.outputs.is_release == 'true' runs-on: ubuntu-latest steps: @@ -139,7 +139,7 @@ jobs: publish_cli "${STACKROX_TAG}" publish-openapispec: - needs: [build, check-scanner-version, check-collector-version, check-is-release] + needs: [build, check-scanner-version, check-collector-version, check-fact-version, check-is-release] if: needs.check-is-release.outputs.is_release == 'true' runs-on: ubuntu-latest steps: From 935e65167e6cca85a0cf43597a1be8886dd02b09 Mon Sep 17 00:00:00 2001 From: rhacs-bot <148914812+rhacs-bot@users.noreply.github.com> Date: Fri, 30 Jan 2026 19:50:47 +0100 Subject: [PATCH 084/232] chore(release): Advance `CHANGELOG.md` to the next release (#18763) Co-authored-by: crozzy --- CHANGELOG.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1047217411b23..b7c7bd689d63f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,18 @@ Changes should still be described appropriately in JIRA/doc input pages, for inc ## [NEXT RELEASE] +### Added Features + +### Removed Features + +### Deprecated Features + +### Technical Changes + +## [4.10.0] + + + ### Added Features - ROX-31443: Automatic HTTP to HTTPS redirection is now enabled for Central OpenShift routes (passthrough and reencrypt). 
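
The ROX-32876 matcher change above (central/baseimage/matcher/matcher_impl.go) keeps only the candidates whose layers form the longest matching prefix of the target image's layers: shorter prefix matches are discarded, ties on the maximum length are all returned, and a candidate with at least as many layers as the target can never be its base image. A minimal standalone sketch of that selection rule, using simplified types rather than the generated storage protos and assuming candidate layers are already sorted by index, might look like this:

package main

import "fmt"

// baseImage is a simplified stand-in for storage.BaseImage.
type baseImage struct {
	id     string
	layers []string // ordered layer digests
}

// selectLongestMatches returns the candidates whose layers are a prefix of the
// target image's layers, keeping only those with the maximum layer count.
func selectLongestMatches(target []string, candidates []baseImage) []baseImage {
	var best []baseImage
	maxLayers := 0
	for _, c := range candidates {
		// A base image must have strictly fewer layers than the target.
		if len(c.layers) == 0 || len(c.layers) >= len(target) {
			continue
		}
		match := true
		for i, digest := range c.layers {
			if target[i] != digest {
				match = false
				break
			}
		}
		if !match {
			continue
		}
		switch n := len(c.layers); {
		case n > maxLayers:
			// Found a longer match: reset the result set.
			maxLayers = n
			best = []baseImage{c}
		case n == maxLayers:
			// Another match of the same maximum length.
			best = append(best, c)
		}
	}
	return best
}

func main() {
	target := []string{"L1", "L2", "L3", "L4"}
	candidates := []baseImage{
		{id: "short-match", layers: []string{"L1", "L2"}},
		{id: "long-match", layers: []string{"L1", "L2", "L3"}},
		{id: "mismatch", layers: []string{"L1", "X2"}},
	}
	for _, m := range selectLongestMatches(target, candidates) {
		fmt.Println(m.id) // prints: long-match
	}
}

The real matcher additionally looks up candidates by the first layer digest and sorts candidate layers by their stored index before the prefix comparison; this sketch assumes that ordering has already been established.
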
From 0ed8dad909908883289096e58c38cd0452bd778a Mon Sep 17 00:00:00 2001 From: Yi Li Date: Fri, 30 Jan 2026 13:53:04 -0600 Subject: [PATCH 085/232] ROX-32880: base image DB access (#18769) --- .../datastore/store/v2/postgres/store.go | 2 ++ .../imagev2/datastore/store/postgres/store.go | 2 ++ pkg/images/enricher/enricher_impl.go | 33 ++++++++++++------- pkg/images/enricher/enricher_v2_impl.go | 33 ++++++++++++------- 4 files changed, 46 insertions(+), 24 deletions(-) diff --git a/central/image/datastore/store/v2/postgres/store.go b/central/image/datastore/store/v2/postgres/store.go index cee5ba800b1bf..59e3921118c99 100644 --- a/central/image/datastore/store/v2/postgres/store.go +++ b/central/image/datastore/store/v2/postgres/store.go @@ -269,6 +269,7 @@ func (s *storeImpl) copyFromImageComponentsV2(ctx context.Context, tx *postgres. "operatingsystem", "imageid", "location", + "layertype", "serialized", } @@ -289,6 +290,7 @@ func (s *storeImpl) copyFromImageComponentsV2(ctx context.Context, tx *postgres. obj.GetOperatingSystem(), obj.GetImageId(), obj.GetLocation(), + obj.GetLayerType(), serialized, }) diff --git a/central/imagev2/datastore/store/postgres/store.go b/central/imagev2/datastore/store/postgres/store.go index 1127f419035a4..741bea8385c5d 100644 --- a/central/imagev2/datastore/store/postgres/store.go +++ b/central/imagev2/datastore/store/postgres/store.go @@ -295,6 +295,7 @@ func (s *storeImpl) copyFromImageComponentsV2(ctx context.Context, tx *postgres. "operatingsystem", "imageidv2", "location", + "layertype", "serialized", } @@ -315,6 +316,7 @@ func (s *storeImpl) copyFromImageComponentsV2(ctx context.Context, tx *postgres. obj.GetOperatingSystem(), obj.GetImageIdV2(), obj.GetLocation(), + obj.GetLayerType(), serialized, }) diff --git a/pkg/images/enricher/enricher_impl.go b/pkg/images/enricher/enricher_impl.go index 3e8f8d791053e..e65eaf1b5df62 100644 --- a/pkg/images/enricher/enricher_impl.go +++ b/pkg/images/enricher/enricher_impl.go @@ -26,6 +26,7 @@ import ( "github.com/stackrox/rox/pkg/protoutils" registryTypes "github.com/stackrox/rox/pkg/registries/types" "github.com/stackrox/rox/pkg/sac" + "github.com/stackrox/rox/pkg/sac/resources" scannerTypes "github.com/stackrox/rox/pkg/scanners/types" "github.com/stackrox/rox/pkg/signatures" "github.com/stackrox/rox/pkg/sync" @@ -273,6 +274,26 @@ func (e *enricherImpl) EnrichImage(ctx context.Context, enrichContext Enrichment updated = updated || didUpdateMetadata + if features.BaseImageDetection.Enabled() { + adminCtx := + sac.WithGlobalAccessScopeChecker(ctx, + sac.AllowFixedScopes( + sac.AccessModeScopeKeys(storage.Access_READ_ACCESS), + sac.ResourceScopeKeys(resources.ImageAdministration), + ), + ) + matchedBaseImages, err := e.baseImageGetter(adminCtx, image.GetMetadata().GetLayerShas()) + if err != nil { + log.Warnw("Matching image with base images", + logging.FromContext(ctx), + logging.ImageID(image.GetId()), + logging.Err(err), + logging.String("request_image", image.GetName().GetFullName())) + } + image.BaseImageInfo = toBaseImageInfos(image.GetMetadata(), matchedBaseImages) + } + updated = updated || len(image.GetBaseImageInfo()) > 0 + // Update the image with existing values depending on the FetchOption provided or whether any are available. // This makes sure that we fetch any existing image only once from database. 
useExistingScanIfPossible := e.updateImageFromDatabase(ctx, image, enrichContext.FetchOpt) @@ -311,18 +332,6 @@ func (e *enricherImpl) EnrichImage(ctx context.Context, enrichContext Enrichment errorList.AddError(delegateErr) } - if features.BaseImageDetection.Enabled() { - matchedBaseImages, err := e.baseImageGetter(ctx, image.GetMetadata().GetLayerShas()) - if err != nil { - log.Warnw("Matching image with base images", - logging.FromContext(ctx), - logging.ImageID(image.GetId()), - logging.Err(err), - logging.String("request_image", image.GetName().GetFullName())) - } - image.BaseImageInfo = toBaseImageInfos(image.GetMetadata(), matchedBaseImages) - } - return EnrichmentResult{ ImageUpdated: updated, ScanResult: scanResult, diff --git a/pkg/images/enricher/enricher_v2_impl.go b/pkg/images/enricher/enricher_v2_impl.go index c99d8f7076b10..39e110b37ac67 100644 --- a/pkg/images/enricher/enricher_v2_impl.go +++ b/pkg/images/enricher/enricher_v2_impl.go @@ -26,6 +26,7 @@ import ( "github.com/stackrox/rox/pkg/protoutils" registryTypes "github.com/stackrox/rox/pkg/registries/types" "github.com/stackrox/rox/pkg/sac" + "github.com/stackrox/rox/pkg/sac/resources" scannerTypes "github.com/stackrox/rox/pkg/scanners/types" "github.com/stackrox/rox/pkg/signatures" "github.com/stackrox/rox/pkg/sync" @@ -261,6 +262,26 @@ func (e *enricherV2Impl) EnrichImage(ctx context.Context, enrichContext Enrichme updated = updated || didUpdateMetadata + if features.BaseImageDetection.Enabled() { + adminCtx := + sac.WithGlobalAccessScopeChecker(ctx, + sac.AllowFixedScopes( + sac.AccessModeScopeKeys(storage.Access_READ_ACCESS), + sac.ResourceScopeKeys(resources.ImageAdministration), + ), + ) + matchedBaseImages, err := e.baseImageGetter(adminCtx, imageV2.GetMetadata().GetLayerShas()) + if err != nil { + log.Warnw("Matching image with base images", + logging.FromContext(ctx), + logging.ImageID(imageV2.GetId()), + logging.Err(err), + logging.String("request_image", imageV2.GetName().GetFullName())) + } + imageV2.BaseImageInfo = toBaseImageInfos(imageV2.GetMetadata(), matchedBaseImages) + } + updated = updated || len(imageV2.GetBaseImageInfo()) > 0 + // Update the image with existing values depending on the FetchOption provided or whether any are available. // This makes sure that we fetch any existing image only once from database. 
useExistingScanIfPossible := e.updateImageFromDatabase(ctx, imageV2, enrichContext.FetchOpt) @@ -295,18 +316,6 @@ func (e *enricherV2Impl) EnrichImage(ctx context.Context, enrichContext Enrichme e.cvesSuppressor.EnrichImageV2WithSuppressedCVEs(imageV2) - if features.BaseImageDetection.Enabled() { - matchedBaseImages, err := e.baseImageGetter(ctx, imageV2.GetMetadata().GetLayerShas()) - if err != nil { - log.Warnw("Matching image with base images", - logging.FromContext(ctx), - logging.ImageID(imageV2.GetId()), - logging.Err(err), - logging.String("request_image", imageV2.GetName().GetFullName())) - } - imageV2.BaseImageInfo = toBaseImageInfos(imageV2.GetMetadata(), matchedBaseImages) - } - if !errorList.Empty() { errorList.AddError(delegateErr) } From ea60441aded1632e8f9531b18fa44895c23acd25 Mon Sep 17 00:00:00 2001 From: rhacs-bot <148914812+rhacs-bot@users.noreply.github.com> Date: Sat, 31 Jan 2026 00:20:51 +0100 Subject: [PATCH 086/232] chore(release): Add 4.10.0 to scanner updater configuration (#18762) Co-authored-by: crozzy --- scanner/updater/version/RELEASE_VERSION | 1 + 1 file changed, 1 insertion(+) diff --git a/scanner/updater/version/RELEASE_VERSION b/scanner/updater/version/RELEASE_VERSION index 409f9926903b5..d408f27fcc697 100644 --- a/scanner/updater/version/RELEASE_VERSION +++ b/scanner/updater/version/RELEASE_VERSION @@ -6,6 +6,7 @@ # is not specified in this file, then the vulnerability updater workflow (a.) # won't take into account its vulnerability schema, and (b.) won't create the # offline bundle for that release. +4.10.0 4.4.0 4.4.1 4.4.2 From 41fe6c9cb593fc4b76a233562c806f43a02fa72d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Luis=20Valero=20Mart=C3=ADn?= Date: Mon, 2 Feb 2026 10:30:37 +0100 Subject: [PATCH 087/232] ROX-32701: Add consumer_id to pubsub metrics for granular tracking (#18780) Co-authored-by: Claude Sonnet 4.5 --- sensor/common/component.go | 4 +-- sensor/common/pubsub/consumer/default.go | 29 ++++++++++++++++--- sensor/common/pubsub/consumer/default_test.go | 8 ++--- sensor/common/pubsub/consumerid.go | 24 +++++++++++++++ sensor/common/pubsub/dispatcher/dispatcher.go | 12 ++++---- .../pubsub/dispatcher/dispatcher_test.go | 26 ++++++++--------- sensor/common/pubsub/interfaces.go | 4 +-- sensor/common/pubsub/lane/default.go | 16 ++-------- sensor/common/pubsub/lane/default_test.go | 12 ++++---- sensor/common/pubsub/metrics/metrics.go | 16 +++++----- sensor/common/pubsub/mocks/interfaces.go | 8 ++--- .../eventpipeline/component/component.go | 4 +-- .../component/mocks/component.go | 16 +++++----- .../eventpipeline/resolver/resolver.go | 6 ++-- .../resolver/resolver_bench_test.go | 2 +- .../eventpipeline/resolver/resolver_test.go | 3 +- 16 files changed, 113 insertions(+), 77 deletions(-) create mode 100644 sensor/common/pubsub/consumerid.go diff --git a/sensor/common/component.go b/sensor/common/component.go index 1e51fa9af90cf..2c74f404334b5 100644 --- a/sensor/common/component.go +++ b/sensor/common/component.go @@ -113,8 +113,8 @@ type CentralGRPCConnAware interface { // PubSubDispatcher defines the interface to the internal PubSub system type PubSubDispatcher interface { - RegisterConsumer(pubsub.Topic, pubsub.EventCallback) error - RegisterConsumerToLane(pubsub.Topic, pubsub.LaneID, pubsub.EventCallback) error + RegisterConsumer(pubsub.ConsumerID, pubsub.Topic, pubsub.EventCallback) error + RegisterConsumerToLane(pubsub.ConsumerID, pubsub.Topic, pubsub.LaneID, pubsub.EventCallback) error Publish(pubsub.Event) error Stop() } diff --git 
a/sensor/common/pubsub/consumer/default.go b/sensor/common/pubsub/consumer/default.go index 75c8329f58044..24bb0884e696e 100644 --- a/sensor/common/pubsub/consumer/default.go +++ b/sensor/common/pubsub/consumer/default.go @@ -1,33 +1,54 @@ package consumer import ( + "time" + "github.com/pkg/errors" "github.com/stackrox/rox/pkg/concurrency" "github.com/stackrox/rox/sensor/common/pubsub" pubsubErrors "github.com/stackrox/rox/sensor/common/pubsub/errors" + "github.com/stackrox/rox/sensor/common/pubsub/metrics" ) -func NewDefaultConsumer(callback pubsub.EventCallback, _ ...pubsub.ConsumerOption) (pubsub.Consumer, error) { +func NewDefaultConsumer(laneID pubsub.LaneID, topic pubsub.Topic, consumerID pubsub.ConsumerID, callback pubsub.EventCallback, _ ...pubsub.ConsumerOption) (pubsub.Consumer, error) { if callback == nil { return nil, errors.Wrap(pubsubErrors.UndefinedEventCallbackErr, "cannot create a consumer with a 'nil' callback") } return &DefaultConsumer{ - callback: callback, + laneID: laneID, + topic: topic, + consumerID: consumerID, + callback: callback, }, nil } type DefaultConsumer struct { - callback pubsub.EventCallback + laneID pubsub.LaneID + topic pubsub.Topic + consumerID pubsub.ConsumerID + callback pubsub.EventCallback } func (c *DefaultConsumer) Consume(waitable concurrency.Waitable, event pubsub.Event) <-chan error { errC := make(chan error) go func() { defer close(errC) + start := time.Now() + operation := metrics.Processed + select { - case errC <- c.callback(event): + case errC <- func() error { + err := c.callback(event) + if err != nil { + operation = metrics.ConsumerError + } + return err + }(): case <-waitable.Done(): + operation = metrics.ConsumerError } + metrics.ObserveProcessingDuration(c.laneID, c.topic, c.consumerID, time.Since(start), operation) + metrics.RecordConsumerOperation(c.laneID, c.topic, c.consumerID, operation) }() return errC } diff --git a/sensor/common/pubsub/consumer/default_test.go b/sensor/common/pubsub/consumer/default_test.go index 4c468d49e2f73..a17cdfb6942e4 100644 --- a/sensor/common/pubsub/consumer/default_test.go +++ b/sensor/common/pubsub/consumer/default_test.go @@ -23,13 +23,13 @@ func TestDefaultConsumer(t *testing.T) { func (s *defaultConsumerSuite) TestConsume() { defer goleak.AssertNoGoroutineLeaks(s.T()) s.Run("should error with nil callback", func() { - c, err := NewDefaultConsumer(nil) + c, err := NewDefaultConsumer(pubsub.DefaultLane, pubsub.DefaultTopic, pubsub.DefaultConsumer, nil) s.Assert().Error(err) s.Assert().Nil(c) }) s.Run("should unblock if waitable is done", func() { callbackDone := concurrency.NewSignal() - c, err := NewDefaultConsumer(func(_ pubsub.Event) error { + c, err := NewDefaultConsumer(pubsub.DefaultLane, pubsub.DefaultTopic, pubsub.DefaultConsumer, func(_ pubsub.Event) error { defer callbackDone.Signal() return errors.New("some error") }) @@ -47,7 +47,7 @@ func (s *defaultConsumerSuite) TestConsume() { s.Run("consume event error", func() { data := "some data" consumerSignal := concurrency.NewSignal() - c, err := NewDefaultConsumer(func(event pubsub.Event) error { + c, err := NewDefaultConsumer(pubsub.DefaultLane, pubsub.DefaultTopic, pubsub.DefaultConsumer, func(event pubsub.Event) error { defer consumerSignal.Signal() eventImpl, ok := event.(*testEvent) s.Require().True(ok) @@ -82,7 +82,7 @@ func (s *defaultConsumerSuite) TestConsume() { s.Run("consume event no error", func() { data := "some data" consumerSignal := concurrency.NewSignal() - c, err := NewDefaultConsumer(func(event pubsub.Event) error { 
+ c, err := NewDefaultConsumer(pubsub.DefaultLane, pubsub.DefaultTopic, pubsub.DefaultConsumer, func(event pubsub.Event) error { defer consumerSignal.Signal() eventImpl, ok := event.(*testEvent) s.Require().True(ok) diff --git a/sensor/common/pubsub/consumerid.go b/sensor/common/pubsub/consumerid.go new file mode 100644 index 0000000000000..3fea4e4265057 --- /dev/null +++ b/sensor/common/pubsub/consumerid.go @@ -0,0 +1,24 @@ +package pubsub + +type ConsumerID int + +const ( + NoConsumers ConsumerID = iota + DefaultConsumer + ResolverConsumer +) + +var ( + consumerToString = map[ConsumerID]string{ + NoConsumers: "NoConsumers", + DefaultConsumer: "Default", + ResolverConsumer: "Resolver", + } +) + +func (c ConsumerID) String() string { + if consumerStr, ok := consumerToString[c]; ok { + return consumerStr + } + return "unknown" +} diff --git a/sensor/common/pubsub/dispatcher/dispatcher.go b/sensor/common/pubsub/dispatcher/dispatcher.go index d205f668198a8..81922ee783548 100644 --- a/sensor/common/pubsub/dispatcher/dispatcher.go +++ b/sensor/common/pubsub/dispatcher/dispatcher.go @@ -53,7 +53,7 @@ func (d *dispatcher) Publish(event pubsub.Event) error { return errors.Wrap(lane.Publish(event), "unable to publish event") } -func (d *dispatcher) RegisterConsumer(topic pubsub.Topic, callback pubsub.EventCallback) error { +func (d *dispatcher) RegisterConsumer(consumerID pubsub.ConsumerID, topic pubsub.Topic, callback pubsub.EventCallback) error { if callback == nil { return errors.New("cannot register a 'nil' callback") } @@ -61,14 +61,14 @@ func (d *dispatcher) RegisterConsumer(topic pubsub.Topic, callback pubsub.EventC defer d.laneLock.RUnlock() errList := errorhelpers.NewErrorList("register consumer") for _, lane := range d.lanes { - if err := d.registerConsumerToLane(topic, lane, callback); err != nil { + if err := d.registerConsumerToLane(consumerID, topic, lane, callback); err != nil { errList.AddErrors(err) } } return errList.ToError() } -func (d *dispatcher) RegisterConsumerToLane(topic pubsub.Topic, laneID pubsub.LaneID, callback pubsub.EventCallback) error { +func (d *dispatcher) RegisterConsumerToLane(consumerID pubsub.ConsumerID, topic pubsub.Topic, laneID pubsub.LaneID, callback pubsub.EventCallback) error { if callback == nil { return errors.New("cannot register a 'nil' callback") } @@ -76,7 +76,7 @@ func (d *dispatcher) RegisterConsumerToLane(topic pubsub.Topic, laneID pubsub.La if err != nil { return errors.Errorf("lane with ID %q not found: %v", laneID, err) } - return d.registerConsumerToLane(topic, lane, callback) + return d.registerConsumerToLane(consumerID, topic, lane, callback) } func (d *dispatcher) Stop() { @@ -97,8 +97,8 @@ func (d *dispatcher) getLane(id pubsub.LaneID) (pubsub.Lane, error) { return lane, nil } -func (d *dispatcher) registerConsumerToLane(topic pubsub.Topic, lane pubsub.Lane, callback pubsub.EventCallback) error { - return errors.Wrap(lane.RegisterConsumer(topic, callback), "unable to register consumer") +func (d *dispatcher) registerConsumerToLane(consumerID pubsub.ConsumerID, topic pubsub.Topic, lane pubsub.Lane, callback pubsub.EventCallback) error { + return errors.Wrap(lane.RegisterConsumer(consumerID, topic, callback), "unable to register consumer") } func (d *dispatcher) createLanes() error { diff --git a/sensor/common/pubsub/dispatcher/dispatcher_test.go b/sensor/common/pubsub/dispatcher/dispatcher_test.go index 8bab3a1f078db..27ca690ccae02 100644 --- a/sensor/common/pubsub/dispatcher/dispatcher_test.go +++ 
b/sensor/common/pubsub/dispatcher/dispatcher_test.go @@ -75,17 +75,17 @@ func (s *dispatcherSuite) Test_WithLaneConfigs() { func (s *dispatcherSuite) Test_RegisterConsumer() { defer goleak.AssertNoGoroutineLeaks(s.T()) s.Run("should not register nil callback", func() { - s.Assert().Error(s.d.RegisterConsumer(pubsub.DefaultTopic, nil)) + s.Assert().Error(s.d.RegisterConsumer(pubsub.DefaultConsumer, pubsub.DefaultTopic, nil)) }) s.Run("should error if lane RegisterConsumer fails", func() { - s.lane.EXPECT().RegisterConsumer(gomock.Eq(pubsub.DefaultTopic), gomock.Any()).Times(1).Return(errors.New("some error")) - s.Assert().Error(s.d.RegisterConsumer(pubsub.DefaultTopic, func(_ pubsub.Event) error { + s.lane.EXPECT().RegisterConsumer(gomock.Eq(pubsub.DefaultConsumer), gomock.Eq(pubsub.DefaultTopic), gomock.Any()).Times(1).Return(errors.New("some error")) + s.Assert().Error(s.d.RegisterConsumer(pubsub.DefaultConsumer, pubsub.DefaultTopic, func(_ pubsub.Event) error { return nil })) }) s.Run("success case", func() { - s.lane.EXPECT().RegisterConsumer(gomock.Eq(pubsub.DefaultTopic), gomock.Any()).Times(1).Return(nil) - s.Assert().NoError(s.d.RegisterConsumer(pubsub.DefaultTopic, func(_ pubsub.Event) error { + s.lane.EXPECT().RegisterConsumer(gomock.Eq(pubsub.DefaultConsumer), gomock.Eq(pubsub.DefaultTopic), gomock.Any()).Times(1).Return(nil) + s.Assert().NoError(s.d.RegisterConsumer(pubsub.DefaultConsumer, pubsub.DefaultTopic, func(_ pubsub.Event) error { return nil })) }) @@ -94,18 +94,18 @@ func (s *dispatcherSuite) Test_RegisterConsumer() { func (s *dispatcherSuite) Test_RegisterConsumerToLane() { defer goleak.AssertNoGoroutineLeaks(s.T()) s.Run("should not register nil callback", func() { - s.Assert().Error(s.d.RegisterConsumerToLane(pubsub.DefaultTopic, pubsub.DefaultLane, nil)) + s.Assert().Error(s.d.RegisterConsumerToLane(pubsub.DefaultConsumer, pubsub.DefaultTopic, pubsub.DefaultLane, nil)) }) s.Run("should error if lane does not exist", func() { - s.Assert().Error(s.d.RegisterConsumerToLane(pubsub.DefaultTopic, -1, func(_ pubsub.Event) error { return nil })) + s.Assert().Error(s.d.RegisterConsumerToLane(pubsub.DefaultConsumer, pubsub.DefaultTopic, -1, func(_ pubsub.Event) error { return nil })) }) s.Run("should error if lane RegisterConsumer fails", func() { - s.lane.EXPECT().RegisterConsumer(gomock.Eq(pubsub.DefaultTopic), gomock.Any()).Times(1).Return(errors.New("some error")) - s.Assert().Error(s.d.RegisterConsumerToLane(pubsub.DefaultTopic, pubsub.DefaultLane, func(_ pubsub.Event) error { return nil })) + s.lane.EXPECT().RegisterConsumer(gomock.Eq(pubsub.DefaultConsumer), gomock.Eq(pubsub.DefaultTopic), gomock.Any()).Times(1).Return(errors.New("some error")) + s.Assert().Error(s.d.RegisterConsumerToLane(pubsub.DefaultConsumer, pubsub.DefaultTopic, pubsub.DefaultLane, func(_ pubsub.Event) error { return nil })) }) s.Run("success case", func() { - s.lane.EXPECT().RegisterConsumer(gomock.Eq(pubsub.DefaultTopic), gomock.Any()).Times(1).Return(nil) - s.Assert().NoError(s.d.RegisterConsumerToLane(pubsub.DefaultTopic, pubsub.DefaultLane, func(_ pubsub.Event) error { return nil })) + s.lane.EXPECT().RegisterConsumer(gomock.Eq(pubsub.DefaultConsumer), gomock.Eq(pubsub.DefaultTopic), gomock.Any()).Times(1).Return(nil) + s.Assert().NoError(s.d.RegisterConsumerToLane(pubsub.DefaultConsumer, pubsub.DefaultTopic, pubsub.DefaultLane, func(_ pubsub.Event) error { return nil })) }) } @@ -171,8 +171,8 @@ func (s *dispatcherSuite) Test_Publish() { d, lanes := newDispatcher(s.T(), s.ctrl, 
s.defaultLanes) callback := tCase.callback(s.T(), &wg) s.Require().Len(lanes, 1) - lanes[0].EXPECT().RegisterConsumer(gomock.Eq(pubsub.DefaultTopic), gomock.Any()).Times(1).Return(nil) - s.Assert().NoError(d.RegisterConsumer(pubsub.DefaultTopic, callback)) + lanes[0].EXPECT().RegisterConsumer(gomock.Eq(pubsub.DefaultConsumer), gomock.Eq(pubsub.DefaultTopic), gomock.Any()).Times(1).Return(nil) + s.Assert().NoError(d.RegisterConsumer(pubsub.DefaultConsumer, pubsub.DefaultTopic, callback)) tCase.laneExpectCalls(lanes[0], callback) err := d.Publish(tCase.event) tCase.expectError(s.T(), err) diff --git a/sensor/common/pubsub/interfaces.go b/sensor/common/pubsub/interfaces.go index c94d9dfbf7d3c..35b9eae3a9c09 100644 --- a/sensor/common/pubsub/interfaces.go +++ b/sensor/common/pubsub/interfaces.go @@ -21,11 +21,11 @@ type LaneConfig interface { type Lane interface { Publish(Event) error - RegisterConsumer(Topic, EventCallback) error + RegisterConsumer(ConsumerID, Topic, EventCallback) error Stop() } -type NewConsumer func(EventCallback, ...ConsumerOption) (Consumer, error) +type NewConsumer func(laneID LaneID, topic Topic, consumerID ConsumerID, callback EventCallback, opts ...ConsumerOption) (Consumer, error) type Consumer interface { Consume(concurrency.Waitable, Event) <-chan error diff --git a/sensor/common/pubsub/lane/default.go b/sensor/common/pubsub/lane/default.go index b0251b81dbf64..a30a1a83e0513 100644 --- a/sensor/common/pubsub/lane/default.go +++ b/sensor/common/pubsub/lane/default.go @@ -1,8 +1,6 @@ package lane import ( - "time" - "github.com/pkg/errors" "github.com/stackrox/rox/pkg/concurrency" @@ -120,10 +118,7 @@ func (l *defaultLane) run() { } func (l *defaultLane) handleEvent(event pubsub.Event) error { - start := time.Now() - operation := metrics.Processed defer func() { - metrics.ObserveProcessingDuration(l.id, event.Topic(), time.Since(start), operation) metrics.SetQueueSize(l.id, len(l.ch)) }() @@ -131,7 +126,7 @@ func (l *defaultLane) handleEvent(event pubsub.Event) error { defer l.consumerLock.RUnlock() consumers, ok := l.consumers[event.Topic()] if !ok { - metrics.RecordConsumerOperation(l.id, event.Topic(), metrics.NoConsumers) + metrics.RecordConsumerOperation(l.id, event.Topic(), pubsub.NoConsumers, metrics.NoConsumers) return errors.Wrap(pubsubErrors.NewConsumersNotFoundForTopicErr(event.Topic(), l.id), "unable to handle event") } errList := errorhelpers.NewErrorList("handle event") @@ -146,19 +141,14 @@ func (l *defaultLane) handleEvent(event pubsub.Event) error { } } - if errList.ToError() != nil { - operation = metrics.ConsumerError - } - metrics.RecordConsumerOperation(l.id, event.Topic(), operation) - return errList.ToError() } -func (l *defaultLane) RegisterConsumer(topic pubsub.Topic, callback pubsub.EventCallback) error { +func (l *defaultLane) RegisterConsumer(consumerID pubsub.ConsumerID, topic pubsub.Topic, callback pubsub.EventCallback) error { if callback == nil { return errors.New("cannot register a 'nil' callback") } - c, err := l.newConsumerFn(callback, l.consumerOpts...) + c, err := l.newConsumerFn(l.id, topic, consumerID, callback, l.consumerOpts...) 
if err != nil { return errors.Wrap(err, "unable to create the consumer") } diff --git a/sensor/common/pubsub/lane/default_test.go b/sensor/common/pubsub/lane/default_test.go index ba5f827828f21..c627d77f98ac0 100644 --- a/sensor/common/pubsub/lane/default_test.go +++ b/sensor/common/pubsub/lane/default_test.go @@ -89,7 +89,7 @@ func (t *testLane) Publish(_ pubsub.Event) error { return nil } -func (t *testLane) RegisterConsumer(_ pubsub.Topic, _ pubsub.EventCallback) error { +func (t *testLane) RegisterConsumer(_ pubsub.ConsumerID, _ pubsub.Topic, _ pubsub.EventCallback) error { return nil } @@ -142,7 +142,7 @@ func (s *defaultLaneSuite) TestRegisterConsumer() { s.Run("should error on nil callback", func() { lane := NewDefaultLane(pubsub.DefaultLane).NewLane() assert.NotNil(s.T(), lane) - assert.Error(s.T(), lane.RegisterConsumer(pubsub.DefaultTopic, nil)) + assert.Error(s.T(), lane.RegisterConsumer(pubsub.DefaultConsumer, pubsub.DefaultTopic, nil)) lane.Stop() }) } @@ -155,7 +155,7 @@ func (s *defaultLaneSuite) TestPublish() { unblockSig := concurrency.NewSignal() wg := sync.WaitGroup{} wg.Add(1) - assert.NoError(s.T(), lane.RegisterConsumer(pubsub.DefaultTopic, blockingCallback(&wg, &unblockSig))) + assert.NoError(s.T(), lane.RegisterConsumer(pubsub.DefaultConsumer, pubsub.DefaultTopic, blockingCallback(&wg, &unblockSig))) publishDone := concurrency.NewSignal() go func() { defer publishDone.Signal() @@ -188,7 +188,7 @@ func (s *defaultLaneSuite) TestPublish() { assert.NotNil(s.T(), lane) data := "some data" consumeSignal := concurrency.NewSignal() - assert.NoError(s.T(), lane.RegisterConsumer(pubsub.DefaultTopic, + assert.NoError(s.T(), lane.RegisterConsumer(pubsub.DefaultConsumer, pubsub.DefaultTopic, assertInCallback(s.T(), func(t *testing.T, event pubsub.Event) error { defer consumeSignal.Signal() eventImpl, ok := event.(*testEvent) @@ -206,7 +206,7 @@ func (s *defaultLaneSuite) TestPublish() { unblockSig := concurrency.NewSignal() wg := sync.WaitGroup{} wg.Add(1) - assert.NoError(s.T(), lane.RegisterConsumer(pubsub.DefaultTopic, blockingCallback(&wg, &unblockSig))) + assert.NoError(s.T(), lane.RegisterConsumer(pubsub.DefaultConsumer, pubsub.DefaultTopic, blockingCallback(&wg, &unblockSig))) publishDone := concurrency.NewSignal() firstPublishCallDone := concurrency.NewSignal() go func() { @@ -257,7 +257,7 @@ func (t *testEvent) Lane() pubsub.LaneID { return pubsub.DefaultLane } -func newTestConsumer(_ pubsub.EventCallback, _ ...pubsub.ConsumerOption) (pubsub.Consumer, error) { +func newTestConsumer(_ pubsub.LaneID, _ pubsub.Topic, _ pubsub.ConsumerID, _ pubsub.EventCallback, _ ...pubsub.ConsumerOption) (pubsub.Consumer, error) { return &testCustomConsumer{}, nil } diff --git a/sensor/common/pubsub/metrics/metrics.go b/sensor/common/pubsub/metrics/metrics.go index b4ef6dd83d0ac..6e9e67913a3b1 100644 --- a/sensor/common/pubsub/metrics/metrics.go +++ b/sensor/common/pubsub/metrics/metrics.go @@ -24,8 +24,8 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.SensorSubsystem.String(), Name: "pubsub_lane_consumer_operations_total", - Help: "Total number of pubsub lane consumer operations by lane, topic, and operation type", - }, []string{"lane_id", "topic", "operation"}) + Help: "Total number of pubsub lane consumer operations by lane, topic, consumer, and operation type", + }, []string{"lane_id", "topic", "consumer_id", "operation"}) // laneQueueSize tracks the current number of events in each lane's buffer. 
laneQueueSize = prometheus.NewGaugeVec(prometheus.GaugeOpts{ @@ -40,9 +40,9 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.SensorSubsystem.String(), Name: "pubsub_lane_event_processing_duration_seconds", - Help: "Time spent processing an event through all consumer callbacks", + Help: "Time spent processing an event by each consumer callback", Buckets: []float64{0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5}, - }, []string{"lane_id", "topic", "operation"}) + }, []string{"lane_id", "topic", "consumer_id", "operation"}) // consumersCurrent tracks the number of registered consumers per lane/topic. consumersCurrent = prometheus.NewGaugeVec(prometheus.GaugeOpts{ @@ -57,16 +57,16 @@ func RecordPublishOperation(laneID pubsub.LaneID, topic pubsub.Topic, operation lanePublishOperations.WithLabelValues(laneID.String(), topic.String(), operation.String()).Inc() } -func RecordConsumerOperation(laneID pubsub.LaneID, topic pubsub.Topic, operation Operation) { - laneConsumerOperations.WithLabelValues(laneID.String(), topic.String(), operation.String()).Inc() +func RecordConsumerOperation(laneID pubsub.LaneID, topic pubsub.Topic, consumerID pubsub.ConsumerID, operation Operation) { + laneConsumerOperations.WithLabelValues(laneID.String(), topic.String(), consumerID.String(), operation.String()).Inc() } func SetQueueSize(laneID pubsub.LaneID, size int) { laneQueueSize.WithLabelValues(laneID.String()).Set(float64(size)) } -func ObserveProcessingDuration(laneID pubsub.LaneID, topic pubsub.Topic, duration time.Duration, operation Operation) { - laneEventProcessingDuration.WithLabelValues(laneID.String(), topic.String(), operation.String()).Observe(duration.Seconds()) +func ObserveProcessingDuration(laneID pubsub.LaneID, topic pubsub.Topic, consumerID pubsub.ConsumerID, duration time.Duration, operation Operation) { + laneEventProcessingDuration.WithLabelValues(laneID.String(), topic.String(), consumerID.String(), operation.String()).Observe(duration.Seconds()) } func RecordConsumerCount(laneID pubsub.LaneID, topic pubsub.Topic, count int) { diff --git a/sensor/common/pubsub/mocks/interfaces.go b/sensor/common/pubsub/mocks/interfaces.go index 82cb79d2fb518..9b4f48df344f2 100644 --- a/sensor/common/pubsub/mocks/interfaces.go +++ b/sensor/common/pubsub/mocks/interfaces.go @@ -160,17 +160,17 @@ func (mr *MockLaneMockRecorder) Publish(arg0 any) *gomock.Call { } // RegisterConsumer mocks base method. -func (m *MockLane) RegisterConsumer(arg0 pubsub.Topic, arg1 pubsub.EventCallback) error { +func (m *MockLane) RegisterConsumer(arg0 pubsub.ConsumerID, arg1 pubsub.Topic, arg2 pubsub.EventCallback) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RegisterConsumer", arg0, arg1) + ret := m.ctrl.Call(m, "RegisterConsumer", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // RegisterConsumer indicates an expected call of RegisterConsumer. -func (mr *MockLaneMockRecorder) RegisterConsumer(arg0, arg1 any) *gomock.Call { +func (mr *MockLaneMockRecorder) RegisterConsumer(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterConsumer", reflect.TypeOf((*MockLane)(nil).RegisterConsumer), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterConsumer", reflect.TypeOf((*MockLane)(nil).RegisterConsumer), arg0, arg1, arg2) } // Stop mocks base method. 
diff --git a/sensor/kubernetes/eventpipeline/component/component.go b/sensor/kubernetes/eventpipeline/component/component.go index 550eb938f4c27..91a92b17ed373 100644 --- a/sensor/kubernetes/eventpipeline/component/component.go +++ b/sensor/kubernetes/eventpipeline/component/component.go @@ -54,7 +54,7 @@ type ContextListener interface { // PubSubDispatcher defines the interface to the internal PubSub system type PubSubDispatcher interface { - RegisterConsumer(pubsub.Topic, pubsub.EventCallback) error - RegisterConsumerToLane(pubsub.Topic, pubsub.LaneID, pubsub.EventCallback) error + RegisterConsumer(pubsub.ConsumerID, pubsub.Topic, pubsub.EventCallback) error + RegisterConsumerToLane(pubsub.ConsumerID, pubsub.Topic, pubsub.LaneID, pubsub.EventCallback) error Publish(pubsub.Event) error } diff --git a/sensor/kubernetes/eventpipeline/component/mocks/component.go b/sensor/kubernetes/eventpipeline/component/mocks/component.go index 55ff0dc2fb1bb..b348eef6ca6ad 100644 --- a/sensor/kubernetes/eventpipeline/component/mocks/component.go +++ b/sensor/kubernetes/eventpipeline/component/mocks/component.go @@ -324,29 +324,29 @@ func (mr *MockPubSubDispatcherMockRecorder) Publish(arg0 any) *gomock.Call { } // RegisterConsumer mocks base method. -func (m *MockPubSubDispatcher) RegisterConsumer(arg0 pubsub.Topic, arg1 pubsub.EventCallback) error { +func (m *MockPubSubDispatcher) RegisterConsumer(arg0 pubsub.ConsumerID, arg1 pubsub.Topic, arg2 pubsub.EventCallback) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RegisterConsumer", arg0, arg1) + ret := m.ctrl.Call(m, "RegisterConsumer", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // RegisterConsumer indicates an expected call of RegisterConsumer. -func (mr *MockPubSubDispatcherMockRecorder) RegisterConsumer(arg0, arg1 any) *gomock.Call { +func (mr *MockPubSubDispatcherMockRecorder) RegisterConsumer(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterConsumer", reflect.TypeOf((*MockPubSubDispatcher)(nil).RegisterConsumer), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterConsumer", reflect.TypeOf((*MockPubSubDispatcher)(nil).RegisterConsumer), arg0, arg1, arg2) } // RegisterConsumerToLane mocks base method. -func (m *MockPubSubDispatcher) RegisterConsumerToLane(arg0 pubsub.Topic, arg1 pubsub.LaneID, arg2 pubsub.EventCallback) error { +func (m *MockPubSubDispatcher) RegisterConsumerToLane(arg0 pubsub.ConsumerID, arg1 pubsub.Topic, arg2 pubsub.LaneID, arg3 pubsub.EventCallback) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RegisterConsumerToLane", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "RegisterConsumerToLane", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // RegisterConsumerToLane indicates an expected call of RegisterConsumerToLane. 
-func (mr *MockPubSubDispatcherMockRecorder) RegisterConsumerToLane(arg0, arg1, arg2 any) *gomock.Call { +func (mr *MockPubSubDispatcherMockRecorder) RegisterConsumerToLane(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterConsumerToLane", reflect.TypeOf((*MockPubSubDispatcher)(nil).RegisterConsumerToLane), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterConsumerToLane", reflect.TypeOf((*MockPubSubDispatcher)(nil).RegisterConsumerToLane), arg0, arg1, arg2, arg3) } diff --git a/sensor/kubernetes/eventpipeline/resolver/resolver.go b/sensor/kubernetes/eventpipeline/resolver/resolver.go index 1019a9378faf0..5a1e084cf084f 100644 --- a/sensor/kubernetes/eventpipeline/resolver/resolver.go +++ b/sensor/kubernetes/eventpipeline/resolver/resolver.go @@ -12,7 +12,7 @@ import ( ) type pubSubRegister interface { - RegisterConsumerToLane(pubsub.Topic, pubsub.LaneID, pubsub.EventCallback) error + RegisterConsumerToLane(pubsub.ConsumerID, pubsub.Topic, pubsub.LaneID, pubsub.EventCallback) error } // New instantiates a Resolver component. @@ -29,10 +29,10 @@ func New(outputQueue component.OutputQueue, provider store.Provider, queueSize i if pubsubDispatcher == nil { return nil, errors.Errorf("pubsub dispatcher is null and the feature flag %q is enabled", features.SensorInternalPubSub.EnvVar()) } - if err := pubsubDispatcher.RegisterConsumerToLane(pubsub.KubernetesDispatcherEventTopic, pubsub.KubernetesDispatcherEventLane, res.ProcessResourceEvent); err != nil { + if err := pubsubDispatcher.RegisterConsumerToLane(pubsub.ResolverConsumer, pubsub.KubernetesDispatcherEventTopic, pubsub.KubernetesDispatcherEventLane, res.ProcessResourceEvent); err != nil { return nil, errors.Wrapf(err, "unable to register resolver as consumer of topic %q in lane %q", pubsub.KubernetesDispatcherEventTopic.String(), pubsub.KubernetesDispatcherEventLane.String()) } - if err := pubsubDispatcher.RegisterConsumerToLane(pubsub.FromCentralResolverEventTopic, pubsub.FromCentralResolverEventLane, res.ProcessResourceEvent); err != nil { + if err := pubsubDispatcher.RegisterConsumerToLane(pubsub.ResolverConsumer, pubsub.FromCentralResolverEventTopic, pubsub.FromCentralResolverEventLane, res.ProcessResourceEvent); err != nil { return nil, errors.Wrapf(err, "unable to register resolver as consumer of topic %q in lane %q", pubsub.FromCentralResolverEventTopic.String(), pubsub.FromCentralResolverEventLane.String()) } } diff --git a/sensor/kubernetes/eventpipeline/resolver/resolver_bench_test.go b/sensor/kubernetes/eventpipeline/resolver/resolver_bench_test.go index b43aa432df703..8ac21a704d0b2 100644 --- a/sensor/kubernetes/eventpipeline/resolver/resolver_bench_test.go +++ b/sensor/kubernetes/eventpipeline/resolver/resolver_bench_test.go @@ -145,7 +145,7 @@ func setupMocks(b *testing.B, doneSignal *concurrency.Signal, pubsubEnabled bool mockPubSubDispatcher = mocksComponent.NewMockPubSubDispatcher(mockCtrl) // Set up the EXPECT if pubsubEnabled { - mockPubSubDispatcher.EXPECT().RegisterConsumerToLane(gomock.Any(), gomock.Any(), gomock.Any()).Times(2).Return(nil) + mockPubSubDispatcher.EXPECT().RegisterConsumerToLane(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(2).Return(nil) } mockOutput.EXPECT().Send(gomock.Any()).AnyTimes().DoAndReturn(func(resourceEvent *component.ResourceEvent) { for _, m := range resourceEvent.ForwardMessages { diff --git a/sensor/kubernetes/eventpipeline/resolver/resolver_test.go 
b/sensor/kubernetes/eventpipeline/resolver/resolver_test.go index 3f142f61b79e3..51d550c474bd8 100644 --- a/sensor/kubernetes/eventpipeline/resolver/resolver_test.go +++ b/sensor/kubernetes/eventpipeline/resolver/resolver_test.go @@ -12,6 +12,7 @@ import ( "github.com/stackrox/rox/pkg/set" "github.com/stackrox/rox/pkg/sync" "github.com/stackrox/rox/sensor/common/clusterentities" + "github.com/stackrox/rox/sensor/common/pubsub" "github.com/stackrox/rox/sensor/common/registry" "github.com/stackrox/rox/sensor/common/service" "github.com/stackrox/rox/sensor/common/store" @@ -63,7 +64,7 @@ func (s *resolverSuite) SetupTest() { func (s *resolverSuite) newResolver(pubsubEnabled bool) component.Resolver { if pubsubEnabled { - s.mockPubSubDispatcher.EXPECT().RegisterConsumerToLane(gomock.Any(), gomock.Any(), gomock.Any()).Times(2).Return(nil) + s.mockPubSubDispatcher.EXPECT().RegisterConsumerToLane(gomock.Eq(pubsub.ResolverConsumer), gomock.Any(), gomock.Any(), gomock.Any()).Times(2).Return(nil) } resolver, err := New(s.mockOutput, &fakeProvider{ deploymentStore: s.mockDeploymentStore, From 24f9049274c041196e64fd4ace47fc6c66938757 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Feb 2026 10:51:26 +0100 Subject: [PATCH 088/232] chore(deps): bump github.com/onsi/ginkgo/v2 from 2.27.5 to 2.28.1 (#18767) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 72f5323ce5240..c74f91c77b2e8 100644 --- a/go.mod +++ b/go.mod @@ -90,7 +90,7 @@ require ( github.com/np-guard/netpol-analyzer v1.4.4 github.com/nxadm/tail v1.4.11 github.com/olekukonko/tablewriter v1.1.3 - github.com/onsi/ginkgo/v2 v2.27.5 + github.com/onsi/ginkgo/v2 v2.28.1 github.com/onsi/gomega v1.39.0 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.1 @@ -344,7 +344,7 @@ require ( github.com/google/cel-go v0.26.1 // indirect github.com/google/go-github/v73 v73.0.0 // indirect github.com/google/go-querystring v1.2.0 // indirect - github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 // indirect + github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 // indirect github.com/google/s2a-go v0.1.9 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.11 // indirect github.com/gorilla/css v1.0.0 // indirect diff --git a/go.sum b/go.sum index bd54662e8860e..4218f7146626a 100644 --- a/go.sum +++ b/go.sum @@ -831,8 +831,8 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 h1:EEHtgt9IwisQ2AZ4pIsMjahcegHh6rmhqxzIRQIyepY= -github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U= +github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 h1:z2ogiKUYzX5Is6zr/vP9vJGqPwcdqsWjOt+V8J7+bTc= +github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI= github.com/google/renameio v0.1.0/go.mod 
h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= @@ -1255,8 +1255,8 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.27.5 h1:ZeVgZMx2PDMdJm/+w5fE/OyG6ILo1Y3e+QX4zSR0zTE= -github.com/onsi/ginkgo/v2 v2.27.5/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= +github.com/onsi/ginkgo/v2 v2.28.1 h1:S4hj+HbZp40fNKuLUQOYLDgZLwNUVn19N3Atb98NCyI= +github.com/onsi/ginkgo/v2 v2.28.1/go.mod h1:CLtbVInNckU3/+gC8LzkGUb9oF+e8W8TdUsxPwvdOgE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= From 23b12dcd637cc8a948243b460277543c0820338f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Feb 2026 10:51:33 +0100 Subject: [PATCH 089/232] chore(deps): bump google.golang.org/api from 0.263.0 to 0.264.0 (#18766) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c74f91c77b2e8..1f4e59a685e6e 100644 --- a/go.mod +++ b/go.mod @@ -150,7 +150,7 @@ require ( golang.org/x/time v0.14.0 golang.org/x/tools v0.41.0 golang.stackrox.io/grpc-http1 v0.5.1 - google.golang.org/api v0.263.0 + google.golang.org/api v0.264.0 google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b google.golang.org/grpc v1.78.0 diff --git a/go.sum b/go.sum index 4218f7146626a..bc09b35870a0e 100644 --- a/go.sum +++ b/go.sum @@ -2176,8 +2176,8 @@ google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdr google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= -google.golang.org/api v0.263.0 h1:UFs7qn8gInIdtk1ZA6eXRXp5JDAnS4x9VRsRVCeKdbk= -google.golang.org/api v0.263.0/go.mod h1:fAU1xtNNisHgOF5JooAs8rRaTkl2rT3uaoNGo9NS3R8= +google.golang.org/api v0.264.0 h1:+Fo3DQXBK8gLdf8rFZ3uLu39JpOnhvzJrLMQSoSYZJM= +google.golang.org/api v0.264.0/go.mod h1:fAU1xtNNisHgOF5JooAs8rRaTkl2rT3uaoNGo9NS3R8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= From 4935da7d1ec678e870470f05c5aece4ca424f589 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Feb 2026 12:03:45 +0100 Subject: [PATCH 090/232] chore(deps): bump the aws-sdk-go-v2 group with 2 updates (#18744) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] 
<49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 1f4e59a685e6e..7fe5f1e2f2901 100644 --- a/go.mod +++ b/go.mod @@ -29,9 +29,9 @@ require ( github.com/aws/aws-sdk-go-v2/config v1.32.7 github.com/aws/aws-sdk-go-v2/credentials v1.19.7 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.21.0 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.21.1 github.com/aws/aws-sdk-go-v2/service/ecr v1.55.1 - github.com/aws/aws-sdk-go-v2/service/s3 v1.95.1 + github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0 github.com/aws/aws-sdk-go-v2/service/securityhub v1.67.3 github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 github.com/aws/smithy-go v1.24.0 diff --git a/go.sum b/go.sum index bc09b35870a0e..d11667283c79a 100644 --- a/go.sum +++ b/go.sum @@ -295,8 +295,8 @@ github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUT github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.21.0 h1:pQZGI0qQXeCHZHMeWzhwPu+4jkWrdrIb2dgpG4OKmco= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.21.0/go.mod h1:XGq5kImVqQT4HUNbbG+0Y8O74URsPNH7CGPg1s1HW5E= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.21.1 h1:1hWFp+52Vq8Fevy/KUhbW/1MEApMz7uitCF/PQXRJpk= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.21.1/go.mod h1:sIec8j802/rCkCKgZV678HFR0s7lhQUYXT77tIvlaa4= github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U= github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik= @@ -319,8 +319,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 h1:bGeHBsGZx0Dvu github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17/go.mod h1:dcW24lbU0CzHusTE8LLHhRLI42ejmINN8Lcr22bwh/g= github.com/aws/aws-sdk-go-v2/service/kms v1.49.4 h1:2gom8MohxN0SnhHZBYAC4S8jHG+ENEnXjyJ5xKe3vLc= github.com/aws/aws-sdk-go-v2/service/kms v1.49.4/go.mod h1:HO31s0qt0lso/ADvZQyzKs8js/ku0fMHsfyXW8OPVYc= -github.com/aws/aws-sdk-go-v2/service/s3 v1.95.1 h1:C2dUPSnEpy4voWFIq3JNd8gN0Y5vYGDo44eUE58a/p8= -github.com/aws/aws-sdk-go-v2/service/s3 v1.95.1/go.mod h1:5jggDlZ2CLQhwJBiZJb4vfk4f0GxWdEDruWKEJ1xOdo= +github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0 h1:oeu8VPlOre74lBA/PMhxa5vewaMIMmILM+RraSyB8KA= +github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0/go.mod h1:5jggDlZ2CLQhwJBiZJb4vfk4f0GxWdEDruWKEJ1xOdo= github.com/aws/aws-sdk-go-v2/service/securityhub v1.67.3 h1:FEs3IkfJWp+Sz3ZY6sAxmebBF0lr1wBcTWkuFW1OFJg= github.com/aws/aws-sdk-go-v2/service/securityhub v1.67.3/go.mod h1:3wnS16Wip5w0uh9kVFBhuMFmdkrMBr8Fc96kAY5h13o= github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y= From 5fbb1c3fb6a36aa77854cd3431961804b7fb0238 Mon Sep 17 00:00:00 2001 From: Piotr Rygielski <114479+vikin91@users.noreply.github.com> Date: Mon, 2 Feb 2026 14:43:21 +0100 Subject: [PATCH 091/232] chore(metrics): Make help more descriptive, tune histograms (#18779) --- central/baseimage/watcher/metrics.go | 4 +- 
central/hash/manager/metrics.go | 4 +- central/metrics/central.go | 47 ++++++++++++-------- central/metrics/init.go | 1 + sensor/common/detector/metrics/metrics.go | 15 ++++--- sensor/common/metrics/metrics.go | 38 ++++++++-------- sensor/common/networkflow/metrics/metrics.go | 2 +- 7 files changed, 62 insertions(+), 49 deletions(-) diff --git a/central/baseimage/watcher/metrics.go b/central/baseimage/watcher/metrics.go index 8dd40cf3373be..22572ef0691e3 100644 --- a/central/baseimage/watcher/metrics.go +++ b/central/baseimage/watcher/metrics.go @@ -24,7 +24,7 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: "base_image_watcher", Name: "poll_duration_seconds", - Help: "Time taken to complete a poll cycle", + Help: "Time spent completing a poll cycle", Buckets: prometheus.ExponentialBuckets(0.1, 2, 10), // 0.1s to ~102s }, []string{"error"}, @@ -63,7 +63,7 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: "base_image_watcher", Name: "scan_duration_seconds", - Help: "Time taken to scan a repository (list tags + fetch metadata)", + Help: "Time spent scanning a repository (list tags + fetch metadata)", Buckets: prometheus.ExponentialBuckets(0.1, 2, 12), // 0.1s to ~409s }, []string{ "registry_domain", diff --git a/central/hash/manager/metrics.go b/central/hash/manager/metrics.go index a783ef6731b58..937adace71116 100644 --- a/central/hash/manager/metrics.go +++ b/central/hash/manager/metrics.go @@ -14,13 +14,13 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.CentralSubsystem.String(), Name: "deduping_hash_size", - Help: "Number of hashes in the deduping hashes", + Help: "Number of persisted deduplication hashes for a cluster at last flush", }, []string{"cluster"}) dedupingHashCounterVec = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: metrics.PrometheusNamespace, Subsystem: metrics.CentralSubsystem.String(), Name: "deduping_hash_count", - Help: "Number of operations against the hashes by cluster, resource type and operation", + Help: "Counts add/remove operations on deduplication hashes by cluster and resource type", }, []string{"cluster", "ResourceType", "Operation"}) ) diff --git a/central/metrics/central.go b/central/metrics/central.go index 248e18e2aac60..c52b146564af8 100644 --- a/central/metrics/central.go +++ b/central/metrics/central.go @@ -16,7 +16,7 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.CentralSubsystem.String(), Name: "index_op_duration", - Help: "Time taken to perform an index operation", + Help: "Time spent performing an index operation in milliseconds", // We care more about precision at lower latencies, or outliers at higher latencies. Buckets: prometheus.ExponentialBuckets(4, 2, 8), }, []string{"Operation", "Type"}) @@ -25,7 +25,7 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.CentralSubsystem.String(), Name: "cache_op_duration", - Help: "Time taken to perform a cache operation", + Help: "Time spent performing a cache operation in milliseconds", // We care more about precision at lower latencies, or outliers at higher latencies. Buckets: prometheus.ExponentialBuckets(4, 2, 8), }, []string{"Operation", "Type"}) @@ -34,7 +34,7 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.CentralSubsystem.String(), Name: "prune_duration", - Help: "Time to perform a pruning operation", + Help: "Time to perform a pruning operation in milliseconds", // We care more about precision at lower latencies, or outliers at higher latencies. 
Buckets: prometheus.ExponentialBuckets(4, 2, 8), }, []string{"Type"}) @@ -43,7 +43,7 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.CentralSubsystem.String(), Name: "postgres_op_duration", - Help: "Time taken to perform a postgres operation", + Help: "Time spent performing a postgres operation in milliseconds", // We care more about precision at lower latencies, or outliers at higher latencies. Buckets: prometheus.ExponentialBuckets(4, 2, 8), }, []string{"Operation", "Type"}) @@ -52,7 +52,7 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.CentralSubsystem.String(), Name: "postgres_acquire_conn_op_duration", - Help: "Time taken to acquire a Postgres connection", + Help: "Time spent acquiring a Postgres connection in milliseconds", // We care more about precision at lower latencies, or outliers at higher latencies. Buckets: prometheus.ExponentialBuckets(4, 2, 8), }, []string{"Operation", "Type"}) @@ -61,7 +61,7 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.CentralSubsystem.String(), Name: "graphql_op_duration", - Help: "Time taken to run a single graphql sub resolver/sub query", + Help: "Time spent running a single graphql sub resolver/sub query in milliseconds", // We care more about precision at lower latencies, or outliers at higher latencies. Buckets: prometheus.ExponentialBuckets(4, 2, 8), }, []string{"Resolver", "Operation"}) @@ -70,7 +70,7 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.CentralSubsystem.String(), Name: "graphql_query_duration", - Help: "Time taken to run a single graphql query", + Help: "Time spent running a single graphql query in milliseconds", // We care more about precision at lower latencies, or outliers at higher latencies. Buckets: prometheus.ExponentialBuckets(4, 2, 8), }, []string{"Query"}) @@ -79,7 +79,7 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.CentralSubsystem.String(), Name: "sensor_event_duration", - Help: "Time taken to perform an process a sensor event operation", + Help: "Time spent performing a sensor event operation in milliseconds", // We care more about precision at lower latencies, or outliers at higher latencies. Buckets: prometheus.ExponentialBuckets(4, 2, 13), }, []string{"Type", "Action"}) @@ -88,14 +88,14 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.CentralSubsystem.String(), Name: "sensor_event_queue", - Help: "Number of elements in removed from the queue", + Help: "Number of enqueue and dequeue operations on Central's event deduping queues for messages arriving from Sensor", }, []string{"Operation", "Type"}) resourceProcessedCounterVec = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: metrics.PrometheusNamespace, Subsystem: metrics.CentralSubsystem.String(), Name: "resource_processed_count", - Help: "Number of elements received and processed", + Help: "Number of sensor event resources successfully processed by Central pipelines", }, []string{"Operation", "Resource"}) totalNetworkFlowsReceivedCounter = prometheus.NewCounterVec(prometheus.CounterOpts{ @@ -112,18 +112,26 @@ var ( Help: "A counter of the total number of network endpoints received by Central from Sensor", }, []string{"ClusterID"}) + // totalExternalPoliciesGauge is deprecated due to naming confusion (vector vs. gauge), use currentExternalPolicies instead. 
totalExternalPoliciesGauge = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: metrics.PrometheusNamespace, Subsystem: metrics.CentralSubsystem.String(), Name: "total_external_policies_count", - Help: "A gauge of the total number of policy as code CRs that have been accepted by Central from Config Controller", + Help: "Number of policy-as-code CRs accepted by Central from Config Controller", + }) + // currentExternalPolicies replaces the totalExternalPoliciesGauge + currentExternalPolicies = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: metrics.PrometheusNamespace, + Subsystem: metrics.CentralSubsystem.String(), + Name: "number_of_external_policies_current", + Help: "Number of policy-as-code CRs accepted by Central from Config Controller", }) riskProcessingHistogramVec = prometheus.NewHistogramVec(prometheus.HistogramOpts{ Namespace: metrics.PrometheusNamespace, Subsystem: metrics.CentralSubsystem.String(), Name: "risk_processing_duration", - Help: "Histogram of how long risk processing takes", + Help: "Time in milliseconds spent recomputing and persisting risk scores in Central's risk manager for deployments, nodes, images, and their components", Buckets: prometheus.ExponentialBuckets(4, 2, 8), }, []string{"Risk_Reprocessor"}) @@ -131,7 +139,7 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.CentralSubsystem.String(), Name: "datastore_function_duration", - Help: "Histogram of how long a datastore function takes", + Help: "Histogram of how long a datastore function takes in milliseconds", Buckets: prometheus.ExponentialBuckets(4, 2, 8), }, []string{"Type", "Function"}) @@ -139,7 +147,7 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.CentralSubsystem.String(), Name: "function_segment_duration", - Help: "Histogram of how long a particular segment within a function takes", + Help: "Histogram of how long a particular segment within a function takes in milliseconds", Buckets: prometheus.ExponentialBuckets(4, 2, 8), }, []string{"Segment"}) @@ -169,34 +177,35 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.CentralSubsystem.String(), Name: "orphaned_plop_total", - Help: "A counter of the total number of PLOP objects without a reference to a ProcessIndicator", + Help: "Count of process-listening-on-port records that arrived without a matching process indicator", }, []string{"ClusterID"}) processQueueLengthGauge = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: metrics.PrometheusNamespace, Subsystem: metrics.CentralSubsystem.String(), Name: "process_queue_length", - Help: "A gauge that indicates the current number of processes that have not been flushed", + Help: "Current number of process indicators queued for baseline evaluation and persistence", }) sensorEventsDeduperCounter = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: metrics.PrometheusNamespace, Subsystem: metrics.CentralSubsystem.String(), Name: "sensor_event_deduper", - Help: "A counter that tracks objects that has passed the sensor event deduper in the connection stream", + Help: "Counts sensor events skipped by the deduper vs processed as new", }, []string{"status", "type"}) pipelinePanicCounter = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: metrics.PrometheusNamespace, Subsystem: metrics.CentralSubsystem.String(), Name: "pipeline_panics", - Help: "A counter that tracks the number of panics that have occurred in the processing pipelines", + Help: "Count of panics recovered in Central processing pipelines", }, []string{"resource"}) 
sensorConnectedCounter = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: metrics.PrometheusNamespace, Subsystem: metrics.CentralSubsystem.String(), Name: "sensor_connected", + Help: "Count of sensor connections observed by Central", }, []string{"ClusterID", "connection_state"}) grpcLastMessageSizeReceived = prometheus.NewGaugeVec(prometheus.GaugeOpts{ @@ -421,10 +430,12 @@ func IncrementTotalNetworkEndpointsReceivedCounter(clusterID string, numberOfEnd func IncrementTotalExternalPoliciesGauge() { totalExternalPoliciesGauge.Inc() + currentExternalPolicies.Inc() } func DecrementTotalExternalPoliciesGauge() { totalExternalPoliciesGauge.Dec() + currentExternalPolicies.Dec() } // ObserveRiskProcessingDuration adds an observation for risk processing duration. diff --git a/central/metrics/init.go b/central/metrics/init.go index 7f2f8e9b79f4b..d62706ebf4836 100644 --- a/central/metrics/init.go +++ b/central/metrics/init.go @@ -17,6 +17,7 @@ func init() { totalNetworkFlowsReceivedCounter, totalNetworkEndpointsReceivedCounter, totalExternalPoliciesGauge, + currentExternalPolicies, sensorEventDurationHistogramVec, riskProcessingHistogramVec, datastoreFunctionDurationHistogramVec, diff --git a/sensor/common/detector/metrics/metrics.go b/sensor/common/detector/metrics/metrics.go index 924014157a616..441b602a660e9 100644 --- a/sensor/common/detector/metrics/metrics.go +++ b/sensor/common/detector/metrics/metrics.go @@ -72,8 +72,8 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.SensorSubsystem.String(), Name: "enricher_image_scan_internal_exponential_backoff_seconds", - Help: "Time spent in exponential backoff for the ImageScanInternal endpoint", - Buckets: prometheus.ExponentialBuckets(4, 2, 8), + Help: "Time spent backing off before a successful ImageScanInternal response, typically due to scan rate limiting", + Buckets: prometheus.ExponentialBuckets(1, 2, 10), }) networkPoliciesStored = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: metrics.PrometheusNamespace, @@ -105,7 +105,7 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.SensorSubsystem.String(), Name: "node_scan_processed_total", - Help: "Total number of Node Inventories/Indexes received/sent by this Sensor", + Help: "Counts node inventory/index reports received from Compliance and sent to Central", }, []string{ // Name of the node sending an inventory @@ -137,7 +137,7 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.SensorSubsystem.String(), Name: "node_scanning_ack_processed_total", - Help: "Total number of Acks or Nacks for Node Inventories/Indexes processed by Sensor", + Help: "Counts ACK/NACK messages for node inventory/index processing", }, []string{ // Name of the node sending an inventory @@ -218,15 +218,16 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.SensorSubsystem.String(), Name: "block_scan_calls_total", - Help: "A counter that tracks the operations in blocking scan calls", + Help: "Counts add/remove operations for blocking scans triggered by deployment create/update", }, []string{"Operation", "Path"}) scanCallDuration = prometheus.NewHistogram(prometheus.HistogramOpts{ Namespace: metrics.PrometheusNamespace, Subsystem: metrics.SensorSubsystem.String(), Name: "scan_call_duration_milliseconds", - Help: "Time taken to call scan in milliseconds", - Buckets: prometheus.ExponentialBuckets(4, 2, 16), + Help: "Total time spent calling Scan in milliseconds, including retries and backoff waits. 
" + + "Applies to both local and remote scans (whichever is currently used in Sensor).", + Buckets: prometheus.ExponentialBuckets(4, 2, 16), }) scanAndSetCall = prometheus.NewCounterVec(prometheus.CounterOpts{ diff --git a/sensor/common/metrics/metrics.go b/sensor/common/metrics/metrics.go index 10d1c3399ceef..3f271fd9f601e 100644 --- a/sensor/common/metrics/metrics.go +++ b/sensor/common/metrics/metrics.go @@ -60,28 +60,28 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.SensorSubsystem.String(), Name: "process_enrichment_drops", - Help: "A counter of the total number of times we've dropped enriching process indicators", + Help: "Count of process indicators dropped because container metadata was not available before LRU eviction", }) processEnrichmentHits = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: metrics.PrometheusNamespace, Subsystem: metrics.SensorSubsystem.String(), Name: "process_enrichment_hits", - Help: "A counter of the total number of times we've successfully enriched process indicators", + Help: "Count of process indicators successfully enriched with container metadata", }) processEnrichmentLRUCacheSize = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: metrics.PrometheusNamespace, Subsystem: metrics.SensorSubsystem.String(), Name: "process_enrichment_cache_size", - Help: "A gauge to track the enrichment lru cache size", + Help: "Current number of container entries waiting in the process-enrichment LRU cache", }) sensorIndicatorChannelFullCounter = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: metrics.PrometheusNamespace, Subsystem: metrics.SensorSubsystem.String(), Name: "indicators_channel_indicator_dropped_counter", - Help: "A counter of the total number of times we've dropped indicators from the indicators channel because it was full", + Help: "Total process indicator events dropped because the outgoing buffer to Central was full", }) networkFlowBufferGauge = prometheus.NewGauge(prometheus.GaugeOpts{ @@ -116,14 +116,14 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.SensorSubsystem.String(), Name: "process_signal_dropper_counter", - Help: "A counter of the total number of process indicators that were dropped if the buffer was full", + Help: "Count of process signals dropped due to shutdown or a full output buffer", }) sensorEvents = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: metrics.PrometheusNamespace, Subsystem: metrics.SensorSubsystem.String(), Name: "sensor_events", - Help: "A counter for the total number of events sent from Sensor to Central", + Help: "Total number of events sent from Sensor to Central", }, []string{"Action", "ResourceType", "Type"}) sensorLastMessageSizeSent = prometheus.NewGaugeVec(prometheus.GaugeOpts{ @@ -158,7 +158,7 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.SensorSubsystem.String(), Name: "k8s_events", - Help: "A counter for the total number of typed k8s events processed by Sensor", + Help: "Total number of Kubernetes resource events processed by the Sensor listener", }, []string{"Action", "Resource"}) resourcesSyncedUnchaged = prometheus.NewCounter(prometheus.CounterOpts{ @@ -172,37 +172,37 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.SensorSubsystem.String(), Name: "resources_synced_size", - Help: "A gauge to track how large ResourcesSynced message is", + Help: "Size in bytes of the most recent ResourcesSynced message sent to Central", }) deploymentEnhancementQueueSize = prometheus.NewGauge(prometheus.GaugeOpts{ 
Namespace: metrics.PrometheusNamespace, Subsystem: metrics.SensorSubsystem.String(), Name: "deployment_enhancement_queue_size", - Help: "A counter to track deployments queued up in Sensor to be enhanced", + Help: "Current number of deployment enhancement requests from Central waiting to be processed", }) k8sObjectIngestionToSendDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ Namespace: metrics.PrometheusNamespace, Subsystem: metrics.SensorSubsystem.String(), Name: "k8s_event_ingestion_to_send_duration", - Help: "Time taken to fully process an event from Kubernetes", - Buckets: prometheus.ExponentialBuckets(4, 2, 8), + Help: "Sensor-side time from ingesting a Kubernetes event to sending the resulting update to Central in milliseconds", + Buckets: prometheus.ExponentialBuckets(4, 2, 10), }, []string{"Action", "Resource", "Dispatcher", "Type"}) k8sObjectProcessingDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ Namespace: metrics.PrometheusNamespace, Subsystem: metrics.SensorSubsystem.String(), Name: "k8s_event_processing_duration", - Help: "Time taken to fully process an event from Kubernetes", - Buckets: prometheus.ExponentialBuckets(4, 2, 8), + Help: "Time spent fully processing an event from Kubernetes in milliseconds", + Buckets: prometheus.ExponentialBuckets(4, 2, 10), }, []string{"Action", "Resource", "Dispatcher"}) resolverChannelSize = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: metrics.PrometheusNamespace, Subsystem: metrics.SensorSubsystem.String(), Name: "resolver_channel_size", - Help: "A gauge to track the resolver channel size", + Help: "Current number of resource events waiting in the resolver input queue", }) // ResolverDedupingQueueSize a gauge to track the resolver's deduping queue size. @@ -210,14 +210,14 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.SensorSubsystem.String(), Name: "resolver_deduping_queue_size", - Help: "A gauge to track the resolver deduping queue size", + Help: "Current number of pending deployment references in the resolver deduping queue", }) outputChannelSize = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: metrics.PrometheusNamespace, Subsystem: metrics.SensorSubsystem.String(), Name: "output_channel_size", - Help: "A gauge to track the output channel size", + Help: "Current number of resolved events waiting in the output queue before detector/forwarding", }) telemetryLabels = prometheus.Labels{ @@ -275,7 +275,7 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.SensorSubsystem.String(), Name: "num_messages_waiting_for_transmission_to_central", - Help: "A counter that tracks the operations in the responses channel", + Help: "Counts enqueue, dequeue, and drop operations on the Sensor-to-Central buffered stream", }, []string{Operation, "MessageType"}) // componentProcessMessageDurationSeconds tracks the duration of ProcessMessage calls for each component @@ -283,8 +283,8 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.SensorSubsystem.String(), Name: "component_process_message_duration_seconds", - Help: "Time taken to process messages from Central in each sensor component", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 12), // 1ms to ~4s + Help: "Time spent handling a message from Central inside a Sensor component in seconds", + Buckets: prometheus.ExponentialBuckets(0.001, 2, 16), // 1ms to ~32s }, []string{ComponentName}) // ComponentQueueOperations keeps track of the operations of the component queue buffer. 
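Several histograms in this patch are retuned through Prometheus' exponential-bucket helper, so it is worth spelling out what those boundaries come to. The snippet below is illustrative only, not part of the patch; it simply prints the boundaries for the two bucket settings changed above.

    // ExponentialBuckets(start, factor, count) returns start*factor^k for k = 0..count-1.
    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        // component_process_message_duration_seconds: 0.001s, 0.002s, ... 32.768s (the "1ms to ~32s" noted above)
        fmt.Println(prometheus.ExponentialBuckets(0.001, 2, 16))
        // enricher_image_scan_internal_exponential_backoff_seconds: 1s, 2s, ... 512s
        fmt.Println(prometheus.ExponentialBuckets(1, 2, 10))
    }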
diff --git a/sensor/common/networkflow/metrics/metrics.go b/sensor/common/networkflow/metrics/metrics.go index 9e90256c7b31e..e8eec7c079388 100644 --- a/sensor/common/networkflow/metrics/metrics.go +++ b/sensor/common/networkflow/metrics/metrics.go @@ -173,7 +173,7 @@ var ( Namespace: metrics.PrometheusNamespace, Subsystem: metrics.SensorSubsystem.String(), Name: netFlowManagerPrefix + "processes_listening_on_port_enrichment_events_total", - Help: "Total number of enrichment outcomes for the plop", + Help: "Counts enrichment outcomes for process-listening-on-port endpoints during flow enrichment", }, []string{"containerIDfound", "result", "action", "isHistorical", "reason", "isClosed", "rotten", "mature", "fresh"}) ) From 86911c02f2c4b677c52e91eccdf9cc24fe4148e9 Mon Sep 17 00:00:00 2001 From: Sukumar Yethadka Date: Mon, 2 Feb 2026 15:40:21 +0100 Subject: [PATCH 092/232] ROX-31432: Improve error messages for missing Central endpoint (#18760) --- roxctl/central/login/login.go | 7 +- roxctl/common/client.go | 2 +- roxctl/common/connection.go | 2 +- roxctl/common/errors.go | 35 +++++++++ roxctl/common/errors_test.go | 104 +++++++++++++++++++++++++++ roxctl/common/flags/endpoint.go | 17 ++++- roxctl/common/flags/endpoint_test.go | 82 +++++++++++++++++++++ 7 files changed, 245 insertions(+), 4 deletions(-) diff --git a/roxctl/central/login/login.go b/roxctl/central/login/login.go index dfc89c54d3b2b..5566d32ae8d30 100644 --- a/roxctl/central/login/login.go +++ b/roxctl/central/login/login.go @@ -275,7 +275,12 @@ In case the access token is expired and cannot be refreshed, you have to run "ro func (l *loginCommand) verifyLoginAuthProviders() error { // Use the HTTP client with the anonymous auth method to force anonymous access. // Note that this may still be done via HTTP/2 instead of HTTP/1, unless HTTP/1 is forced. - httpClient, err := l.env.HTTPClient(30*time.Second, common.WithAuthMethod(auth.Anonymous())) + // When using the default endpoint, disable retries to fail fast on connection errors. + clientOpts := []common.HttpClientOption{common.WithAuthMethod(auth.Anonymous())} + if !flags.EndpointWasExplicitlyProvided() { + clientOpts = append(clientOpts, common.WithRetryCount(0)) + } + httpClient, err := l.env.HTTPClient(l.timeout, clientOpts...) 
if err != nil { return errors.Wrap(err, "creating HTTP client") diff --git a/roxctl/common/client.go b/roxctl/common/client.go index ce171dc0091b3..d1f8cb4ef4dcb 100644 --- a/roxctl/common/client.go +++ b/roxctl/common/client.go @@ -163,7 +163,7 @@ func (client *roxctlClientImpl) Do(req *http.Request) (*http.Response, error) { if _, ok := err.(*url.Error); ok { err = errors.Unwrap(err) } - return resp, errors.Wrap(err, "error when doing http request") + return resp, EnhanceConnectionError(errors.Wrap(err, "error when doing http request")) } // NewReq creates a new http.Request which will have all authentication metadata injected diff --git a/roxctl/common/connection.go b/roxctl/common/connection.go index cb0baa968d1ac..19246279db05e 100644 --- a/roxctl/common/connection.go +++ b/roxctl/common/connection.go @@ -37,7 +37,7 @@ func WithRetryTimeout(timeout time.Duration) GRPCOption { func GetGRPCConnection(am auth.Method, connectionOpts ...GRPCOption) (*grpc.ClientConn, error) { endpoint, serverName, usePlaintext, err := ConnectNames() if err != nil { - return nil, errors.Wrap(err, "could not get endpoint for gRPC connection") + return nil, EnhanceConnectionError(errors.Wrap(err, "could not get endpoint for gRPC connection")) } perRPCCreds, err := am.GetCredentials(endpoint) if err != nil { diff --git a/roxctl/common/errors.go b/roxctl/common/errors.go index 515310034c3c0..9a3c58a637811 100644 --- a/roxctl/common/errors.go +++ b/roxctl/common/errors.go @@ -2,9 +2,12 @@ package common import ( "slices" + "strings" + "github.com/pkg/errors" "github.com/stackrox/rox/pkg/errox" "github.com/stackrox/rox/pkg/retry" + "github.com/stackrox/rox/roxctl/common/flags" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -48,3 +51,35 @@ func MakeRetryable(err error) error { // Mark all other errors as retryable. return retry.MakeRetryable(err) } + +// EnhanceConnectionError enhances connection errors with helpful guidance when +// the user has not explicitly configured an endpoint and connection fails. +func EnhanceConnectionError(err error) error { + if err == nil { + return nil + } + + // Only enhance errors when using the default endpoint + if flags.EndpointWasExplicitlyProvided() { + return err + } + + // Check if this is a connection-related error + // Normalize to lowercase to handle case variations in error messages + errMsg := strings.ToLower(err.Error()) + isConnectionError := strings.Contains(errMsg, "connection refused") || + strings.Contains(errMsg, "i/o timeout") || + strings.Contains(errMsg, "no such host") || + strings.Contains(errMsg, "dial tcp") || + strings.Contains(errMsg, "deadline exceeded") + + if !isConnectionError { + return err + } + + // Enhance the error with helpful guidance + return errors.Wrapf(err, + "Could not connect to Central at default endpoint (%s).\n"+ + "HINT: Configure the Central endpoint using the -e/--endpoint flag or ROX_ENDPOINT environment variable.\n"+ + " Example: roxctl -e central.example.com:443 ", flags.DefaultEndpoint) +} diff --git a/roxctl/common/errors_test.go b/roxctl/common/errors_test.go index c2bf78cbfef65..ab44aeafe13c6 100644 --- a/roxctl/common/errors_test.go +++ b/roxctl/common/errors_test.go @@ -63,3 +63,107 @@ func TestMakeRetryable(t *testing.T) { }) } } + +func TestEnhanceConnectionError(t *testing.T) { + // Note: This test focuses on error enhancement logic and the ROX_ENDPOINT environment variable. 
+ // Testing the -e/--endpoint flag and kube context configuration requires integration with cobra + // and flag parsing, which is comprehensively covered by TestEndpointWasExplicitlyProvided in endpoint_test.go. + cases := []struct { + name string + err error + setEndpointEnv bool + expectEnhancement bool + }{ + { + name: "nil error returns nil", + err: nil, + setEndpointEnv: false, + expectEnhancement: false, + }, + { + name: "connection refused with default endpoint", + err: errors.New("dial tcp [::1]:8443: connect: connection refused"), + setEndpointEnv: false, + expectEnhancement: true, + }, + { + name: "connection refused with ROX_ENDPOINT set", + err: errors.New("dial tcp [::1]:8443: connect: connection refused"), + setEndpointEnv: true, + expectEnhancement: false, + }, + { + name: "i/o timeout with default endpoint", + err: errors.New("dial tcp 192.168.1.1:443: i/o timeout"), + setEndpointEnv: false, + expectEnhancement: true, + }, + { + name: "no such host with default endpoint", + err: errors.New("dial tcp: lookup invalid.local: no such host"), + setEndpointEnv: false, + expectEnhancement: true, + }, + { + name: "deadline exceeded with default endpoint", + err: errors.New("context deadline exceeded"), + setEndpointEnv: false, + expectEnhancement: true, + }, + { + name: "wrapped connection error with default endpoint", + err: errors.Wrap(errors.New("dial tcp [::1]:8443: connect: connection refused"), "error when doing http request"), + setEndpointEnv: false, + expectEnhancement: true, + }, + { + name: "authentication error with default endpoint", + err: errors.New("authentication failed: invalid credentials"), + setEndpointEnv: false, + expectEnhancement: false, + }, + { + name: "TLS error with default endpoint", + err: errors.New("x509: certificate signed by unknown authority"), + setEndpointEnv: false, + expectEnhancement: false, + }, + { + name: "generic error with default endpoint", + err: errors.New("some other error"), + setEndpointEnv: false, + expectEnhancement: false, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + // Set up environment to simulate explicit or default endpoint + if tc.setEndpointEnv { + t.Setenv("ROX_ENDPOINT", "central.example.com:443") + } else { + t.Setenv("ROX_ENDPOINT", "") + } + + originalErr := tc.err + result := EnhanceConnectionError(tc.err) + + if tc.err == nil { + assert.Nil(t, result) + return + } + + if tc.expectEnhancement { + // Should contain the hint message + assert.Contains(t, result.Error(), "Could not connect to Central at default endpoint") + assert.Contains(t, result.Error(), "HINT: Configure the Central endpoint") + assert.Contains(t, result.Error(), "Example: roxctl -e central.example.com:443") + // Original error should still be present + assert.Contains(t, result.Error(), originalErr.Error()) + } else { + // Should be unchanged + assert.Equal(t, originalErr, result) + } + }) + } +} diff --git a/roxctl/common/flags/endpoint.go b/roxctl/common/flags/endpoint.go index 71cc29ade7cc2..8abba43d4a288 100644 --- a/roxctl/common/flags/endpoint.go +++ b/roxctl/common/flags/endpoint.go @@ -48,11 +48,14 @@ const ( plaintextFlagName = "plaintext" serverNameFlagName = "server-name" useKubeContextFlagName = "use-current-k8s-context" + + // DefaultEndpoint is the default central endpoint when none is specified + DefaultEndpoint = "localhost:8443" ) // AddConnectionFlags adds connection-related flags to roxctl. 
func AddConnectionFlags(c *cobra.Command) { - c.PersistentFlags().StringVarP(&endpoint, endpointFlagName, "e", "localhost:8443", + c.PersistentFlags().StringVarP(&endpoint, endpointFlagName, "e", DefaultEndpoint, "Endpoint for service to contact. Alternatively, set the endpoint via the ROX_ENDPOINT environment variable.") endpointChanged = &c.PersistentFlags().Lookup(endpointFlagName).Changed c.PersistentFlags().StringVarP(&serverName, serverNameFlagName, "s", "", "TLS ServerName to use for SNI "+ @@ -189,3 +192,15 @@ func CentralURL() (*url.URL, error) { func UseKubeContext() bool { return useKubeContext || env.UseCurrentKubeContext.BooleanSetting() } + +// EndpointWasExplicitlyProvided returns true if the user explicitly provided an endpoint +// via the -e/--endpoint flag, the ROX_ENDPOINT environment variable, or is using +// port-forwarding via kubeconfig context. +// Returns false when the implicit default endpoint is being used (i.e., when no endpoint +// configuration was explicitly provided by the user). +func EndpointWasExplicitlyProvided() bool { + // Defensively handle nil pointer (shouldn't happen in normal execution but prevents panic) + flagChanged := endpointChanged != nil && *endpointChanged + endpointVal := strings.TrimSpace(env.EndpointEnv.Setting()) + return flagChanged || endpointVal != "" || UseKubeContext() +} diff --git a/roxctl/common/flags/endpoint_test.go b/roxctl/common/flags/endpoint_test.go index 557349ca2f281..aa4e7e8ca926d 100644 --- a/roxctl/common/flags/endpoint_test.go +++ b/roxctl/common/flags/endpoint_test.go @@ -87,3 +87,85 @@ func TestEndpointAndPlaintextSetting(t *testing.T) { }) } } + +func TestEndpointWasExplicitlyProvided(t *testing.T) { + testCases := []struct { + name string + endpointFlag bool + envVarValue string + kubeContextFlag bool + kubeContextEnvValue string + expectedExplicit bool + }{ + { + name: "default endpoint - not explicit", + endpointFlag: false, + envVarValue: "", + kubeContextFlag: false, + expectedExplicit: false, + }, + { + name: "endpoint flag set - explicit", + endpointFlag: true, + envVarValue: "", + kubeContextFlag: false, + expectedExplicit: true, + }, + { + name: "env var set - explicit", + endpointFlag: false, + envVarValue: "central.example.com:443", + kubeContextFlag: false, + expectedExplicit: true, + }, + { + name: "kube context flag enabled - explicit", + endpointFlag: false, + envVarValue: "", + kubeContextFlag: true, + expectedExplicit: true, + }, + { + name: "kube context env var enabled - explicit", + endpointFlag: false, + envVarValue: "", + kubeContextFlag: false, + kubeContextEnvValue: "true", + expectedExplicit: true, + }, + { + name: "endpoint flag and env var both set - explicit", + endpointFlag: true, + envVarValue: "central.example.com:443", + kubeContextFlag: false, + expectedExplicit: true, + }, + { + name: "all three set - explicit", + endpointFlag: true, + envVarValue: "central.example.com:443", + kubeContextFlag: true, + expectedExplicit: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Reset state + endpointChanged = pointers.Bool(tc.endpointFlag) + useKubeContext = tc.kubeContextFlag + + // Clear and optionally set environment variables + t.Setenv(env.EndpointEnv.EnvVar(), tc.envVarValue) + if tc.kubeContextEnvValue != "" { + t.Setenv(env.UseCurrentKubeContext.EnvVar(), tc.kubeContextEnvValue) + } else { + t.Setenv(env.UseCurrentKubeContext.EnvVar(), "") + } + + result := EndpointWasExplicitlyProvided() + assert.Equal(t, tc.expectedExplicit, result, + 
"EndpointWasExplicitlyProvided() returned %v, expected %v", result, tc.expectedExplicit) + }) + } +} From dbb120424636afcd88b681ae03d239c62222a145 Mon Sep 17 00:00:00 2001 From: Sukumar Yethadka Date: Mon, 2 Feb 2026 15:40:44 +0100 Subject: [PATCH 093/232] ROX-31431: Improve error message for missing roxctl auth (#18761) --- roxctl/common/config/config.go | 11 ++++- roxctl/common/config/config_test.go | 64 +++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+), 2 deletions(-) diff --git a/roxctl/common/config/config.go b/roxctl/common/config/config.go index 39b1ceb2e599e..b1e0fae45041f 100644 --- a/roxctl/common/config/config.go +++ b/roxctl/common/config/config.go @@ -12,6 +12,13 @@ import ( "go.yaml.in/yaml/v3" ) +const missingAuthCredsMessage = ` +No authentication credentials are available. Please provide authentication using one of the following methods: + - Use the --password flag or set the ROX_ADMIN_PASSWORD environment variable + - Use the --token-file flag and point to a file containing your API token + - Set the ROX_API_TOKEN environment variable with your API token + - Run "roxctl central login" to save credentials (requires writable home directory)` + // Store provides the ability to read / write configurations for roxctl from / to a configuration file. // //go:generate mockgen-wrapper @@ -164,7 +171,7 @@ func determineConfigDir() (string, error) { } path := filepath.Join(homeDir, ".roxctl") if err := os.MkdirAll(path, 0700); err != nil { - return "", errors.Wrapf(err, "creating config directory %s", path) + return "", errox.NoCredentials.Newf("unable to access configuration directory %s: %v%s", path, err, missingAuthCredsMessage) } return path, nil } @@ -173,7 +180,7 @@ func determineConfigDir() (string, error) { func ensureRoxctlConfigFilePathExists(configDir string) (string, error) { configFilePath := filepath.Join(configDir, "roxctl-config.yaml") if err := os.MkdirAll(configDir, 0700); err != nil { - return "", errors.Wrapf(err, "creating roxctl config file %s", configDir) + return "", errox.NoCredentials.Newf("unable to create configuration directory %s: %v%s", configDir, err, missingAuthCredsMessage) } return configFilePath, nil } diff --git a/roxctl/common/config/config_test.go b/roxctl/common/config/config_test.go index a210bed0185ea..8eb2dd74badd6 100644 --- a/roxctl/common/config/config_test.go +++ b/roxctl/common/config/config_test.go @@ -1,11 +1,13 @@ package config import ( + "errors" "os" "path/filepath" "testing" "github.com/stackrox/rox/pkg/env" + "github.com/stackrox/rox/pkg/errox" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.yaml.in/yaml/v3" @@ -94,3 +96,65 @@ func TestDetermineConfigPath(t *testing.T) { assert.NoError(t, err) assert.Equal(t, homeDir, dir) } + +func TestDetermineConfigDirPermissionDenied(t *testing.T) { + // This test verifies that when directory creation fails (e.g., permission denied), + // a helpful error message is returned suggesting alternative authentication methods. 
+ + testDir := t.TempDir() + + // Create a file where HOME/.roxctl would be, which will cause mkdir to fail + homeFile := filepath.Join(testDir, "homefile") + require.NoError(t, os.WriteFile(homeFile, []byte("not a directory"), 0600)) + + // Set HOME to the file path - this will cause MkdirAll to fail when trying to create .roxctl + t.Setenv("HOME", homeFile) + t.Setenv(env.ConfigDirEnv.EnvVar(), "") + t.Setenv("XDG_RUNTIME_DIR", "") + + expectedConfigPath := filepath.Join(homeFile, ".roxctl") + + _, err := determineConfigDir() + require.Error(t, err) + + // Verify the error is a NoCredentials error + assert.True(t, errors.Is(err, errox.NoCredentials)) + + // Verify the error message contains the expected config path and helpful suggestions + errMsg := err.Error() + assert.Contains(t, errMsg, expectedConfigPath, "error should mention the config path that failed") + assert.Contains(t, errMsg, "No authentication credentials are available") + assert.Contains(t, errMsg, "--password") + assert.Contains(t, errMsg, "ROX_API_TOKEN") + assert.Contains(t, errMsg, "--token-file") + assert.Contains(t, errMsg, "roxctl central login") +} + +func TestEnsureRoxctlConfigFilePathExistsPermissionDenied(t *testing.T) { + // This test verifies that ensureRoxctlConfigFilePathExists also returns the helpful + // error message when directory creation fails. + + testDir := t.TempDir() + + // Create a file that blocks directory creation + blockingFile := filepath.Join(testDir, "blocking") + require.NoError(t, os.WriteFile(blockingFile, []byte("not a directory"), 0600)) + + // Try to create config file path under the blocking file + configDirPath := filepath.Join(blockingFile, "subdir") + + _, err := ensureRoxctlConfigFilePathExists(configDirPath) + require.Error(t, err) + + // Verify the error is a NoCredentials error + assert.True(t, errors.Is(err, errox.NoCredentials)) + + // Verify the error message contains the expected config path and helpful suggestions + errMsg := err.Error() + assert.Contains(t, errMsg, configDirPath, "error should mention the config path that failed") + assert.Contains(t, errMsg, "No authentication credentials are available") + assert.Contains(t, errMsg, "--password") + assert.Contains(t, errMsg, "ROX_API_TOKEN") + assert.Contains(t, errMsg, "--token-file") + assert.Contains(t, errMsg, "roxctl central login") +} From 96293b42c891d7bd270b35c8c66e59dea3403eed Mon Sep 17 00:00:00 2001 From: Alex Rukletsov Date: Mon, 2 Feb 2026 16:15:02 +0100 Subject: [PATCH 094/232] chore(docs): Mention shepherding and AI guidelines in Readme (#18662) Co-authored-by: Misha Sugakov <537715+msugakov@users.noreply.github.com> --- README.md | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index fbae0276d6729..2f2ae8d7652f1 100644 --- a/README.md +++ b/README.md @@ -323,14 +323,13 @@ Then go to https://localhost:8000/ in your web browser. 
--- ## Development +- Start with [**pull request guidelines**](./.github/contributing.md) if you plan to make a contribution +- Look at [**shepherding**](./.github/shepherding.md) for non-trivial code submissions +- Check out our [**AI policy**](./.github/using-llms.md) +- **Go coding style guide**: [go-coding-style.md](./.github/go-coding-style.md) - **UI Dev Docs**: Refer to [ui/README.md](./ui/README.md) - - **E2E Dev Docs**: Refer to [qa-tests-backend/README.md](./qa-tests-backend/README.md) -- **Pull request guidelines**: [contributing.md](./.github/contributing.md) - -- **Go coding style guide**: [go-coding-style.md](./.github/go-coding-style.md) - ### Quickstart #### Build Tooling From e8d0d6ed3aa3ba4cd404a18d8153786516ed1b2d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Feb 2026 17:36:52 +0100 Subject: [PATCH 095/232] chore(deps): bump github.com/onsi/gomega from 1.39.0 to 1.39.1 (#18765) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7fe5f1e2f2901..3e0b88c16cfcc 100644 --- a/go.mod +++ b/go.mod @@ -91,7 +91,7 @@ require ( github.com/nxadm/tail v1.4.11 github.com/olekukonko/tablewriter v1.1.3 github.com/onsi/ginkgo/v2 v2.28.1 - github.com/onsi/gomega v1.39.0 + github.com/onsi/gomega v1.39.1 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.1 github.com/openshift-online/ocm-sdk-go v0.1.493 diff --git a/go.sum b/go.sum index d11667283c79a..c8f1a2a879587 100644 --- a/go.sum +++ b/go.sum @@ -1263,8 +1263,8 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.39.0 h1:y2ROC3hKFmQZJNFeGAMeHZKkjBL65mIZcvrLQBF9k6Q= -github.com/onsi/gomega v1.39.0/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4= +github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28= +github.com/onsi/gomega v1.39.1/go.mod h1:hL6yVALoTOxeWudERyfppUcZXjMwIMLnuSfruD2lcfg= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= From 9f4ca3fd13b311c23b778961051388740ce8ec07 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Feb 2026 17:36:58 +0100 Subject: [PATCH 096/232] chore(deps): bump github.com/lib/pq from 1.10.9 to 1.11.1 (#18747) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 3e0b88c16cfcc..7209a30c5f8c7 100644 --- a/go.mod +++ b/go.mod @@ -80,7 +80,7 @@ require ( github.com/jeremywohl/flatten v1.0.1 github.com/joshdk/go-junit v1.0.0 github.com/klauspost/compress v1.18.3 - github.com/lib/pq v1.10.9 + github.com/lib/pq v1.11.1 github.com/machinebox/graphql v0.2.2 github.com/mailru/easyjson v0.9.1 
github.com/mdlayher/vsock v1.2.1 diff --git a/go.sum b/go.sum index c8f1a2a879587..90c1294992949 100644 --- a/go.sum +++ b/go.sum @@ -1104,8 +1104,9 @@ github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.11.1 h1:wuChtj2hfsGmmx3nf1m7xC2XpK6OtelS2shMY+bGMtI= +github.com/lib/pq v1.11.1/go.mod h1:/p+8NSbOcwzAEI7wiMXFlgydTwcgTr3OSKMsD2BitpA= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= From 131f7913d46961db503fe4b521985664f977fb7d Mon Sep 17 00:00:00 2001 From: Tom Martensen Date: Mon, 2 Feb 2026 18:07:44 +0100 Subject: [PATCH 097/232] chore: enable FACT_VERSION updater (#18797) --- .../workflows/update_collector_periodic.yaml | 3 +- .github/workflows/update_fact_periodic.yaml | 50 +++++++++++++++++++ .../workflows/update_scanner_periodic.yaml | 1 + 3 files changed, 53 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/update_fact_periodic.yaml diff --git a/.github/workflows/update_collector_periodic.yaml b/.github/workflows/update_collector_periodic.yaml index db4a50239f689..69d0fd61cb0ad 100644 --- a/.github/workflows/update_collector_periodic.yaml +++ b/.github/workflows/update_collector_periodic.yaml @@ -2,7 +2,7 @@ name: Update collector version on: workflow_dispatch: schedule: - - cron: 0 5 * * 1-5 + - cron: 0 5 * * 1 jobs: update-collector: if: ${{ github.repository_owner == 'stackrox' }} @@ -37,6 +37,7 @@ jobs: labels: | ci-all-qa-tests dependencies + konflux-build team-reviewers: | collector-team draft: false diff --git a/.github/workflows/update_fact_periodic.yaml b/.github/workflows/update_fact_periodic.yaml new file mode 100644 index 0000000000000..160203926f04a --- /dev/null +++ b/.github/workflows/update_fact_periodic.yaml @@ -0,0 +1,50 @@ +name: Update fact version +on: + workflow_dispatch: + schedule: + - cron: 0 5 * * 1 +jobs: + update-fact: + if: ${{ github.repository_owner == 'stackrox' }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + with: + ref: master # this is our target branch + fetch-depth: 0 # we need to fetch all branches + - name: Checkout fact repo + uses: actions/checkout@v6 + with: + repository: stackrox/fact + path: deps/fact + fetch-depth: 0 # we need to fetch tags + - name: Update version + run: make -sC deps/fact tag | tee FACT_VERSION + - name: Create Pull Request + id: cpr + uses: peter-evans/create-pull-request@v8 + with: + token: '${{ secrets.RHACS_BOT_GITHUB_TOKEN }}' + commit-message: Update FACT_VERSION + committer: '${{ secrets.RHACS_BOT_GITHUB_USERNAME }} <${{ secrets.RHACS_BOT_GITHUB_EMAIL }}>' + author: '${{ secrets.RHACS_BOT_GITHUB_USERNAME }} <${{ secrets.RHACS_BOT_GITHUB_EMAIL }}>' + branch: update_fact_version + signoff: false + delete-branch: true + title: 'chore(fact): Update FACT_VERSION' + body: | + Daily update of FACT_VERSION to latest master version + labels: | + ci-all-qa-tests + dependencies + 
konflux-build + team-reviewers: | + collector-team + draft: false + - name: Enable Pull Request Automerge + if: steps.cpr.outputs.pull-request-operation == 'created' + uses: peter-evans/enable-pull-request-automerge@v3 + with: + token: '${{ secrets.RHACS_BOT_GITHUB_TOKEN }}' + pull-request-number: '${{ steps.cpr.outputs.pull-request-number }}' + merge-method: squash diff --git a/.github/workflows/update_scanner_periodic.yaml b/.github/workflows/update_scanner_periodic.yaml index 55acd4e23e780..c00ed436f96e3 100644 --- a/.github/workflows/update_scanner_periodic.yaml +++ b/.github/workflows/update_scanner_periodic.yaml @@ -37,6 +37,7 @@ jobs: labels: | ci-all-qa-tests dependencies + konflux-build team-reviewers: scanner draft: false - name: Enable Pull Request Automerge From e08dfd2c6f8edb999acbc512cdfec4280754257f Mon Sep 17 00:00:00 2001 From: Alex Vulaj Date: Mon, 2 Feb 2026 12:08:51 -0500 Subject: [PATCH 098/232] ROX-27340: Sanitize db errors and fix nil pointer panic in pagination (#18579) --- pkg/grpc/errors/interceptor.go | 23 +++++++++++-- pkg/grpc/errors/interceptor_test.go | 51 +++++++++++++++++++++++++++++ pkg/search/paginated/paginated.go | 4 ++- 3 files changed, 74 insertions(+), 4 deletions(-) diff --git a/pkg/grpc/errors/interceptor.go b/pkg/grpc/errors/interceptor.go index f890dc9b4f0eb..6b6669a7b45f4 100644 --- a/pkg/grpc/errors/interceptor.go +++ b/pkg/grpc/errors/interceptor.go @@ -5,8 +5,9 @@ import ( "errors" "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/jackc/pgx/v5/pgconn" "github.com/stackrox/rox/pkg/errox" - errox_grpc "github.com/stackrox/rox/pkg/errox/grpc" + erroxgrpc "github.com/stackrox/rox/pkg/errox/grpc" "github.com/stackrox/rox/pkg/logging" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -102,7 +103,23 @@ func ErrToGrpcStatus(err error) *status.Status { if s := unwrapGRPCStatus(err); s != nil { code = s.Code() } else { - code = errox_grpc.RoxErrorToGRPCCode(err) + code = erroxgrpc.RoxErrorToGRPCCode(err) } - return status.New(code, err.Error()) + return status.New(code, sanitizeErrorMessage(err)) +} + +// sanitizeErrorMessage removes sensitive internal error details from error messages before they are sent to clients. +// This prevents disclosure of database schema, SQL syntax errors, and other implementation details. 
+func sanitizeErrorMessage(err error) string { + if err == nil { + return "" + } + + // Check if this is a PostgreSQL error - if so, don't expose internal details + var pgErr *pgconn.PgError + if errors.As(err, &pgErr) { + return "Database operation failed" + } + + return err.Error() } diff --git a/pkg/grpc/errors/interceptor_test.go b/pkg/grpc/errors/interceptor_test.go index dca50676a7ee2..00cbd1ee3dde4 100644 --- a/pkg/grpc/errors/interceptor_test.go +++ b/pkg/grpc/errors/interceptor_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/jackc/pgx/v5/pgconn" "github.com/pkg/errors" "github.com/stackrox/rox/pkg/errox" "github.com/stackrox/rox/pkg/logging" @@ -285,3 +286,53 @@ func TestErrToGRPCStatus(t *testing.T) { }) } } + +func TestSanitizeErrorMessage(t *testing.T) { + tests := []struct { + name string + err error + expected string + }{ + { + name: "nil error", + err: nil, + expected: "", + }, + { + name: "PostgreSQL error should be sanitized", + err: &pgconn.PgError{Code: "42P01", Message: "relation \"secret_table\" does not exist"}, + expected: "Database operation failed", + }, + { + name: "Wrapped PostgreSQL error should be sanitized", + err: errors.Wrap(&pgconn.PgError{Code: "23505", Message: "duplicate key value violates unique constraint"}, "failed to insert"), + expected: "Database operation failed", + }, + { + name: "nil pointer dereference should pass through", + err: errors.New("runtime error: invalid memory address or nil pointer dereference"), + expected: "runtime error: invalid memory address or nil pointer dereference", + }, + { + name: "runtime error should pass through", + err: errors.New("runtime error: index out of range"), + expected: "runtime error: index out of range", + }, + { + name: "normal error should pass through", + err: errox.NotFound, + expected: "not found", + }, + { + name: "normal wrapped error should pass through", + err: errors.Wrap(errox.InvalidArgs, "bad request"), + expected: "bad request: invalid arguments", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := sanitizeErrorMessage(tt.err) + assert.Equal(t, tt.expected, result) + }) + } +} diff --git a/pkg/search/paginated/paginated.go b/pkg/search/paginated/paginated.go index f1e154317e486..6606d89077615 100644 --- a/pkg/search/paginated/paginated.go +++ b/pkg/search/paginated/paginated.go @@ -125,7 +125,9 @@ func FillPaginationV2(query *v1.Query, pagination *v2.Pagination, maxLimit int32 // Fill in sort options. for _, so := range pagination.GetSortOptions() { - queryPagination.SortOptions = append(queryPagination.SortOptions, toQuerySortOptionV2(so)) + if so != nil { + queryPagination.SortOptions = append(queryPagination.SortOptions, toQuerySortOptionV2(so)) + } } // Prefer the new field over the old one. 
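
Editor's note on the patch above: the sanitizer depends on errors.As walking the wrap chain, so a *pgconn.PgError is still recognized after it has been wrapped with extra context, which is why the wrapped case in the new interceptor test also collapses to the generic message. Below is a minimal, self-contained sketch of that behavior; the sanitize helper, the sample error values, and the main function are illustrative stand-ins, not code from the patch.

```go
// Sketch only: demonstrates how errors.As finds a wrapped *pgconn.PgError,
// mirroring the sanitization approach used in the commit above.
package main

import (
	"errors"
	"fmt"

	"github.com/jackc/pgx/v5/pgconn"
	pkgerrors "github.com/pkg/errors"
)

// sanitize hides database details but passes other error messages through.
func sanitize(err error) string {
	var pgErr *pgconn.PgError
	if errors.As(err, &pgErr) {
		// Do not leak SQL syntax or schema information to API clients.
		return "Database operation failed"
	}
	return err.Error()
}

func main() {
	raw := &pgconn.PgError{Code: "42P01", Message: `relation "secret_table" does not exist`}
	wrapped := pkgerrors.Wrap(raw, "failed to query images")

	fmt.Println(sanitize(wrapped))                 // Database operation failed
	fmt.Println(sanitize(errors.New("not found"))) // not found
}
```

Using errors.As rather than a direct type assertion is what makes the wrapped case work, since pkg/errors wrapping keeps the original *pgconn.PgError reachable through Unwrap. The pagination fix in the same commit is simply a nil check on each sort option before conversion, as shown in the hunk above, so it needs no separate example.
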
From 6f2b8fce1d11f53fb2ead5bbb01b6498b0e22362 Mon Sep 17 00:00:00 2001 From: rhacs-bot <148914812+rhacs-bot@users.noreply.github.com> Date: Mon, 2 Feb 2026 22:44:05 +0100 Subject: [PATCH 099/232] chore(scanner): Update SCANNER_VERSION (#17702) --- SCANNER_VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SCANNER_VERSION b/SCANNER_VERSION index 0aede1756389d..1f04287548a48 100644 --- a/SCANNER_VERSION +++ b/SCANNER_VERSION @@ -1 +1 @@ -2.38.x-24-gcd5fb7a6d1 +2.38.x-157-g493a1461f0 From af07c1e50ed5fe86a940b77e789c12b97bbc2868 Mon Sep 17 00:00:00 2001 From: Yi Li Date: Mon, 2 Feb 2026 21:39:25 -0600 Subject: [PATCH 100/232] ROX-32879: update components when base images are updated (#18791) --- .../datastore/store/v2/postgres/store.go | 9 +++++++ .../imagev2/datastore/store/postgres/store.go | 9 +++++++ pkg/baseimage/util.go | 24 +++++++++++++++++++ 3 files changed, 42 insertions(+) create mode 100644 pkg/baseimage/util.go diff --git a/central/image/datastore/store/v2/postgres/store.go b/central/image/datastore/store/v2/postgres/store.go index 59e3921118c99..cb3db6ca3fd81 100644 --- a/central/image/datastore/store/v2/postgres/store.go +++ b/central/image/datastore/store/v2/postgres/store.go @@ -16,7 +16,9 @@ import ( "github.com/stackrox/rox/central/metrics" v1 "github.com/stackrox/rox/generated/api/v1" "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/baseimage" "github.com/stackrox/rox/pkg/concurrency" + "github.com/stackrox/rox/pkg/features" "github.com/stackrox/rox/pkg/logging" ops "github.com/stackrox/rox/pkg/metrics" "github.com/stackrox/rox/pkg/postgres" @@ -475,6 +477,13 @@ func (s *storeImpl) upsert(ctx context.Context, obj *storage.Image) error { scanUpdated = scanUpdated || componentsEmpty + if features.BaseImageDetection.Enabled() { + // Re-verify base images when base image detection is enabled: + // 1. Legacy images may lack base image info if the feature was enabled after they were scanned. + // 2. User-provided base images may change over time. + scanUpdated = scanUpdated || baseimage.BaseImagesUpdated(oldImage.GetBaseImageInfo(), obj.GetBaseImageInfo()) + } + splitParts, err := common.SplitV2(obj, scanUpdated) if err != nil { return err diff --git a/central/imagev2/datastore/store/postgres/store.go b/central/imagev2/datastore/store/postgres/store.go index 741bea8385c5d..c246da7cd07f6 100644 --- a/central/imagev2/datastore/store/postgres/store.go +++ b/central/imagev2/datastore/store/postgres/store.go @@ -15,7 +15,9 @@ import ( "github.com/stackrox/rox/central/metrics" v1 "github.com/stackrox/rox/generated/api/v1" "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/baseimage" "github.com/stackrox/rox/pkg/concurrency" + "github.com/stackrox/rox/pkg/features" "github.com/stackrox/rox/pkg/logging" ops "github.com/stackrox/rox/pkg/metrics" "github.com/stackrox/rox/pkg/postgres" @@ -520,6 +522,13 @@ func (s *storeImpl) upsert(ctx context.Context, obj *storage.ImageV2) error { scanUpdated = scanUpdated || componentsEmpty + if features.BaseImageDetection.Enabled() { + // Re-verify base images when base image detection is enabled: + // 1. Legacy images may lack base image info if the feature was enabled after they were scanned. + // 2. User-provided base images may change over time. 
+ scanUpdated = scanUpdated || baseimage.BaseImagesUpdated(oldImage.GetBaseImageInfo(), obj.GetBaseImageInfo()) + } + splitParts, err := common.Split(obj, scanUpdated) if err != nil { return err diff --git a/pkg/baseimage/util.go b/pkg/baseimage/util.go new file mode 100644 index 0000000000000..f19bba9513cd8 --- /dev/null +++ b/pkg/baseimage/util.go @@ -0,0 +1,24 @@ +package baseimage + +import "github.com/stackrox/rox/generated/storage" + +func BaseImagesUpdated(prev, cur []*storage.BaseImageInfo) bool { + if len(prev) != len(cur) { + return true + } + + existing := make(map[string]int) + for _, p := range prev { + existing[p.GetBaseImageDigest()]++ + } + + for _, c := range cur { + digest := c.GetBaseImageDigest() + if count, ok := existing[digest]; !ok || count == 0 { + return true // Found a current digest or more occurrences than before + } + existing[digest]-- + } + + return false +} From 637723a1ae895092de4164ee7fe6eb266d53d4ec Mon Sep 17 00:00:00 2001 From: Mark Pedrotti Date: Mon, 2 Feb 2026 22:39:57 -0500 Subject: [PATCH 101/232] ROX-32847: Enable ROX_BASE_IMAGE_DETECTION (#18708) Co-authored-by: Mark Pedrotti Co-authored-by: J. Victor Martins --- pkg/features/list.go | 2 +- pkg/images/enricher/enricher_impl_test.go | 8 +++++++- pkg/images/enricher/enricher_v2_impl_test.go | 8 +++++++- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/pkg/features/list.go b/pkg/features/list.go index 1bf3a2d4e7823..3cfa019b415a8 100644 --- a/pkg/features/list.go +++ b/pkg/features/list.go @@ -114,7 +114,7 @@ var ( CVEFixTimestampCriteria = registerFeature("Enable grace period criteria based on CVE fix timestamp", "ROX_CVE_FIX_TIMESTAMP", enabled) // BaseImageDetection enables base image detection and management functionality. - BaseImageDetection = registerFeature("Enable base image detection and management functionality", "ROX_BASE_IMAGE_DETECTION") + BaseImageDetection = registerFeature("Enable base image detection and management functionality", "ROX_BASE_IMAGE_DETECTION", enabled) // DelegatedBaseImageScanning enables delegation of base image repository scanning to secured clusters. 
DelegatedBaseImageScanning = registerFeature("Enable delegated base image scanning to secured clusters", "ROX_DELEGATED_BASE_IMAGE_SCANNING") diff --git a/pkg/images/enricher/enricher_impl_test.go b/pkg/images/enricher/enricher_impl_test.go index 9035dc89b842f..58bc904da736a 100644 --- a/pkg/images/enricher/enricher_impl_test.go +++ b/pkg/images/enricher/enricher_impl_test.go @@ -42,6 +42,10 @@ func imageGetterPanicOnCall(_ context.Context, _ string) (*storage.Image, bool, panic("Unexpected call to imageGetter") } +func emptyBaseImageGetter(_ context.Context, _ []string) ([]*storage.BaseImageInfo, error) { + return nil, nil +} + var _ signatures.SignatureFetcher = (*fakeSigFetcher)(nil) var _ scannertypes.Scanner = (*fakeScanner)(nil) @@ -348,6 +352,7 @@ func TestEnricherFlow(t *testing.T) { imageGetter: emptyImageGetter, signatureIntegrationGetter: emptySignatureIntegrationGetter, signatureFetcher: &fakeSigFetcher{}, + baseImageGetter: emptyBaseImageGetter, } if c.inMetadataCache { enricherImpl.metadataCache.Add(c.image.GetId(), c.image.GetMetadata()) @@ -400,6 +405,7 @@ func TestCVESuppression(t *testing.T) { imageGetter: emptyImageGetter, signatureIntegrationGetter: emptySignatureIntegrationGetter, signatureFetcher: &fakeSigFetcher{}, + baseImageGetter: emptyBaseImageGetter, } img := &storage.Image{Id: "id", Name: &storage.ImageName{Registry: "reg"}, @@ -1446,7 +1452,7 @@ func TestEnrichImageWithBaseImages(t *testing.T) { func newEnricher(set *mocks.MockSet, mockReporter *reporterMocks.MockReporter) ImageEnricher { return New(&fakeCVESuppressorV2{}, set, pkgMetrics.CentralSubsystem, newCache(), - nil, + emptyBaseImageGetter, emptyImageGetter, mockReporter, emptySignatureIntegrationGetter, nil) } diff --git a/pkg/images/enricher/enricher_v2_impl_test.go b/pkg/images/enricher/enricher_v2_impl_test.go index cfa31cb4f0779..97059dd818f6b 100644 --- a/pkg/images/enricher/enricher_v2_impl_test.go +++ b/pkg/images/enricher/enricher_v2_impl_test.go @@ -43,6 +43,10 @@ func imageGetterV2PanicOnCall(_ context.Context, _ string) (*storage.ImageV2, bo panic("Unexpected call to imageGetter") } +func emptyBaseImageGetterV2(_ context.Context, _ []string) ([]*storage.BaseImageInfo, error) { + return nil, nil +} + var _ signatures.SignatureFetcher = (*fakeSigFetcher)(nil) var _ scannertypes.Scanner = (*fakeScanner)(nil) @@ -357,6 +361,7 @@ func TestEnricherV2Flow(t *testing.T) { imageGetter: emptyImageGetterV2, signatureIntegrationGetter: emptySignatureIntegrationGetter, signatureFetcher: &fakeSigFetcher{}, + baseImageGetter: emptyBaseImageGetterV2, } if c.inMetadataCache { enricherImpl.metadataCache.Add(c.image.GetId(), c.image.GetMetadata()) @@ -409,6 +414,7 @@ func TestCVESuppressionV2(t *testing.T) { imageGetter: emptyImageGetterV2, signatureIntegrationGetter: emptySignatureIntegrationGetter, signatureFetcher: &fakeSigFetcher{}, + baseImageGetter: emptyBaseImageGetterV2, } img := &storage.ImageV2{ @@ -1313,7 +1319,7 @@ func TestEnrichImageWithBaseImagesV2(t *testing.T) { func newEnricherV2(set *mocks.MockSet, mockReporter *reporterMocks.MockReporter) ImageEnricherV2 { return NewV2(&fakeCVESuppressorV2{}, set, pkgMetrics.CentralSubsystem, newCache(), - nil, + emptyBaseImageGetterV2, emptyImageGetterV2, mockReporter, emptySignatureIntegrationGetter, nil) } From 5a0a4654750515fdfcb71747303089229abd30bd Mon Sep 17 00:00:00 2001 From: Misha Sugakov <537715+msugakov@users.noreply.github.com> Date: Tue, 3 Feb 2026 14:16:11 +0100 Subject: [PATCH 102/232] fix(build): Revert ROX-32847: Enable 
ROX_BASE_IMAGE_DETECTION (#18806) --- pkg/features/list.go | 2 +- pkg/images/enricher/enricher_impl_test.go | 8 +------- pkg/images/enricher/enricher_v2_impl_test.go | 8 +------- 3 files changed, 3 insertions(+), 15 deletions(-) diff --git a/pkg/features/list.go b/pkg/features/list.go index 3cfa019b415a8..1bf3a2d4e7823 100644 --- a/pkg/features/list.go +++ b/pkg/features/list.go @@ -114,7 +114,7 @@ var ( CVEFixTimestampCriteria = registerFeature("Enable grace period criteria based on CVE fix timestamp", "ROX_CVE_FIX_TIMESTAMP", enabled) // BaseImageDetection enables base image detection and management functionality. - BaseImageDetection = registerFeature("Enable base image detection and management functionality", "ROX_BASE_IMAGE_DETECTION", enabled) + BaseImageDetection = registerFeature("Enable base image detection and management functionality", "ROX_BASE_IMAGE_DETECTION") // DelegatedBaseImageScanning enables delegation of base image repository scanning to secured clusters. DelegatedBaseImageScanning = registerFeature("Enable delegated base image scanning to secured clusters", "ROX_DELEGATED_BASE_IMAGE_SCANNING") diff --git a/pkg/images/enricher/enricher_impl_test.go b/pkg/images/enricher/enricher_impl_test.go index 58bc904da736a..9035dc89b842f 100644 --- a/pkg/images/enricher/enricher_impl_test.go +++ b/pkg/images/enricher/enricher_impl_test.go @@ -42,10 +42,6 @@ func imageGetterPanicOnCall(_ context.Context, _ string) (*storage.Image, bool, panic("Unexpected call to imageGetter") } -func emptyBaseImageGetter(_ context.Context, _ []string) ([]*storage.BaseImageInfo, error) { - return nil, nil -} - var _ signatures.SignatureFetcher = (*fakeSigFetcher)(nil) var _ scannertypes.Scanner = (*fakeScanner)(nil) @@ -352,7 +348,6 @@ func TestEnricherFlow(t *testing.T) { imageGetter: emptyImageGetter, signatureIntegrationGetter: emptySignatureIntegrationGetter, signatureFetcher: &fakeSigFetcher{}, - baseImageGetter: emptyBaseImageGetter, } if c.inMetadataCache { enricherImpl.metadataCache.Add(c.image.GetId(), c.image.GetMetadata()) @@ -405,7 +400,6 @@ func TestCVESuppression(t *testing.T) { imageGetter: emptyImageGetter, signatureIntegrationGetter: emptySignatureIntegrationGetter, signatureFetcher: &fakeSigFetcher{}, - baseImageGetter: emptyBaseImageGetter, } img := &storage.Image{Id: "id", Name: &storage.ImageName{Registry: "reg"}, @@ -1452,7 +1446,7 @@ func TestEnrichImageWithBaseImages(t *testing.T) { func newEnricher(set *mocks.MockSet, mockReporter *reporterMocks.MockReporter) ImageEnricher { return New(&fakeCVESuppressorV2{}, set, pkgMetrics.CentralSubsystem, newCache(), - emptyBaseImageGetter, + nil, emptyImageGetter, mockReporter, emptySignatureIntegrationGetter, nil) } diff --git a/pkg/images/enricher/enricher_v2_impl_test.go b/pkg/images/enricher/enricher_v2_impl_test.go index 97059dd818f6b..cfa31cb4f0779 100644 --- a/pkg/images/enricher/enricher_v2_impl_test.go +++ b/pkg/images/enricher/enricher_v2_impl_test.go @@ -43,10 +43,6 @@ func imageGetterV2PanicOnCall(_ context.Context, _ string) (*storage.ImageV2, bo panic("Unexpected call to imageGetter") } -func emptyBaseImageGetterV2(_ context.Context, _ []string) ([]*storage.BaseImageInfo, error) { - return nil, nil -} - var _ signatures.SignatureFetcher = (*fakeSigFetcher)(nil) var _ scannertypes.Scanner = (*fakeScanner)(nil) @@ -361,7 +357,6 @@ func TestEnricherV2Flow(t *testing.T) { imageGetter: emptyImageGetterV2, signatureIntegrationGetter: emptySignatureIntegrationGetter, signatureFetcher: &fakeSigFetcher{}, - baseImageGetter: 
emptyBaseImageGetterV2, } if c.inMetadataCache { enricherImpl.metadataCache.Add(c.image.GetId(), c.image.GetMetadata()) @@ -414,7 +409,6 @@ func TestCVESuppressionV2(t *testing.T) { imageGetter: emptyImageGetterV2, signatureIntegrationGetter: emptySignatureIntegrationGetter, signatureFetcher: &fakeSigFetcher{}, - baseImageGetter: emptyBaseImageGetterV2, } img := &storage.ImageV2{ @@ -1319,7 +1313,7 @@ func TestEnrichImageWithBaseImagesV2(t *testing.T) { func newEnricherV2(set *mocks.MockSet, mockReporter *reporterMocks.MockReporter) ImageEnricherV2 { return NewV2(&fakeCVESuppressorV2{}, set, pkgMetrics.CentralSubsystem, newCache(), - emptyBaseImageGetterV2, + nil, emptyImageGetterV2, mockReporter, emptySignatureIntegrationGetter, nil) } From c2d5389af150b5c44a0ab252654edc059280bc3e Mon Sep 17 00:00:00 2001 From: Charmik Sheth <101146970+charmik-redhat@users.noreply.github.com> Date: Tue, 3 Feb 2026 22:57:41 +0530 Subject: [PATCH 103/232] ROX-32925 : Address bugs causing flappy fix timestamp (#18789) --- .../cve/converter/utils/convert_utils_v2.go | 2 + .../converter/utils/convert_utils_v2_test.go | 2 + .../image/info/datastore/datastore_impl.go | 7 +- .../info/datastore/datastore_impl_test.go | 9 +- central/image/datastore/datastore_impl.go | 7 +- central/image/service/service_impl.go | 5 +- central/imagev2/datastore/datastore_impl.go | 7 +- central/vulnmgmt/service/service_impl.go | 2 + generated/storage/cve.pb.go | 15 +++- generated/storage/cve_vtproto.pb.go | 85 +++++++++++++++++++ pkg/images/utils/test/utils_test.go | 39 +++++++++ pkg/images/utils/utils.go | 8 ++ proto/storage/cve.proto | 1 + proto/storage/proto.lock | 5 ++ 14 files changed, 183 insertions(+), 11 deletions(-) diff --git a/central/cve/converter/utils/convert_utils_v2.go b/central/cve/converter/utils/convert_utils_v2.go index fe6546ec72a99..4fe09a77b43d1 100644 --- a/central/cve/converter/utils/convert_utils_v2.go +++ b/central/cve/converter/utils/convert_utils_v2.go @@ -33,6 +33,7 @@ func ImageCVEV2ToEmbeddedVulnerability(vuln *storage.ImageCVEV2) *storage.Embedd VulnerabilityType: storage.EmbeddedVulnerability_IMAGE_VULNERABILITY, VulnerabilityTypes: []storage.EmbeddedVulnerability_VulnerabilityType{storage.EmbeddedVulnerability_IMAGE_VULNERABILITY}, State: vuln.GetState(), + Datasource: vuln.GetDatasource(), } if vuln.GetIsFixable() { @@ -105,6 +106,7 @@ func EmbeddedVulnerabilityToImageCVEV2(imageID string, componentID string, index IsFixable: from.GetFixedBy() != "", ImpactScore: impactScore, Advisory: from.GetAdvisory(), + Datasource: from.GetDatasource(), } if !features.FlattenImageData.Enabled() { ret.ImageId = imageID diff --git a/central/cve/converter/utils/convert_utils_v2_test.go b/central/cve/converter/utils/convert_utils_v2_test.go index 0bf10ac6b1f02..d37f78f1eb1ce 100644 --- a/central/cve/converter/utils/convert_utils_v2_test.go +++ b/central/cve/converter/utils/convert_utils_v2_test.go @@ -91,6 +91,7 @@ var ( EpssProbability: 22, EpssPercentile: 98, }, + Datasource: "test-ds", }, { Cve: "cve2", @@ -225,6 +226,7 @@ func getTestCVEs(t *testing.T) []*storage.ImageCVEV2 { IsFixable: false, HasFixedBy: nil, ComponentId: getTestComponentID(0), + Datasource: "test-ds", }, { Id: getTestCVEID(testVulns[1], getTestComponentID(1), 1), diff --git a/central/cve/image/info/datastore/datastore_impl.go b/central/cve/image/info/datastore/datastore_impl.go index 58c5c2fff5ec7..8bbc2f0259db5 100644 --- a/central/cve/image/info/datastore/datastore_impl.go +++ b/central/cve/image/info/datastore/datastore_impl.go @@ -66,7 
+66,12 @@ func (ds *datastoreImpl) UpsertMany(ctx context.Context, infos []*storage.ImageC // Populate both maps separately - can't use index-based loop because // existing may not be in the same order as infos for _, info := range infos { - newInfoMap[info.GetId()] = info + if prev, ok := newInfoMap[info.GetId()]; ok { + // if there are are multiple infos with same id, earlier timestamps take precedence + newInfoMap[info.GetId()] = updateTimestamps(prev, info) + } else { + newInfoMap[info.GetId()] = info + } } for _, info := range existing { oldInfoMap[info.GetId()] = info diff --git a/central/cve/image/info/datastore/datastore_impl_test.go b/central/cve/image/info/datastore/datastore_impl_test.go index 26f163a0af891..5798ed73e5c81 100644 --- a/central/cve/image/info/datastore/datastore_impl_test.go +++ b/central/cve/image/info/datastore/datastore_impl_test.go @@ -167,9 +167,11 @@ func (s *ImageCVEInfoDataStoreSuite) TestUpsert_PreservesTimestamps() { // TestUpsertMany_PreservesTimestamps tests that UpsertMany preserves earlier timestamps. func (s *ImageCVEInfoDataStoreSuite) TestUpsertMany_PreservesTimestamps() { earlier := time.Now().Add(-24 * time.Hour) + earlier2 := time.Now().Add(-12 * time.Hour) later := time.Now() - // First, insert infos with earlier timestamps + // First, insert infos with earlier timestamps. + // If there are multiple infos with same id, earlier timestamps should be preserved. firstInfos := []*storage.ImageCVEInfo{ { Id: "test-cve-1#test-pkg#test-ds", @@ -181,6 +183,11 @@ func (s *ImageCVEInfoDataStoreSuite) TestUpsertMany_PreservesTimestamps() { FirstSystemOccurrence: timestamppb.New(earlier), FixAvailableTimestamp: timestamppb.New(earlier), }, + { + Id: "test-cve-1#test-pkg#test-ds", + FirstSystemOccurrence: nil, + FixAvailableTimestamp: timestamppb.New(earlier2), + }, } err := s.datastore.UpsertMany(s.ctx, firstInfos) s.NoError(err) diff --git a/central/image/datastore/datastore_impl.go b/central/image/datastore/datastore_impl.go index d861084214664..f0547036f40d7 100644 --- a/central/image/datastore/datastore_impl.go +++ b/central/image/datastore/datastore_impl.go @@ -459,11 +459,12 @@ func (ds *datastoreImpl) enrichCVEsFromImageCVEInfo(ctx context.Context, image * for _, vuln := range component.GetVulns() { id := cve.ImageCVEInfoID(vuln.GetCve(), component.GetName(), vuln.GetDatasource()) if info, ok := infoMap[id]; ok { - vuln.FixAvailableTimestamp = info.GetFixAvailableTimestamp() + if vuln.GetFixAvailableTimestamp() == nil && vuln.GetFixedBy() != "" { + // Set the fix timestamp if it was not provided by the scanner + vuln.FixAvailableTimestamp = info.GetFixAvailableTimestamp() + } vuln.FirstSystemOccurrence = info.GetFirstSystemOccurrence() } - // Blank out datasource after using it - this is internal scanner data not meant for end users - vuln.Datasource = "" } } diff --git a/central/image/service/service_impl.go b/central/image/service/service_impl.go index 6dc7997147909..e06a4622a53f2 100644 --- a/central/image/service/service_impl.go +++ b/central/image/service/service_impl.go @@ -164,6 +164,7 @@ func (s *serviceImpl) GetImage(ctx context.Context, request *v1.GetImageRequest) // This modifies the image object utils.StripCVEDescriptionsNoClone(image) } + utils.StripDatasourceNoClone(image.GetScan()) return image, nil } @@ -216,6 +217,7 @@ func (s *serviceImpl) ExportImages(req *v1.ExportImageRequest, srv v1.ImageServi defer cancel() } return s.mappingDatastore.WalkByQuery(ctx, parsedQuery, func(image *storage.Image) error { + 
utils.StripDatasourceNoClone(image.GetScan()) if err := srv.Send(&v1.ExportImageResponse{Image: image}); err != nil { return err } @@ -622,6 +624,7 @@ func (s *serviceImpl) ScanImage(ctx context.Context, request *v1.ScanImageReques if !request.GetIncludeSnoozed() { utils.FilterSuppressedCVEsNoCloneV2(imgV2) } + utils.StripDatasourceNoClone(imgV2.GetScan()) return utils.ConvertToV1(imgV2), nil } @@ -648,7 +651,7 @@ func (s *serviceImpl) ScanImage(ctx context.Context, request *v1.ScanImageReques if !request.GetIncludeSnoozed() { utils.FilterSuppressedCVEsNoClone(img) } - + utils.StripDatasourceNoClone(img.GetScan()) return img, nil } diff --git a/central/imagev2/datastore/datastore_impl.go b/central/imagev2/datastore/datastore_impl.go index 6971d114fa920..08fb6c5f2f903 100644 --- a/central/imagev2/datastore/datastore_impl.go +++ b/central/imagev2/datastore/datastore_impl.go @@ -465,11 +465,12 @@ func (ds *datastoreImpl) enrichCVEsFromImageCVEInfo(ctx context.Context, image * for _, vuln := range component.GetVulns() { id := cve.ImageCVEInfoID(vuln.GetCve(), component.GetName(), vuln.GetDatasource()) if info, ok := infoMap[id]; ok { - vuln.FixAvailableTimestamp = info.GetFixAvailableTimestamp() + if vuln.GetFixAvailableTimestamp() == nil && vuln.GetFixedBy() != "" { + // Set the fix timestamp if it was not provided by the scanner + vuln.FixAvailableTimestamp = info.GetFixAvailableTimestamp() + } vuln.FirstSystemOccurrence = info.GetFirstSystemOccurrence() } - // Blank out datasource after using it - this is internal scanner data not meant for end users - vuln.Datasource = "" } } diff --git a/central/vulnmgmt/service/service_impl.go b/central/vulnmgmt/service/service_impl.go index b52b6c6a819f3..0ccf0ada10704 100644 --- a/central/vulnmgmt/service/service_impl.go +++ b/central/vulnmgmt/service/service_impl.go @@ -17,6 +17,7 @@ import ( "github.com/stackrox/rox/pkg/grpc/authz" "github.com/stackrox/rox/pkg/grpc/authz/perrpc" "github.com/stackrox/rox/pkg/grpc/authz/user" + "github.com/stackrox/rox/pkg/images/utils" "github.com/stackrox/rox/pkg/logging" "github.com/stackrox/rox/pkg/postgres" "github.com/stackrox/rox/pkg/sac/resources" @@ -120,6 +121,7 @@ func (s *serviceImpl) VulnMgmtExportWorkloads(req *v1.VulnMgmtExportWorkloadsReq continue } if found { + utils.StripDatasourceNoClone(img.GetScan()) images = append(images, img) imageCache.Add(imgID, img) } else { diff --git a/generated/storage/cve.pb.go b/generated/storage/cve.pb.go index 89540f104688a..3b164cf577040 100644 --- a/generated/storage/cve.pb.go +++ b/generated/storage/cve.pb.go @@ -1590,6 +1590,7 @@ type ImageCVEV2 struct { ImageIdV2 string `protobuf:"bytes,15,opt,name=image_id_v2,json=imageIdV2,proto3" json:"image_id_v2,omitempty" sql:"fk(ImageV2:id),index=btree,allow-null"` // @gotags: sql:"fk(ImageV2:id),index=btree,allow-null" // Timestamp when the fix for this CVE was made available according to the sources. 
FixAvailableTimestamp *timestamppb.Timestamp `protobuf:"bytes,16,opt,name=fix_available_timestamp,json=fixAvailableTimestamp,proto3" json:"fix_available_timestamp,omitempty" search:"CVE Fix Available Timestamp,hidden"` // @gotags: search:"CVE Fix Available Timestamp,hidden" + Datasource string `protobuf:"bytes,17,opt,name=datasource,proto3" json:"datasource,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1746,6 +1747,13 @@ func (x *ImageCVEV2) GetFixAvailableTimestamp() *timestamppb.Timestamp { return nil } +func (x *ImageCVEV2) GetDatasource() string { + if x != nil { + return x.Datasource + } + return "" +} + type isImageCVEV2_HasFixedBy interface { isImageCVEV2_HasFixedBy() } @@ -2689,7 +2697,7 @@ const file_storage_cve_proto_rawDesc = "" + "\anvdcvss\x18\n" + " \x01(\x02R\anvdcvss\x125\n" + "\fcvss_metrics\x18\v \x03(\v2\x12.storage.CVSSScoreR\vcvssMetrics\x12E\n" + - "\x11nvd_score_version\x18\f \x01(\x0e2\x19.storage.CvssScoreVersionR\x0fnvdScoreVersion:\x02\x18\x01\"\xdc\x05\n" + + "\x11nvd_score_version\x18\f \x01(\x0e2\x19.storage.CvssScoreVersionR\x0fnvdScoreVersion:\x02\x18\x01\"\xfc\x05\n" + "\n" + "ImageCVEV2\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12\x1d\n" + @@ -2709,7 +2717,10 @@ const file_storage_cve_proto_rawDesc = "" + "\fcomponent_id\x18\r \x01(\tR\vcomponentId\x12-\n" + "\badvisory\x18\x0e \x01(\v2\x11.storage.AdvisoryR\badvisory\x12\x1e\n" + "\vimage_id_v2\x18\x0f \x01(\tR\timageIdV2\x12R\n" + - "\x17fix_available_timestamp\x18\x10 \x01(\v2\x1a.google.protobuf.TimestampR\x15fixAvailableTimestampB\x0e\n" + + "\x17fix_available_timestamp\x18\x10 \x01(\v2\x1a.google.protobuf.TimestampR\x15fixAvailableTimestamp\x12\x1e\n" + + "\n" + + "datasource\x18\x11 \x01(\tR\n" + + "datasourceB\x0e\n" + "\fhas_fixed_by\"\xe4\x03\n" + "\aNodeCVE\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x124\n" + diff --git a/generated/storage/cve_vtproto.pb.go b/generated/storage/cve_vtproto.pb.go index 35eb569be525f..e1fdecbbea325 100644 --- a/generated/storage/cve_vtproto.pb.go +++ b/generated/storage/cve_vtproto.pb.go @@ -270,6 +270,7 @@ func (m *ImageCVEV2) CloneVT() *ImageCVEV2 { r.Advisory = m.Advisory.CloneVT() r.ImageIdV2 = m.ImageIdV2 r.FixAvailableTimestamp = (*timestamppb.Timestamp)((*timestamppb1.Timestamp)(m.FixAvailableTimestamp).CloneVT()) + r.Datasource = m.Datasource if m.HasFixedBy != nil { r.HasFixedBy = m.HasFixedBy.(interface { CloneVT() isImageCVEV2_HasFixedBy @@ -911,6 +912,9 @@ func (this *ImageCVEV2) EqualVT(that *ImageCVEV2) bool { if !(*timestamppb1.Timestamp)(this.FixAvailableTimestamp).EqualVT((*timestamppb1.Timestamp)(that.FixAvailableTimestamp)) { return false } + if this.Datasource != that.Datasource { + return false + } return string(this.unknownFields) == string(that.unknownFields) } @@ -2037,6 +2041,15 @@ func (m *ImageCVEV2) MarshalToSizedBufferVT(dAtA []byte) (int, error) { } i -= size } + if len(m.Datasource) > 0 { + i -= len(m.Datasource) + copy(dAtA[i:], m.Datasource) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Datasource))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } if m.FixAvailableTimestamp != nil { size, err := (*timestamppb1.Timestamp)(m.FixAvailableTimestamp).MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -3121,6 +3134,10 @@ func (m *ImageCVEV2) SizeVT() (n int) { l = (*timestamppb1.Timestamp)(m.FixAvailableTimestamp).SizeVT() n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) } + l = len(m.Datasource) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } n += 
len(m.unknownFields) return n } @@ -5950,6 +5967,38 @@ func (m *ImageCVEV2) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Datasource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Datasource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -10099,6 +10148,42 @@ func (m *ImageCVEV2) UnmarshalVTUnsafe(dAtA []byte) error { return err } iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Datasource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var stringValue string + if intStringLen > 0 { + stringValue = unsafe.String(&dAtA[iNdEx], intStringLen) + } + m.Datasource = stringValue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) diff --git a/pkg/images/utils/test/utils_test.go b/pkg/images/utils/test/utils_test.go index 3723648eb746c..4ebfa4400c4fc 100644 --- a/pkg/images/utils/test/utils_test.go +++ b/pkg/images/utils/test/utils_test.go @@ -601,3 +601,42 @@ func TestFillScanStatsV2(t *testing.T) { }) } } + +func TestStripDatasourceNoClone(t *testing.T) { + original := &storage.ImageScan{ + Components: []*storage.EmbeddedImageScanComponent{ + { + Name: "comp1", + Vulns: []*storage.EmbeddedVulnerability{ + { + Cve: "cve-1", + Datasource: "test-ds-1", + }, + { + Cve: "cve-2", + Datasource: "test-ds-2", + }, + }, + }, + { + Name: "comp2", + Vulns: []*storage.EmbeddedVulnerability{ + { + Cve: "cve-3", + Datasource: "test-ds-3", + }, + { + Cve: "cve-4", + Datasource: "test-ds-4", + }, + }, + }, + }, + } + + utils.StripDatasourceNoClone(original) + assert.Equal(t, "", original.GetComponents()[0].GetVulns()[0].GetDatasource()) + assert.Equal(t, "", original.GetComponents()[0].GetVulns()[1].GetDatasource()) + assert.Equal(t, "", original.GetComponents()[1].GetVulns()[0].GetDatasource()) + assert.Equal(t, "", original.GetComponents()[1].GetVulns()[1].GetDatasource()) +} diff --git a/pkg/images/utils/utils.go b/pkg/images/utils/utils.go index d25df8df9db5b..8e920cb41becf 100644 --- a/pkg/images/utils/utils.go +++ b/pkg/images/utils/utils.go @@ -481,3 +481,11 @@ func FillScanStatsV2(i *storage.ImageV2) { } } } + +func StripDatasourceNoClone(scan *storage.ImageScan) { + for _, c := range scan.GetComponents() { + for _, v := range c.GetVulns() { + v.Datasource = "" + } + } +} diff --git a/proto/storage/cve.proto b/proto/storage/cve.proto index 
5a932d6db6d69..7ec3972237500 100644 --- a/proto/storage/cve.proto +++ b/proto/storage/cve.proto @@ -199,6 +199,7 @@ message ImageCVEV2 { // Timestamp when the fix for this CVE was made available according to the sources. google.protobuf.Timestamp fix_available_timestamp = 16; // @gotags: search:"CVE Fix Available Timestamp,hidden" + string datasource = 17; } message NodeCVE { diff --git a/proto/storage/proto.lock b/proto/storage/proto.lock index 0be9b354dc92e..983e6d118f52d 100644 --- a/proto/storage/proto.lock +++ b/proto/storage/proto.lock @@ -6603,6 +6603,11 @@ "id": 16, "name": "fix_available_timestamp", "type": "google.protobuf.Timestamp" + }, + { + "id": 17, + "name": "datasource", + "type": "string" } ] }, From ac4ec9a8bb7edc9bfcfee1bd6dd01266eed90944 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michael=20He=C3=9F?= Date: Tue, 3 Feb 2026 18:41:10 +0100 Subject: [PATCH 104/232] feat(operator): add Makefile variables for local operator development (#18598) Co-authored-by: Claude Sonnet 4.5 --- operator/Makefile | 9 +++++++-- operator/README.md | 4 ++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/operator/Makefile b/operator/Makefile index 919cd8e57f205..e19dec7c080a0 100644 --- a/operator/Makefile +++ b/operator/Makefile @@ -12,6 +12,10 @@ CSV_VERSION ?= v$(VERSION) # Set to empty string to echo some command lines which are hidden by default. SILENT ?= @ +# Python binary to use for bundle helpers. Can be overridden to use a specific version (e.g., python3.9). +# The Konflux build uses Python 3.9, so using python3.9 locally ensures compatibility. +PYTHON ?= python3 + # This can be adjusted if deploying into non-standard namespaces. For example, as in # # NAMESPACE=... make -C operator stackrox-image-pull-secret @@ -344,6 +348,7 @@ build/Dockerfile.gen: Dockerfile .PHONY: docker-build docker-build: build/Dockerfile.gen smuggled-status-sh ## Build docker image with the operator. DOCKER_BUILDKIT=1 BUILDKIT_PROGRESS=plain docker build \ + $(if $(DOCKER_BUILD_LOAD),--load) \ -t ${IMG} \ $(if $(GOARCH),--build-arg TARGET_ARCH=$(GOARCH)) \ -f $< \ @@ -426,7 +431,7 @@ upgrade-dirty-tag-via-olm: kuttl ##@ Bundle and Index build # Commands to enter local Python virtual environment and get needed dependencies there. -ACTIVATE_PYTHON = python3 -m venv bundle_helpers/.venv ;\ +ACTIVATE_PYTHON = $(PYTHON) -m venv bundle_helpers/.venv ;\ . bundle_helpers/.venv/bin/activate ;\ pip3 install --upgrade pip==21.3.1 setuptools==59.6.0 ;\ pip3 install -r bundle_helpers/requirements-gha.txt @@ -497,7 +502,7 @@ bundle-post-process: test-bundle-helpers operator-sdk ## Post-process CSV file t .PHONY: bundle-build bundle-build: bundle.Dockerfile bundle-post-process ## Build the bundle image. - docker build -f $< -t $(BUNDLE_IMG) . + docker build $(if $(DOCKER_BUILD_LOAD),--load) -f $< -t $(BUNDLE_IMG) . .PHONY: bundle-test bundle-test: operator-sdk bundle-post-process ## Run scorecard tests against bundle files. diff --git a/operator/README.md b/operator/README.md index 273efc6748165..81f980d6d3c10 100644 --- a/operator/README.md +++ b/operator/README.md @@ -113,6 +113,8 @@ The recommended approach is the following. ```bash $ docker save stackrox/stackrox-operator:$(make tag) | ssh -o StrictHostKeyChecking=no -i $(minikube ssh-key) docker@$(minikube ip) docker load ``` + _Alternatively you can also set `DOCKER_BUILD_LOAD=1` in the make environment in the previous build step. This will load images into the local Docker daemon instead of leaving them just in BuildKit cache._ + 3. 
Install CRDs and deploy operator resources ```bash $ make deploy @@ -149,6 +151,8 @@ $ make docker-build docker-push # Build and push bundle image $ make bundle-build docker-push-bundle ``` +_Note: Bundle helpers depend on Python <=3.13. By default, the build process will use the default Python version installed on the build host. You can override the python version if needed via the optional `PYTHON` env var, e.g. `PYTHON=python3.10 make bundle-build`_ + Build and push everything as **one-liner** From 230a72bc16a6bbc3b3804020df315a386b099f04 Mon Sep 17 00:00:00 2001 From: Yi Li Date: Tue, 3 Feb 2026 13:02:42 -0600 Subject: [PATCH 105/232] ROX-32847: Enable ROX_BASE_IMAGE_DETECTION (#18809) --- pkg/features/list.go | 2 +- pkg/images/enricher/enricher_impl_test.go | 8 +++++++- pkg/images/enricher/enricher_v2_impl_test.go | 8 +++++++- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/pkg/features/list.go b/pkg/features/list.go index 1bf3a2d4e7823..3cfa019b415a8 100644 --- a/pkg/features/list.go +++ b/pkg/features/list.go @@ -114,7 +114,7 @@ var ( CVEFixTimestampCriteria = registerFeature("Enable grace period criteria based on CVE fix timestamp", "ROX_CVE_FIX_TIMESTAMP", enabled) // BaseImageDetection enables base image detection and management functionality. - BaseImageDetection = registerFeature("Enable base image detection and management functionality", "ROX_BASE_IMAGE_DETECTION") + BaseImageDetection = registerFeature("Enable base image detection and management functionality", "ROX_BASE_IMAGE_DETECTION", enabled) // DelegatedBaseImageScanning enables delegation of base image repository scanning to secured clusters. DelegatedBaseImageScanning = registerFeature("Enable delegated base image scanning to secured clusters", "ROX_DELEGATED_BASE_IMAGE_SCANNING") diff --git a/pkg/images/enricher/enricher_impl_test.go b/pkg/images/enricher/enricher_impl_test.go index 9035dc89b842f..ce599d16da226 100644 --- a/pkg/images/enricher/enricher_impl_test.go +++ b/pkg/images/enricher/enricher_impl_test.go @@ -42,6 +42,10 @@ func imageGetterPanicOnCall(_ context.Context, _ string) (*storage.Image, bool, panic("Unexpected call to imageGetter") } +func emptyBaseImageGetter(_ context.Context, _ []string) ([]*storage.BaseImage, error) { + return nil, nil +} + var _ signatures.SignatureFetcher = (*fakeSigFetcher)(nil) var _ scannertypes.Scanner = (*fakeScanner)(nil) @@ -348,6 +352,7 @@ func TestEnricherFlow(t *testing.T) { imageGetter: emptyImageGetter, signatureIntegrationGetter: emptySignatureIntegrationGetter, signatureFetcher: &fakeSigFetcher{}, + baseImageGetter: emptyBaseImageGetter, } if c.inMetadataCache { enricherImpl.metadataCache.Add(c.image.GetId(), c.image.GetMetadata()) @@ -400,6 +405,7 @@ func TestCVESuppression(t *testing.T) { imageGetter: emptyImageGetter, signatureIntegrationGetter: emptySignatureIntegrationGetter, signatureFetcher: &fakeSigFetcher{}, + baseImageGetter: emptyBaseImageGetter, } img := &storage.Image{Id: "id", Name: &storage.ImageName{Registry: "reg"}, @@ -1446,7 +1452,7 @@ func TestEnrichImageWithBaseImages(t *testing.T) { func newEnricher(set *mocks.MockSet, mockReporter *reporterMocks.MockReporter) ImageEnricher { return New(&fakeCVESuppressorV2{}, set, pkgMetrics.CentralSubsystem, newCache(), - nil, + emptyBaseImageGetter, emptyImageGetter, mockReporter, emptySignatureIntegrationGetter, nil) } diff --git a/pkg/images/enricher/enricher_v2_impl_test.go b/pkg/images/enricher/enricher_v2_impl_test.go index cfa31cb4f0779..f126d4ef34808 100644 --- 
a/pkg/images/enricher/enricher_v2_impl_test.go +++ b/pkg/images/enricher/enricher_v2_impl_test.go @@ -43,6 +43,10 @@ func imageGetterV2PanicOnCall(_ context.Context, _ string) (*storage.ImageV2, bo panic("Unexpected call to imageGetter") } +func emptyBaseImageGetterV2(_ context.Context, _ []string) ([]*storage.BaseImage, error) { + return nil, nil +} + var _ signatures.SignatureFetcher = (*fakeSigFetcher)(nil) var _ scannertypes.Scanner = (*fakeScanner)(nil) @@ -357,6 +361,7 @@ func TestEnricherV2Flow(t *testing.T) { imageGetter: emptyImageGetterV2, signatureIntegrationGetter: emptySignatureIntegrationGetter, signatureFetcher: &fakeSigFetcher{}, + baseImageGetter: emptyBaseImageGetterV2, } if c.inMetadataCache { enricherImpl.metadataCache.Add(c.image.GetId(), c.image.GetMetadata()) @@ -409,6 +414,7 @@ func TestCVESuppressionV2(t *testing.T) { imageGetter: emptyImageGetterV2, signatureIntegrationGetter: emptySignatureIntegrationGetter, signatureFetcher: &fakeSigFetcher{}, + baseImageGetter: emptyBaseImageGetterV2, } img := &storage.ImageV2{ @@ -1313,7 +1319,7 @@ func TestEnrichImageWithBaseImagesV2(t *testing.T) { func newEnricherV2(set *mocks.MockSet, mockReporter *reporterMocks.MockReporter) ImageEnricherV2 { return NewV2(&fakeCVESuppressorV2{}, set, pkgMetrics.CentralSubsystem, newCache(), - nil, + emptyBaseImageGetterV2, emptyImageGetterV2, mockReporter, emptySignatureIntegrationGetter, nil) } From 52223c2194212c7afaef600991b1b8d2e241f6be Mon Sep 17 00:00:00 2001 From: Jouko Virtanen Date: Tue, 3 Feb 2026 12:03:20 -0800 Subject: [PATCH 106/232] ROX-32871: Telemetry for process baseline auto lock (#18757) --- central/cluster/datastore/telemetry.go | 18 +++-- central/detection/lifecycle/manager_impl.go | 7 +- pkg/cluster/validation.go | 11 +++ pkg/cluster/validation_test.go | 87 +++++++++++++++++++++ 4 files changed, 110 insertions(+), 13 deletions(-) diff --git a/central/cluster/datastore/telemetry.go b/central/cluster/datastore/telemetry.go index 3a20123ba5a87..e75f00e398ff6 100644 --- a/central/cluster/datastore/telemetry.go +++ b/central/cluster/datastore/telemetry.go @@ -7,6 +7,7 @@ import ( "github.com/stackrox/rox/central/telemetry/centralclient" "github.com/stackrox/rox/generated/internalapi/central" "github.com/stackrox/rox/generated/storage" + clusterPkg "github.com/stackrox/rox/pkg/cluster" "github.com/stackrox/rox/pkg/sac" "github.com/stackrox/rox/pkg/sac/resources" "github.com/stackrox/rox/pkg/telemetry/phonehome" @@ -40,14 +41,15 @@ func trackClusterRegistered(cluster *storage.Cluster) { func makeClusterProperties(cluster *storage.Cluster) map[string]any { return map[string]any{ - "Main Image": cluster.GetMainImage(), - "Admission Controller": cluster.GetAdmissionController(), - "Collection Method": cluster.GetCollectionMethod().String(), - "Collector Image": cluster.GetCollectorImage(), - "Managed By": cluster.GetManagedBy().String(), - "Priority": cluster.GetPriority(), - "Cluster Type": cluster.GetType().String(), - "Slim Collector": cluster.GetSlimCollector(), + "Main Image": cluster.GetMainImage(), + "Admission Controller": cluster.GetAdmissionController(), + "Collection Method": cluster.GetCollectionMethod().String(), + "Collector Image": cluster.GetCollectorImage(), + "Managed By": cluster.GetManagedBy().String(), + "Priority": cluster.GetPriority(), + "Cluster Type": cluster.GetType().String(), + "Slim Collector": cluster.GetSlimCollector(), + "Auto Lock Process Baselines": clusterPkg.GetAutoLockProcessBaselinesEnabled(cluster), } } diff --git 
a/central/detection/lifecycle/manager_impl.go b/central/detection/lifecycle/manager_impl.go index 2d68fcbb6f04d..569489c2d902e 100644 --- a/central/detection/lifecycle/manager_impl.go +++ b/central/detection/lifecycle/manager_impl.go @@ -23,6 +23,7 @@ import ( "github.com/stackrox/rox/central/sensor/service/connection" "github.com/stackrox/rox/generated/internalapi/central" "github.com/stackrox/rox/generated/storage" + clusterPkg "github.com/stackrox/rox/pkg/cluster" "github.com/stackrox/rox/pkg/concurrency" "github.com/stackrox/rox/pkg/env" "github.com/stackrox/rox/pkg/features" @@ -226,11 +227,7 @@ func (m *managerImpl) isAutoLockEnabledForCluster(clusterId string) bool { return false } - if cluster.GetManagedBy() == storage.ManagerType_MANAGER_TYPE_MANUAL || cluster.GetManagedBy() == storage.ManagerType_MANAGER_TYPE_UNKNOWN { - return cluster.GetDynamicConfig().GetAutoLockProcessBaselinesConfig().GetEnabled() - } - - return cluster.GetHelmConfig().GetDynamicConfig().GetAutoLockProcessBaselinesConfig().GetEnabled() + return clusterPkg.GetAutoLockProcessBaselinesEnabled(cluster) } func (m *managerImpl) flushIndicatorQueue() { diff --git a/pkg/cluster/validation.go b/pkg/cluster/validation.go index b82f3fdba9b88..0a64a034da58c 100644 --- a/pkg/cluster/validation.go +++ b/pkg/cluster/validation.go @@ -78,3 +78,14 @@ func ValidatePartial(cluster *storage.Cluster) *errorhelpers.ErrorList { return errorList } + +// GetAutoLockProcessBaselinesEnabled returns whether the auto-lock process baselines feature is enabled +// for the given cluster. For manually managed clusters, it reads from the cluster's dynamic config. +// For helm/operator-managed clusters, it reads from the cluster's helm config. +func GetAutoLockProcessBaselinesEnabled(cluster *storage.Cluster) bool { + if cluster.GetManagedBy() == storage.ManagerType_MANAGER_TYPE_MANUAL || cluster.GetManagedBy() == storage.ManagerType_MANAGER_TYPE_UNKNOWN { + return cluster.GetDynamicConfig().GetAutoLockProcessBaselinesConfig().GetEnabled() + } + + return cluster.GetHelmConfig().GetDynamicConfig().GetAutoLockProcessBaselinesConfig().GetEnabled() +} diff --git a/pkg/cluster/validation_test.go b/pkg/cluster/validation_test.go index c58a854183b50..8c156e034aeb5 100644 --- a/pkg/cluster/validation_test.go +++ b/pkg/cluster/validation_test.go @@ -149,3 +149,90 @@ func TestFullValidation(t *testing.T) { }) } } + +func TestGetAutoLockProcessBaselinesEnabled(t *testing.T) { + t.Run("returns true when enabled for manually managed cluster", func(t *testing.T) { + cluster := &storage.Cluster{ + ManagedBy: storage.ManagerType_MANAGER_TYPE_MANUAL, + DynamicConfig: &storage.DynamicClusterConfig{ + AutoLockProcessBaselinesConfig: &storage.AutoLockProcessBaselinesConfig{ + Enabled: true, + }, + }, + } + assert.True(t, GetAutoLockProcessBaselinesEnabled(cluster)) + }) + + t.Run("returns false when disabled for manually managed cluster", func(t *testing.T) { + cluster := &storage.Cluster{ + ManagedBy: storage.ManagerType_MANAGER_TYPE_MANUAL, + DynamicConfig: &storage.DynamicClusterConfig{ + AutoLockProcessBaselinesConfig: &storage.AutoLockProcessBaselinesConfig{ + Enabled: false, + }, + }, + } + assert.False(t, GetAutoLockProcessBaselinesEnabled(cluster)) + }) + + t.Run("returns true when enabled for unknown manager type cluster", func(t *testing.T) { + cluster := &storage.Cluster{ + ManagedBy: storage.ManagerType_MANAGER_TYPE_UNKNOWN, + DynamicConfig: &storage.DynamicClusterConfig{ + AutoLockProcessBaselinesConfig: &storage.AutoLockProcessBaselinesConfig{ + 
Enabled: true, + }, + }, + } + assert.True(t, GetAutoLockProcessBaselinesEnabled(cluster)) + }) + + t.Run("returns true when enabled for helm managed cluster", func(t *testing.T) { + cluster := &storage.Cluster{ + ManagedBy: storage.ManagerType_MANAGER_TYPE_HELM_CHART, + HelmConfig: &storage.CompleteClusterConfig{ + DynamicConfig: &storage.DynamicClusterConfig{ + AutoLockProcessBaselinesConfig: &storage.AutoLockProcessBaselinesConfig{ + Enabled: true, + }, + }, + }, + } + assert.True(t, GetAutoLockProcessBaselinesEnabled(cluster)) + }) + + t.Run("returns false when disabled for helm managed cluster", func(t *testing.T) { + cluster := &storage.Cluster{ + ManagedBy: storage.ManagerType_MANAGER_TYPE_HELM_CHART, + HelmConfig: &storage.CompleteClusterConfig{ + DynamicConfig: &storage.DynamicClusterConfig{ + AutoLockProcessBaselinesConfig: &storage.AutoLockProcessBaselinesConfig{ + Enabled: false, + }, + }, + }, + } + assert.False(t, GetAutoLockProcessBaselinesEnabled(cluster)) + }) + + t.Run("returns true when enabled for operator managed cluster", func(t *testing.T) { + cluster := &storage.Cluster{ + ManagedBy: storage.ManagerType_MANAGER_TYPE_KUBERNETES_OPERATOR, + HelmConfig: &storage.CompleteClusterConfig{ + DynamicConfig: &storage.DynamicClusterConfig{ + AutoLockProcessBaselinesConfig: &storage.AutoLockProcessBaselinesConfig{ + Enabled: true, + }, + }, + }, + } + assert.True(t, GetAutoLockProcessBaselinesEnabled(cluster)) + }) + + t.Run("returns false when config is nil", func(t *testing.T) { + cluster := &storage.Cluster{ + ManagedBy: storage.ManagerType_MANAGER_TYPE_MANUAL, + } + assert.False(t, GetAutoLockProcessBaselinesEnabled(cluster)) + }) +} From b00a93777d8f7178d2099b755c0e6bb4a68c2edb Mon Sep 17 00:00:00 2001 From: "red-hat-konflux[bot]" <126015336+red-hat-konflux[bot]@users.noreply.github.com> Date: Tue, 3 Feb 2026 20:14:48 +0000 Subject: [PATCH 107/232] chore(deps): update all dependencies (#18159) Signed-off-by: red-hat-konflux <126015336+red-hat-konflux[bot]@users.noreply.github.com> Co-authored-by: red-hat-konflux[bot] <126015336+red-hat-konflux[bot]@users.noreply.github.com> --- image/postgres/konflux.Dockerfile | 2 +- image/rhel/konflux.Dockerfile | 4 +- image/roxctl/konflux.Dockerfile | 2 +- operator/konflux.Dockerfile | 2 +- operator/konflux.bundle.Dockerfile | 2 +- rpms.lock.yaml | 668 +++++++++++------------ scanner/image/db/konflux.Dockerfile | 2 +- scanner/image/scanner/konflux.Dockerfile | 2 +- 8 files changed, 342 insertions(+), 342 deletions(-) diff --git a/image/postgres/konflux.Dockerfile b/image/postgres/konflux.Dockerfile index 490be6b1405ef..afd8ac5582f4f 100644 --- a/image/postgres/konflux.Dockerfile +++ b/image/postgres/konflux.Dockerfile @@ -1,5 +1,5 @@ ARG PG_VERSION=15 -FROM registry.redhat.io/rhel8/postgresql-${PG_VERSION}:latest@sha256:042f6efe0f16e94ffb2d0a3bede852bb026b6dce661ac5b339e6f63846467b9d AS final +FROM registry.redhat.io/rhel8/postgresql-${PG_VERSION}:latest@sha256:103fd3b9deeea2a7c7d16af246ee5274bcd0f8b9e508485530c5b42ea2b9916c AS final USER root diff --git a/image/rhel/konflux.Dockerfile b/image/rhel/konflux.Dockerfile index 62a76a3be257b..1893b3c3192ce 100644 --- a/image/rhel/konflux.Dockerfile +++ b/image/rhel/konflux.Dockerfile @@ -37,7 +37,7 @@ RUN mkdir -p image/rhel/docs/api/v1 && \ RUN make copy-go-binaries-to-image-dir -FROM registry.access.redhat.com/ubi9/nodejs-20:latest@sha256:71a3810707370f30bc0958aea14c3a5af564a3962ae0819bf16fdde7df9b4378 AS ui-builder +FROM 
registry.access.redhat.com/ubi9/nodejs-20:latest@sha256:ad30ca76c555dafd2c0c772f8a12aae41cadc767c9654761c6fb706fd1659920 AS ui-builder WORKDIR /go/src/github.com/stackrox/rox/app @@ -59,7 +59,7 @@ ENV UI_PKG_INSTALL_EXTRA_ARGS="--ignore-scripts" RUN make -C ui build -FROM registry.access.redhat.com/ubi8/ubi-minimal:latest@sha256:a670c5b613280e17a666c858c9263a50aafe1a023a8d5730c7a83cb53771487b +FROM registry.access.redhat.com/ubi8/ubi-minimal:latest@sha256:5dc6ba426ccbeb3954ead6b015f36b4a2d22320e5b356b074198d08422464ed2 ARG PG_VERSION diff --git a/image/roxctl/konflux.Dockerfile b/image/roxctl/konflux.Dockerfile index 568001aed31c8..3ee1ea2c63cf0 100644 --- a/image/roxctl/konflux.Dockerfile +++ b/image/roxctl/konflux.Dockerfile @@ -26,7 +26,7 @@ RUN RACE=0 CGO_ENABLED=1 GOOS=linux GOARCH=$(go env GOARCH) scripts/go-build.sh cp bin/linux_$(go env GOARCH)/roxctl image/bin/roxctl -FROM registry.access.redhat.com/ubi8/ubi-minimal:latest@sha256:a670c5b613280e17a666c858c9263a50aafe1a023a8d5730c7a83cb53771487b +FROM registry.access.redhat.com/ubi8/ubi-minimal:latest@sha256:5dc6ba426ccbeb3954ead6b015f36b4a2d22320e5b356b074198d08422464ed2 COPY --from=builder /go/src/github.com/stackrox/rox/app/image/bin/roxctl /usr/bin/roxctl diff --git a/operator/konflux.Dockerfile b/operator/konflux.Dockerfile index 15c11d92f57ae..28ac4bf1b525d 100644 --- a/operator/konflux.Dockerfile +++ b/operator/konflux.Dockerfile @@ -17,7 +17,7 @@ ENV CI=1 GOFLAGS="" CGO_ENABLED=1 RUN GOOS=linux GOARCH=$(go env GOARCH) scripts/go-build-file.sh operator/cmd/main.go image/bin/operator -FROM registry.access.redhat.com/ubi8/ubi-minimal:latest@sha256:a670c5b613280e17a666c858c9263a50aafe1a023a8d5730c7a83cb53771487b +FROM registry.access.redhat.com/ubi8/ubi-minimal:latest@sha256:5dc6ba426ccbeb3954ead6b015f36b4a2d22320e5b356b074198d08422464ed2 ARG BUILD_TAG diff --git a/operator/konflux.bundle.Dockerfile b/operator/konflux.bundle.Dockerfile index e328144d2b31e..ef18a782ad16c 100644 --- a/operator/konflux.bundle.Dockerfile +++ b/operator/konflux.bundle.Dockerfile @@ -1,4 +1,4 @@ -FROM registry.access.redhat.com/ubi9/python-39:latest@sha256:8392799f609b0de3f9a4640400d460f5e2563b2b6f09e6b5fe89a67adda75c6a AS builder +FROM registry.access.redhat.com/ubi9/python-39:latest@sha256:c2112827949a0f2deb040bc8f2a57631daaddd453db2198258275668996dd65f AS builder # Because 'default' user cannot create build/ directory and errrors like: # mkdir: cannot create directory ‘build/’: Permission denied diff --git a/rpms.lock.yaml b/rpms.lock.yaml index 2d9fe0962e441..a4d67cdc387fe 100644 --- a/rpms.lock.yaml +++ b/rpms.lock.yaml @@ -25,20 +25,20 @@ arches: name: oniguruma evr: 6.8.2-3.el8 sourcerpm: oniguruma-6.8.2-3.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/appstream/os/Packages/p/postgresql-15.14-1.module+el8.10.0+23423+5a199198.aarch64.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/appstream/os/Packages/p/postgresql-15.15-1.module+el8.10.0+23782+2d6b2a31.aarch64.rpm repoid: rhel-8-for-aarch64-appstream-rpms - size: 1785883 - checksum: sha256:24562212b8673ccfbcfb80f7b3f6eca08ae83d65a2e2aadc924472a7bd97d6db + size: 1798263 + checksum: sha256:252c77d8411563980a3faa4fd4afca997bbd6f3456c1891036edaf429f676e34 name: postgresql - evr: 15.14-1.module+el8.10.0+23423+5a199198 - sourcerpm: postgresql-15.14-1.module+el8.10.0+23423+5a199198.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/appstream/os/Packages/p/postgresql-private-libs-15.14-1.module+el8.10.0+23423+5a199198.aarch64.rpm + evr: 
15.15-1.module+el8.10.0+23782+2d6b2a31 + sourcerpm: postgresql-15.15-1.module+el8.10.0+23782+2d6b2a31.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/appstream/os/Packages/p/postgresql-private-libs-15.15-1.module+el8.10.0+23782+2d6b2a31.aarch64.rpm repoid: rhel-8-for-aarch64-appstream-rpms - size: 129399 - checksum: sha256:94c27b96fd1a6d76c801cdbfc07da04d6220640c2f83a533b21711789e347714 + size: 129939 + checksum: sha256:48e3f2c86fe94721c78b0c824a3dc6c75bd848291a502493cccb3ea953a09810 name: postgresql-private-libs - evr: 15.14-1.module+el8.10.0+23423+5a199198 - sourcerpm: postgresql-15.14-1.module+el8.10.0+23423+5a199198.src.rpm + evr: 15.15-1.module+el8.10.0+23782+2d6b2a31 + sourcerpm: postgresql-15.15-1.module+el8.10.0+23782+2d6b2a31.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/appstream/os/Packages/x/xkeyboard-config-2.28-1.el8.noarch.rpm repoid: rhel-8-for-aarch64-appstream-rpms size: 801000 @@ -207,20 +207,20 @@ arches: name: dbus-tools evr: 1:1.12.8-27.el8_10 sourcerpm: dbus-1.12.8-27.el8_10.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/d/device-mapper-1.02.181-15.el8_10.2.aarch64.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/d/device-mapper-1.02.181-15.el8_10.3.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms - size: 384528 - checksum: sha256:033c79a4a469245425c6293ab95ffa7963eaabe0871a1266ab7bedfe8670e0de + size: 383544 + checksum: sha256:c2b79444a0fd22507b15728b6e0e33364af0f15c540ed9197f60a46b2deda559 name: device-mapper - evr: 8:1.02.181-15.el8_10.2 - sourcerpm: lvm2-2.03.14-15.el8_10.2.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/d/device-mapper-libs-1.02.181-15.el8_10.2.aarch64.rpm + evr: 8:1.02.181-15.el8_10.3 + sourcerpm: lvm2-2.03.14-15.el8_10.3.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/d/device-mapper-libs-1.02.181-15.el8_10.3.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms - size: 409080 - checksum: sha256:015d8a22550b22dffcd0da73bcd32517a3cd8d5e35c11723eea1ca6d31a5e50e + size: 408068 + checksum: sha256:3fec4e2ec36933b654e90252c1e101c6bdbfc643eda693f1f0dcf3bc4403d8dd name: device-mapper-libs - evr: 8:1.02.181-15.el8_10.2 - sourcerpm: lvm2-2.03.14-15.el8_10.2.src.rpm + evr: 8:1.02.181-15.el8_10.3 + sourcerpm: lvm2-2.03.14-15.el8_10.3.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/d/diffutils-3.6-6.el8.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms size: 360676 @@ -333,13 +333,13 @@ arches: name: gettext-libs evr: 0.19.8.1-17.el8 sourcerpm: gettext-0.19.8.1-17.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/g/glib2-2.56.4-167.el8_10.aarch64.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/g/glib2-2.56.4-168.el8_10.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms - size: 2555008 - checksum: sha256:7b666d9dda0acf4abba674ee76037c390aeeb91230367603302acae208f701a5 + size: 2554640 + checksum: sha256:66f593a251f997a439a2a278aa10640a09e018697b3ba1fe7918ce646314f021 name: glib2 - evr: 2.56.4-167.el8_10 - sourcerpm: glib2-2.56.4-167.el8_10.src.rpm + evr: 2.56.4-168.el8_10 + sourcerpm: glib2-2.56.4-168.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/g/glibc-2.28-251.el8_10.27.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms size: 1884224 @@ -886,13 +886,13 @@ arches: name: openssl-pkcs11 evr: 0.4.10-3.el8 
sourcerpm: openssl-pkcs11-0.4.10-3.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/o/os-prober-1.74-9.el8.aarch64.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/o/os-prober-1.74-11.el8_10.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms - size: 56816 - checksum: sha256:cdc45c915674781f5db481d48a9ca6f40ed0ff1971c3bcb68c41da1aa00081e5 + size: 56284 + checksum: sha256:9adcbcb24e5dfd4cf8345dca4473073598c1f666c07543ca725b86fb5ae2b862 name: os-prober - evr: 1.74-9.el8 - sourcerpm: os-prober-1.74-9.el8.src.rpm + evr: 1.74-11.el8_10 + sourcerpm: os-prober-1.74-11.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/p/p11-kit-0.23.22-2.el8.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms size: 316568 @@ -935,13 +935,13 @@ arches: name: pigz evr: 2.4-4.el8 sourcerpm: pigz-2.4-4.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/p/platform-python-3.6.8-71.el8_10.aarch64.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/p/platform-python-3.6.8-72.el8_10.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms - size: 89844 - checksum: sha256:22b8476ce131da79ae5afdec7c685f9f7c3cfeac3303d2eccde6ef9e97279584 + size: 89944 + checksum: sha256:44dc3b2650801f02fdfc089b50221fb221d74119b42eb29158f3786a58f42c3c name: platform-python - evr: 3.6.8-71.el8_10 - sourcerpm: python3-3.6.8-71.el8_10.src.rpm + evr: 3.6.8-72.el8_10 + sourcerpm: python3-3.6.8-72.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/p/platform-python-pip-9.0.3-24.el8.noarch.rpm repoid: rhel-8-for-aarch64-baseos-rpms size: 1633024 @@ -977,13 +977,13 @@ arches: name: publicsuffix-list-dafsa evr: 20180723-1.el8 sourcerpm: publicsuffix-list-20180723-1.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/p/python3-libs-3.6.8-71.el8_10.aarch64.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/p/python3-libs-3.6.8-72.el8_10.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms - size: 8113884 - checksum: sha256:d17af894fad9fe0582b21f9abb6371c0eda62753101b9fe373928092b509d38c + size: 8114508 + checksum: sha256:e71f0faa3e07a074a216a0dd405a3e33f13aecf4fceb2e64e38cf83e29559651 name: python3-libs - evr: 3.6.8-71.el8_10 - sourcerpm: python3-3.6.8-71.el8_10.src.rpm + evr: 3.6.8-72.el8_10 + sourcerpm: python3-3.6.8-72.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/p/python3-pip-wheel-9.0.3-24.el8.noarch.rpm repoid: rhel-8-for-aarch64-baseos-rpms size: 886996 @@ -1068,34 +1068,34 @@ arches: name: sqlite-libs evr: 3.26.0-20.el8_10 sourcerpm: sqlite-3.26.0-20.el8_10.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/s/systemd-239-82.el8_10.8.aarch64.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/s/systemd-239-82.el8_10.13.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms - size: 3513088 - checksum: sha256:c94f8fcedeb97ef9908137ec0d8b9a7618ab82618b3ecf7f345e25aa7ed5a267 + size: 3514456 + checksum: sha256:529f476e7b67ac581382275d9365490ba3b24f9cd7a68a8937dc22a97c8cc516 name: systemd - evr: 239-82.el8_10.8 - sourcerpm: systemd-239-82.el8_10.8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/s/systemd-libs-239-82.el8_10.8.aarch64.rpm + evr: 239-82.el8_10.13 + sourcerpm: systemd-239-82.el8_10.13.src.rpm + - 
url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/s/systemd-libs-239-82.el8_10.13.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms - size: 1097868 - checksum: sha256:422451943b582df3f0ed2b30347c5fef85455beffe5a58cb67c2f1783d8a11a2 + size: 1099868 + checksum: sha256:f78d5d354e950c37542dc4cb1155c3cd490b3f844d3319362ec730274be345f5 name: systemd-libs - evr: 239-82.el8_10.8 - sourcerpm: systemd-239-82.el8_10.8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/s/systemd-pam-239-82.el8_10.8.aarch64.rpm + evr: 239-82.el8_10.13 + sourcerpm: systemd-239-82.el8_10.13.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/s/systemd-pam-239-82.el8_10.13.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms - size: 487656 - checksum: sha256:af8b62ab3a3b79c2cbd7130c8a5a97801dfe1f4a3c266ef1b421db443bf90eb4 + size: 489180 + checksum: sha256:740ee08a8727c7672aefbf21c6d4afbdcbfb05076db43139a9b5adc1c70bbb7d name: systemd-pam - evr: 239-82.el8_10.8 - sourcerpm: systemd-239-82.el8_10.8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/s/systemd-udev-239-82.el8_10.8.aarch64.rpm + evr: 239-82.el8_10.13 + sourcerpm: systemd-239-82.el8_10.13.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/s/systemd-udev-239-82.el8_10.13.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms - size: 1626444 - checksum: sha256:a77e034e5905688284937ae0ba9c998a6e1dd926240dd428653c09840f94ef9e + size: 1627748 + checksum: sha256:296e74ad60ed8ba21439f5fed1578baee5cd2e37b659e125990380e3d6391b9b name: systemd-udev - evr: 239-82.el8_10.8 - sourcerpm: systemd-239-82.el8_10.8.src.rpm + evr: 239-82.el8_10.13 + sourcerpm: systemd-239-82.el8_10.13.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/t/trousers-0.3.15-2.el8.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms size: 151356 @@ -1171,12 +1171,12 @@ arches: checksum: sha256:31cd372131f6eb404ce90285210fd74021914b4eb52e933b2aeebfa955099faa name: oniguruma evr: 6.8.2-3.el8 - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/appstream/source/SRPMS/Packages/p/postgresql-15.14-1.module+el8.10.0+23423+5a199198.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/appstream/source/SRPMS/Packages/p/postgresql-15.15-1.module+el8.10.0+23782+2d6b2a31.src.rpm repoid: rhel-8-for-aarch64-appstream-source-rpms - size: 53572917 - checksum: sha256:4a2c66b6b48cbf761ed5d454022f80fd6e63f89a84f095dac2683663960e9272 + size: 45654458 + checksum: sha256:30795de4ed7a01becc64ee50796e7c76b9195ff1eed0a341b279aeb3e4b15527 name: postgresql - evr: 15.14-1.module+el8.10.0+23423+5a199198 + evr: 15.15-1.module+el8.10.0+23782+2d6b2a31 - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/appstream/source/SRPMS/Packages/x/xkeyboard-config-2.28-1.el8.src.rpm repoid: rhel-8-for-aarch64-appstream-source-rpms size: 1699339 @@ -1363,12 +1363,12 @@ arches: checksum: sha256:114be9b072a7726f2ac557fda6b8a86254ae3b7ed984ed14cfa7733bea9005d4 name: gettext evr: 0.19.8.1-17.el8 - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/source/SRPMS/Packages/g/glib2-2.56.4-167.el8_10.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/source/SRPMS/Packages/g/glib2-2.56.4-168.el8_10.src.rpm repoid: rhel-8-for-aarch64-baseos-source-rpms - size: 7164394 - checksum: sha256:80ee50b39aa478e1503dbd18626df91a023d30e3f9b6fb588fa82e6ce2b5972e + size: 7169961 + checksum: 
sha256:6b67584ae03d06c58331b29141f63b0b86e256ddef78ec9c48f80bdfcdb76890 name: glib2 - evr: 2.56.4-167.el8_10 + evr: 2.56.4-168.el8_10 - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/source/SRPMS/Packages/g/glibc-2.28-251.el8_10.27.src.rpm repoid: rhel-8-for-aarch64-baseos-source-rpms size: 18525139 @@ -1609,12 +1609,12 @@ arches: checksum: sha256:764fa61f3a6678bf93d94351468e49863176420688ab4e8c1aa6a5eb84ecf23d name: lua evr: 5.3.4-12.el8 - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/source/SRPMS/Packages/l/lvm2-2.03.14-15.el8_10.2.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/source/SRPMS/Packages/l/lvm2-2.03.14-15.el8_10.3.src.rpm repoid: rhel-8-for-aarch64-baseos-source-rpms - size: 3185875 - checksum: sha256:c464de2287071dd2c498af02ce271d6153a354bfbce16b271c7131f98a55f8c8 + size: 3196004 + checksum: sha256:350e26dbc6f830fdb3548319013264cb2049858907c093a90b84a5945f6a4835 name: lvm2 - evr: 8:2.03.14-15.el8_10.2 + evr: 8:2.03.14-15.el8_10.3 - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/source/SRPMS/Packages/l/lz4-1.8.3-5.el8_10.src.rpm repoid: rhel-8-for-aarch64-baseos-source-rpms size: 347074 @@ -1669,12 +1669,12 @@ arches: checksum: sha256:a737e7fe890c5f53c1bc0c5925375791d8890f9d51c4a509091b41efa3f92861 name: openssl-pkcs11 evr: 0.4.10-3.el8 - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/source/SRPMS/Packages/o/os-prober-1.74-9.el8.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/source/SRPMS/Packages/o/os-prober-1.74-11.el8_10.src.rpm repoid: rhel-8-for-aarch64-baseos-source-rpms - size: 55171 - checksum: sha256:0577008638e1644fed230d55b221b485e6cdc702cda9c27cf74ab7adcb8b8f00 + size: 55952 + checksum: sha256:602a6f146d9b36de4d52f744ad8d4084b87515e16a04c98c7d21368e7351194e name: os-prober - evr: 1.74-9.el8 + evr: 1.74-11.el8_10 - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/source/SRPMS/Packages/p/p11-kit-0.23.22-2.el8.src.rpm repoid: rhel-8-for-aarch64-baseos-source-rpms size: 909983 @@ -1735,12 +1735,12 @@ arches: checksum: sha256:31ae9c84f36f7d4e51b0e945e5d12210594defd3ea16cf5645c21d42fd6332fa name: python-setuptools evr: 39.2.0-9.el8_10 - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/source/SRPMS/Packages/p/python3-3.6.8-71.el8_10.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/source/SRPMS/Packages/p/python3-3.6.8-72.el8_10.src.rpm repoid: rhel-8-for-aarch64-baseos-source-rpms - size: 19241145 - checksum: sha256:e7012ea23f3816772d64357f7db534f83c55279bcbefdecaefe2573d4734c6a3 + size: 19243590 + checksum: sha256:55193ea63a0505c0beb8f705ce3edb4299c661044695ce22b8321b51485341d5 name: python3 - evr: 3.6.8-71.el8_10 + evr: 3.6.8-72.el8_10 - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/source/SRPMS/Packages/r/readline-7.0-10.el8.src.rpm repoid: rhel-8-for-aarch64-baseos-source-rpms size: 2937518 @@ -1789,12 +1789,12 @@ arches: checksum: sha256:26dc49ea369dc145166e0a3959cc132f45e3345b99a75420c8932af24f44668c name: sqlite evr: 3.26.0-20.el8_10 - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/source/SRPMS/Packages/s/systemd-239-82.el8_10.8.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/source/SRPMS/Packages/s/systemd-239-82.el8_10.13.src.rpm repoid: rhel-8-for-aarch64-baseos-source-rpms - size: 9188443 - checksum: sha256:a3ade60f73bb3137b94ac38205c321511b70e2bf61b79e2a25e31015fb415844 + size: 9204931 + checksum: 
sha256:6176d1736de4a4ff55021f7ad56e8cf5fdb459514f2ac45422c41bbfd5957a7b name: systemd - evr: 239-82.el8_10.8 + evr: 239-82.el8_10.13 - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/source/SRPMS/Packages/t/texinfo-6.5-7.el8.src.rpm repoid: rhel-8-for-aarch64-baseos-source-rpms size: 4544531 @@ -1844,10 +1844,10 @@ arches: name: zstd evr: 1.4.4-1.el8 module_metadata: - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/appstream/os/repodata/1f6247087f10c5eaeccd512675154440e86abecf4842fae14e6d9835b61d44c7-modules.yaml.gz + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/appstream/os/repodata/25be956846b5cef874bf04027dd634d1f2ab0e79fc2273ade1b8ddde58acb20f-modules.yaml.gz repoid: rhel-8-for-aarch64-appstream-rpms - size: 755348 - checksum: sha256:1f6247087f10c5eaeccd512675154440e86abecf4842fae14e6d9835b61d44c7 + size: 760949 + checksum: sha256:25be956846b5cef874bf04027dd634d1f2ab0e79fc2273ade1b8ddde58acb20f - arch: ppc64le packages: - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/os/Packages/j/jq-1.6-11.el8_10.ppc64le.rpm @@ -1871,20 +1871,20 @@ arches: name: oniguruma evr: 6.8.2-3.el8 sourcerpm: oniguruma-6.8.2-3.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/os/Packages/p/postgresql-15.14-1.module+el8.10.0+23423+5a199198.ppc64le.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/os/Packages/p/postgresql-15.15-1.module+el8.10.0+23782+2d6b2a31.ppc64le.rpm repoid: rhel-8-for-ppc64le-appstream-rpms - size: 1864363 - checksum: sha256:6eb4836967b76fc22d7fe6c58cfeee10f51e7fd8902e99597d0ed5ee8328a600 + size: 1877303 + checksum: sha256:e60f7686d3bea0245ef49253c5705f2b66a2f23ba68b2c2768955bd3c303748c name: postgresql - evr: 15.14-1.module+el8.10.0+23423+5a199198 - sourcerpm: postgresql-15.14-1.module+el8.10.0+23423+5a199198.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/os/Packages/p/postgresql-private-libs-15.14-1.module+el8.10.0+23423+5a199198.ppc64le.rpm + evr: 15.15-1.module+el8.10.0+23782+2d6b2a31 + sourcerpm: postgresql-15.15-1.module+el8.10.0+23782+2d6b2a31.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/os/Packages/p/postgresql-private-libs-15.15-1.module+el8.10.0+23782+2d6b2a31.ppc64le.rpm repoid: rhel-8-for-ppc64le-appstream-rpms - size: 150687 - checksum: sha256:d50be28a27596fc611792e11609c61a8e5163615ab09bbd5db4ac6df562ae7c4 + size: 151435 + checksum: sha256:80c3061d69e75e725dd9de7084f948b82e37ec98e62a8522c7a79dcea803b74a name: postgresql-private-libs - evr: 15.14-1.module+el8.10.0+23423+5a199198 - sourcerpm: postgresql-15.14-1.module+el8.10.0+23423+5a199198.src.rpm + evr: 15.15-1.module+el8.10.0+23782+2d6b2a31 + sourcerpm: postgresql-15.15-1.module+el8.10.0+23782+2d6b2a31.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/os/Packages/x/xkeyboard-config-2.28-1.el8.noarch.rpm repoid: rhel-8-for-ppc64le-appstream-rpms size: 801000 @@ -2053,20 +2053,20 @@ arches: name: dbus-tools evr: 1:1.12.8-27.el8_10 sourcerpm: dbus-1.12.8-27.el8_10.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/d/device-mapper-1.02.181-15.el8_10.2.ppc64le.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/d/device-mapper-1.02.181-15.el8_10.3.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms - size: 389876 - checksum: sha256:05e03ef1ff1bebf2b6e812aaec94e99e159c5a9e24dc787d455e4639f6d2d20c + size: 388884 + checksum: 
sha256:fb96e882135eb1b7c7c375b043ae9e168e043ff230f8cedb41577536a0546423 name: device-mapper - evr: 8:1.02.181-15.el8_10.2 - sourcerpm: lvm2-2.03.14-15.el8_10.2.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/d/device-mapper-libs-1.02.181-15.el8_10.2.ppc64le.rpm + evr: 8:1.02.181-15.el8_10.3 + sourcerpm: lvm2-2.03.14-15.el8_10.3.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/d/device-mapper-libs-1.02.181-15.el8_10.3.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms - size: 429972 - checksum: sha256:6c17208c14104afa2003a83a8104eb5ffc5784652f80aa27ab737873c2b824ea + size: 428964 + checksum: sha256:2abb3d7a7fdc091571ff63f61d0bb3f0fa85db3b6c42b5b27430f54d9e5805dc name: device-mapper-libs - evr: 8:1.02.181-15.el8_10.2 - sourcerpm: lvm2-2.03.14-15.el8_10.2.src.rpm + evr: 8:1.02.181-15.el8_10.3 + sourcerpm: lvm2-2.03.14-15.el8_10.3.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/d/diffutils-3.6-6.el8.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms size: 375484 @@ -2179,13 +2179,13 @@ arches: name: gettext-libs evr: 0.19.8.1-17.el8 sourcerpm: gettext-0.19.8.1-17.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/g/glib2-2.56.4-167.el8_10.ppc64le.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/g/glib2-2.56.4-168.el8_10.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms - size: 2705184 - checksum: sha256:be9bc79cace8aa17ac4e654cc8029b0a96e353e245eb9992e25db83bb9133568 + size: 2705108 + checksum: sha256:2168f27bece077449911a4fc63cb56fd49751f683b379fdc90bf3d63e513c44d name: glib2 - evr: 2.56.4-167.el8_10 - sourcerpm: glib2-2.56.4-167.el8_10.src.rpm + evr: 2.56.4-168.el8_10 + sourcerpm: glib2-2.56.4-168.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/g/glibc-2.28-251.el8_10.27.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms size: 3516168 @@ -2739,13 +2739,13 @@ arches: name: openssl-pkcs11 evr: 0.4.10-3.el8 sourcerpm: openssl-pkcs11-0.4.10-3.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/o/os-prober-1.74-9.el8.ppc64le.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/o/os-prober-1.74-11.el8_10.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms - size: 57116 - checksum: sha256:0db7648b03c119d2a1361712aed1c06e8ab0780adb29572da2d11a7f5e7e6770 + size: 56564 + checksum: sha256:a9988162a41de0029b12bef9ce2ceb91aa3e7c53220064a6f9aa2f780ddeb688 name: os-prober - evr: 1.74-9.el8 - sourcerpm: os-prober-1.74-9.el8.src.rpm + evr: 1.74-11.el8_10 + sourcerpm: os-prober-1.74-11.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/p/p11-kit-0.23.22-2.el8.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms size: 335268 @@ -2788,13 +2788,13 @@ arches: name: pigz evr: 2.4-4.el8 sourcerpm: pigz-2.4-4.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/p/platform-python-3.6.8-71.el8_10.ppc64le.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/p/platform-python-3.6.8-72.el8_10.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms - size: 90356 - checksum: sha256:59053949e72772d34408de641ce14f6d717d0c5eb011bc89240c01d1e5764964 + size: 90464 + checksum: sha256:ec79879a57f4b2b06e22733567e737f4926ac63f12541b9c022e94a47ad90ef6 name: platform-python - evr: 3.6.8-71.el8_10 - sourcerpm: 
python3-3.6.8-71.el8_10.src.rpm + evr: 3.6.8-72.el8_10 + sourcerpm: python3-3.6.8-72.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/p/platform-python-pip-9.0.3-24.el8.noarch.rpm repoid: rhel-8-for-ppc64le-baseos-rpms size: 1633024 @@ -2830,13 +2830,13 @@ arches: name: publicsuffix-list-dafsa evr: 20180723-1.el8 sourcerpm: publicsuffix-list-20180723-1.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/p/python3-libs-3.6.8-71.el8_10.ppc64le.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/p/python3-libs-3.6.8-72.el8_10.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms - size: 8510436 - checksum: sha256:f3de14c44b546400f9131a12fdd716e19d4df13a6a3cfeab9621793aca6c94e6 + size: 8538088 + checksum: sha256:2beff99a839eccdda0d1eab9f904714dd0e1fa488e3c54b961baaccfb93801d0 name: python3-libs - evr: 3.6.8-71.el8_10 - sourcerpm: python3-3.6.8-71.el8_10.src.rpm + evr: 3.6.8-72.el8_10 + sourcerpm: python3-3.6.8-72.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/p/python3-pip-wheel-9.0.3-24.el8.noarch.rpm repoid: rhel-8-for-ppc64le-baseos-rpms size: 886996 @@ -2921,34 +2921,34 @@ arches: name: sqlite-libs evr: 3.26.0-20.el8_10 sourcerpm: sqlite-3.26.0-20.el8_10.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/s/systemd-239-82.el8_10.8.ppc64le.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/s/systemd-239-82.el8_10.13.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms - size: 3886804 - checksum: sha256:861ab3ad615884b38cd186ade1cc74249e0b5a3e7465318504ab08c8355c4842 + size: 3889304 + checksum: sha256:1ede7d1324cb3c51492693d4ecb8b5e4c4e93544a7a4a62afc00e83535789fb0 name: systemd - evr: 239-82.el8_10.8 - sourcerpm: systemd-239-82.el8_10.8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/s/systemd-libs-239-82.el8_10.8.ppc64le.rpm + evr: 239-82.el8_10.13 + sourcerpm: systemd-239-82.el8_10.13.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/s/systemd-libs-239-82.el8_10.13.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms - size: 1201844 - checksum: sha256:255bcea5675d0b6415cf95f7e44993cb5d2c9dbe300c0199fb5e7875f277af1c + size: 1203244 + checksum: sha256:a966bbf40612aef09d92b2fe0684bdf304c6f4778927106c1170ee428bc62c7b name: systemd-libs - evr: 239-82.el8_10.8 - sourcerpm: systemd-239-82.el8_10.8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/s/systemd-pam-239-82.el8_10.8.ppc64le.rpm + evr: 239-82.el8_10.13 + sourcerpm: systemd-239-82.el8_10.13.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/s/systemd-pam-239-82.el8_10.13.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms - size: 552628 - checksum: sha256:f8800e69da7366f813d16c43c22d5ad77a36cc2b6befbbcdbcf71819adc97283 + size: 554400 + checksum: sha256:4157bb1244b7102308cc1da1bed8d757806c29ee0d7dfb7b323ab01f345d8ecc name: systemd-pam - evr: 239-82.el8_10.8 - sourcerpm: systemd-239-82.el8_10.8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/s/systemd-udev-239-82.el8_10.8.ppc64le.rpm + evr: 239-82.el8_10.13 + sourcerpm: systemd-239-82.el8_10.13.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/s/systemd-udev-239-82.el8_10.13.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms - size: 1627128 - 
checksum: sha256:45511abfd2506c0145cefbac5cc195cf526f6c71c484f15491937db7f0f1ce43 + size: 1628676 + checksum: sha256:41b00948a476a5495cbac6dffbede3b534888fdb585cecfdee763375139eeb4c name: systemd-udev - evr: 239-82.el8_10.8 - sourcerpm: systemd-239-82.el8_10.8.src.rpm + evr: 239-82.el8_10.13 + sourcerpm: systemd-239-82.el8_10.13.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/t/trousers-0.3.15-2.el8.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms size: 159044 @@ -3024,12 +3024,12 @@ arches: checksum: sha256:31cd372131f6eb404ce90285210fd74021914b4eb52e933b2aeebfa955099faa name: oniguruma evr: 6.8.2-3.el8 - - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/source/SRPMS/Packages/p/postgresql-15.14-1.module+el8.10.0+23423+5a199198.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/source/SRPMS/Packages/p/postgresql-15.15-1.module+el8.10.0+23782+2d6b2a31.src.rpm repoid: rhel-8-for-ppc64le-appstream-source-rpms - size: 53572917 - checksum: sha256:4a2c66b6b48cbf761ed5d454022f80fd6e63f89a84f095dac2683663960e9272 + size: 45654458 + checksum: sha256:30795de4ed7a01becc64ee50796e7c76b9195ff1eed0a341b279aeb3e4b15527 name: postgresql - evr: 15.14-1.module+el8.10.0+23423+5a199198 + evr: 15.15-1.module+el8.10.0+23782+2d6b2a31 - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/source/SRPMS/Packages/x/xkeyboard-config-2.28-1.el8.src.rpm repoid: rhel-8-for-ppc64le-appstream-source-rpms size: 1699339 @@ -3216,12 +3216,12 @@ arches: checksum: sha256:114be9b072a7726f2ac557fda6b8a86254ae3b7ed984ed14cfa7733bea9005d4 name: gettext evr: 0.19.8.1-17.el8 - - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/source/SRPMS/Packages/g/glib2-2.56.4-167.el8_10.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/source/SRPMS/Packages/g/glib2-2.56.4-168.el8_10.src.rpm repoid: rhel-8-for-ppc64le-baseos-source-rpms - size: 7164394 - checksum: sha256:80ee50b39aa478e1503dbd18626df91a023d30e3f9b6fb588fa82e6ce2b5972e + size: 7169961 + checksum: sha256:6b67584ae03d06c58331b29141f63b0b86e256ddef78ec9c48f80bdfcdb76890 name: glib2 - evr: 2.56.4-167.el8_10 + evr: 2.56.4-168.el8_10 - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/source/SRPMS/Packages/g/glibc-2.28-251.el8_10.27.src.rpm repoid: rhel-8-for-ppc64le-baseos-source-rpms size: 18525139 @@ -3468,12 +3468,12 @@ arches: checksum: sha256:764fa61f3a6678bf93d94351468e49863176420688ab4e8c1aa6a5eb84ecf23d name: lua evr: 5.3.4-12.el8 - - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/source/SRPMS/Packages/l/lvm2-2.03.14-15.el8_10.2.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/source/SRPMS/Packages/l/lvm2-2.03.14-15.el8_10.3.src.rpm repoid: rhel-8-for-ppc64le-baseos-source-rpms - size: 3185875 - checksum: sha256:c464de2287071dd2c498af02ce271d6153a354bfbce16b271c7131f98a55f8c8 + size: 3196004 + checksum: sha256:350e26dbc6f830fdb3548319013264cb2049858907c093a90b84a5945f6a4835 name: lvm2 - evr: 8:2.03.14-15.el8_10.2 + evr: 8:2.03.14-15.el8_10.3 - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/source/SRPMS/Packages/l/lz4-1.8.3-5.el8_10.src.rpm repoid: rhel-8-for-ppc64le-baseos-source-rpms size: 347074 @@ -3528,12 +3528,12 @@ arches: checksum: sha256:a737e7fe890c5f53c1bc0c5925375791d8890f9d51c4a509091b41efa3f92861 name: openssl-pkcs11 evr: 0.4.10-3.el8 - - url: 
https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/source/SRPMS/Packages/o/os-prober-1.74-9.el8.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/source/SRPMS/Packages/o/os-prober-1.74-11.el8_10.src.rpm repoid: rhel-8-for-ppc64le-baseos-source-rpms - size: 55171 - checksum: sha256:0577008638e1644fed230d55b221b485e6cdc702cda9c27cf74ab7adcb8b8f00 + size: 55952 + checksum: sha256:602a6f146d9b36de4d52f744ad8d4084b87515e16a04c98c7d21368e7351194e name: os-prober - evr: 1.74-9.el8 + evr: 1.74-11.el8_10 - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/source/SRPMS/Packages/p/p11-kit-0.23.22-2.el8.src.rpm repoid: rhel-8-for-ppc64le-baseos-source-rpms size: 909983 @@ -3594,12 +3594,12 @@ arches: checksum: sha256:31ae9c84f36f7d4e51b0e945e5d12210594defd3ea16cf5645c21d42fd6332fa name: python-setuptools evr: 39.2.0-9.el8_10 - - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/source/SRPMS/Packages/p/python3-3.6.8-71.el8_10.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/source/SRPMS/Packages/p/python3-3.6.8-72.el8_10.src.rpm repoid: rhel-8-for-ppc64le-baseos-source-rpms - size: 19241145 - checksum: sha256:e7012ea23f3816772d64357f7db534f83c55279bcbefdecaefe2573d4734c6a3 + size: 19243590 + checksum: sha256:55193ea63a0505c0beb8f705ce3edb4299c661044695ce22b8321b51485341d5 name: python3 - evr: 3.6.8-71.el8_10 + evr: 3.6.8-72.el8_10 - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/source/SRPMS/Packages/r/readline-7.0-10.el8.src.rpm repoid: rhel-8-for-ppc64le-baseos-source-rpms size: 2937518 @@ -3648,12 +3648,12 @@ arches: checksum: sha256:26dc49ea369dc145166e0a3959cc132f45e3345b99a75420c8932af24f44668c name: sqlite evr: 3.26.0-20.el8_10 - - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/source/SRPMS/Packages/s/systemd-239-82.el8_10.8.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/source/SRPMS/Packages/s/systemd-239-82.el8_10.13.src.rpm repoid: rhel-8-for-ppc64le-baseos-source-rpms - size: 9188443 - checksum: sha256:a3ade60f73bb3137b94ac38205c321511b70e2bf61b79e2a25e31015fb415844 + size: 9204931 + checksum: sha256:6176d1736de4a4ff55021f7ad56e8cf5fdb459514f2ac45422c41bbfd5957a7b name: systemd - evr: 239-82.el8_10.8 + evr: 239-82.el8_10.13 - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/source/SRPMS/Packages/t/texinfo-6.5-7.el8.src.rpm repoid: rhel-8-for-ppc64le-baseos-source-rpms size: 4544531 @@ -3703,10 +3703,10 @@ arches: name: zstd evr: 1.4.4-1.el8 module_metadata: - - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/os/repodata/82f87cf7ecb10bffa94c725b33719688bb26f81e8390cfbde8c23b0d68b0afe8-modules.yaml.gz + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/os/repodata/df2eeda2b11e5c9b9f11a7a97a9a2c4725f099aa334f08e77e59bcc8055068a6-modules.yaml.gz repoid: rhel-8-for-ppc64le-appstream-rpms - size: 752834 - checksum: sha256:82f87cf7ecb10bffa94c725b33719688bb26f81e8390cfbde8c23b0d68b0afe8 + size: 758113 + checksum: sha256:df2eeda2b11e5c9b9f11a7a97a9a2c4725f099aa334f08e77e59bcc8055068a6 - arch: s390x packages: - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/os/Packages/j/jq-1.6-11.el8_10.s390x.rpm @@ -3786,20 +3786,20 @@ arches: name: perl-libnet evr: 3.11-3.el8 sourcerpm: perl-libnet-3.11-3.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/os/Packages/p/postgresql-15.14-1.module+el8.10.0+23423+5a199198.s390x.rpm + - url: 
https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/os/Packages/p/postgresql-15.15-1.module+el8.10.0+23782+2d6b2a31.s390x.rpm repoid: rhel-8-for-s390x-appstream-rpms - size: 1769979 - checksum: sha256:cb724993ae1ac5099ba3eebd84fdb7acc8716f3502a3c794f011046c00bfadd8 + size: 1782631 + checksum: sha256:f0afab8138e3ce7ab6bb930e9a3ac03f03c1616abedbca3390c0fde2a3f39479 name: postgresql - evr: 15.14-1.module+el8.10.0+23423+5a199198 - sourcerpm: postgresql-15.14-1.module+el8.10.0+23423+5a199198.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/os/Packages/p/postgresql-private-libs-15.14-1.module+el8.10.0+23423+5a199198.s390x.rpm + evr: 15.15-1.module+el8.10.0+23782+2d6b2a31 + sourcerpm: postgresql-15.15-1.module+el8.10.0+23782+2d6b2a31.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/os/Packages/p/postgresql-private-libs-15.15-1.module+el8.10.0+23782+2d6b2a31.s390x.rpm repoid: rhel-8-for-s390x-appstream-rpms - size: 128367 - checksum: sha256:bb5e475ffe306636145ecc909d945341326346ea34f1564674f46e933aff3dea + size: 128795 + checksum: sha256:32f31046e29c5dc0d48ae70fd732028069dbb5d6531a2bd72b538b850f27ffd1 name: postgresql-private-libs - evr: 15.14-1.module+el8.10.0+23423+5a199198 - sourcerpm: postgresql-15.14-1.module+el8.10.0+23423+5a199198.src.rpm + evr: 15.15-1.module+el8.10.0+23782+2d6b2a31 + sourcerpm: postgresql-15.15-1.module+el8.10.0+23782+2d6b2a31.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/os/Packages/x/xkeyboard-config-2.28-1.el8.noarch.rpm repoid: rhel-8-for-s390x-appstream-rpms size: 801000 @@ -3961,20 +3961,20 @@ arches: name: dbus-tools evr: 1:1.12.8-27.el8_10 sourcerpm: dbus-1.12.8-27.el8_10.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/d/device-mapper-1.02.181-15.el8_10.2.s390x.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/d/device-mapper-1.02.181-15.el8_10.3.s390x.rpm repoid: rhel-8-for-s390x-baseos-rpms - size: 383276 - checksum: sha256:922dfc2ec369944152bd17078c901d709dbaec818f02a21ce7fce96d24820a21 + size: 382296 + checksum: sha256:5ab3b0bdfe8afc2c73f29a68b0fa49956891db99ca429d4e52b86360b7961e8b name: device-mapper - evr: 8:1.02.181-15.el8_10.2 - sourcerpm: lvm2-2.03.14-15.el8_10.2.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/d/device-mapper-libs-1.02.181-15.el8_10.2.s390x.rpm + evr: 8:1.02.181-15.el8_10.3 + sourcerpm: lvm2-2.03.14-15.el8_10.3.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/d/device-mapper-libs-1.02.181-15.el8_10.3.s390x.rpm repoid: rhel-8-for-s390x-baseos-rpms - size: 408880 - checksum: sha256:a25311e54344b073e5bb455a640ff79214616edbbf4f4121fffa6de9f6148782 + size: 407904 + checksum: sha256:ac30ee54bf7f236bf3ef453d383e886adb920ba95dc08dc130d8a4603895cc01 name: device-mapper-libs - evr: 8:1.02.181-15.el8_10.2 - sourcerpm: lvm2-2.03.14-15.el8_10.2.src.rpm + evr: 8:1.02.181-15.el8_10.3 + sourcerpm: lvm2-2.03.14-15.el8_10.3.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/d/diffutils-3.6-6.el8.s390x.rpm repoid: rhel-8-for-s390x-baseos-rpms size: 364352 @@ -4080,13 +4080,13 @@ arches: name: gdbm-libs evr: 1:1.18-2.el8 sourcerpm: gdbm-1.18-2.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/g/glib2-2.56.4-167.el8_10.s390x.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/g/glib2-2.56.4-168.el8_10.s390x.rpm repoid: 
rhel-8-for-s390x-baseos-rpms - size: 2533872 - checksum: sha256:2a62ce51041c154b6fd8eb53d047282f8a24c564bdea96e03bda3f2b0073d468 + size: 2534132 + checksum: sha256:a71c1f62688f6b25c654634891583662ee14ac89fb7b81bbb81269cb9c80a47c name: glib2 - evr: 2.56.4-167.el8_10 - sourcerpm: glib2-2.56.4-167.el8_10.src.rpm + evr: 2.56.4-168.el8_10 + sourcerpm: glib2-2.56.4-168.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/g/glibc-2.28-251.el8_10.27.s390x.rpm repoid: rhel-8-for-s390x-baseos-rpms size: 1876988 @@ -4815,13 +4815,13 @@ arches: name: perl-threads-shared evr: 1.58-2.el8 sourcerpm: perl-threads-shared-1.58-2.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/p/platform-python-3.6.8-71.el8_10.s390x.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/p/platform-python-3.6.8-72.el8_10.s390x.rpm repoid: rhel-8-for-s390x-baseos-rpms - size: 89660 - checksum: sha256:22867ac3e41db8b68fc13a3400e371c2e2702a76009a5c24c95c1325e5ec6138 + size: 89760 + checksum: sha256:06cc2319c125db7607df38573749679efd766c8fccda428222ce20239385269b name: platform-python - evr: 3.6.8-71.el8_10 - sourcerpm: python3-3.6.8-71.el8_10.src.rpm + evr: 3.6.8-72.el8_10 + sourcerpm: python3-3.6.8-72.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/p/platform-python-pip-9.0.3-24.el8.noarch.rpm repoid: rhel-8-for-s390x-baseos-rpms size: 1633024 @@ -4850,13 +4850,13 @@ arches: name: publicsuffix-list-dafsa evr: 20180723-1.el8 sourcerpm: publicsuffix-list-20180723-1.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/p/python3-libs-3.6.8-71.el8_10.s390x.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/p/python3-libs-3.6.8-72.el8_10.s390x.rpm repoid: rhel-8-for-s390x-baseos-rpms - size: 8057676 - checksum: sha256:eb9bf761561e1d4fe6350953c4a8d87ed1f56d97ac35343d525074d3d359083c + size: 8054388 + checksum: sha256:19f923c0be7cef9341ccaa8b6f8a2b42cbddfc340ed6b02984dfe52d761ccb9f name: python3-libs - evr: 3.6.8-71.el8_10 - sourcerpm: python3-3.6.8-71.el8_10.src.rpm + evr: 3.6.8-72.el8_10 + sourcerpm: python3-3.6.8-72.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/p/python3-pip-wheel-9.0.3-24.el8.noarch.rpm repoid: rhel-8-for-s390x-baseos-rpms size: 886996 @@ -4976,27 +4976,27 @@ arches: name: sqlite-libs evr: 3.26.0-20.el8_10 sourcerpm: sqlite-3.26.0-20.el8_10.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/s/systemd-239-82.el8_10.8.s390x.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/s/systemd-239-82.el8_10.13.s390x.rpm repoid: rhel-8-for-s390x-baseos-rpms - size: 3520204 - checksum: sha256:dd8d041a9dc0c0657ba446245f36e2aef4dfe36821d7678df431cc91e1389a44 + size: 3522640 + checksum: sha256:719d568c6145a738905efddd2aae97cf295f4217995871e97ebd314f3258ff8b name: systemd - evr: 239-82.el8_10.8 - sourcerpm: systemd-239-82.el8_10.8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/s/systemd-libs-239-82.el8_10.8.s390x.rpm + evr: 239-82.el8_10.13 + sourcerpm: systemd-239-82.el8_10.13.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/s/systemd-libs-239-82.el8_10.13.s390x.rpm repoid: rhel-8-for-s390x-baseos-rpms - size: 1065440 - checksum: sha256:233b13eae1aa0000b2deafd6d1e95e6dfa352887833a710794ad9ce62f4aa027 + size: 1067400 + checksum: 
sha256:a2e67d8e9a6c1cc3a02c3fce2923b0c918a6d0cb612fda63f375162404f489c8 name: systemd-libs - evr: 239-82.el8_10.8 - sourcerpm: systemd-239-82.el8_10.8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/s/systemd-pam-239-82.el8_10.8.s390x.rpm + evr: 239-82.el8_10.13 + sourcerpm: systemd-239-82.el8_10.13.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/s/systemd-pam-239-82.el8_10.13.s390x.rpm repoid: rhel-8-for-s390x-baseos-rpms - size: 487296 - checksum: sha256:0ef21f20d6154582fc178ecb94e9a326b53357ccd54e3c16eaa8d4dbad8cef34 + size: 488896 + checksum: sha256:5438bba37b2ab629d5c7bfb9c4f99a48e28c0bdc7edb2e058f59ed07118accc1 name: systemd-pam - evr: 239-82.el8_10.8 - sourcerpm: systemd-239-82.el8_10.8.src.rpm + evr: 239-82.el8_10.13 + sourcerpm: systemd-239-82.el8_10.13.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/t/tar-1.30-11.el8_10.s390x.rpm repoid: rhel-8-for-s390x-baseos-rpms size: 852716 @@ -5113,12 +5113,12 @@ arches: checksum: sha256:dc91b0b1230e700b03f6bf9b67e7e1888a40fb3cba04407be800ebe03b3f6632 name: perl-libnet evr: 3.11-3.el8 - - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/source/SRPMS/Packages/p/postgresql-15.14-1.module+el8.10.0+23423+5a199198.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/source/SRPMS/Packages/p/postgresql-15.15-1.module+el8.10.0+23782+2d6b2a31.src.rpm repoid: rhel-8-for-s390x-appstream-source-rpms - size: 53572917 - checksum: sha256:4a2c66b6b48cbf761ed5d454022f80fd6e63f89a84f095dac2683663960e9272 + size: 45654458 + checksum: sha256:30795de4ed7a01becc64ee50796e7c76b9195ff1eed0a341b279aeb3e4b15527 name: postgresql - evr: 15.14-1.module+el8.10.0+23423+5a199198 + evr: 15.15-1.module+el8.10.0+23782+2d6b2a31 - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/source/SRPMS/Packages/x/xkeyboard-config-2.28-1.el8.src.rpm repoid: rhel-8-for-s390x-appstream-source-rpms size: 1699339 @@ -5293,12 +5293,12 @@ arches: checksum: sha256:e91abeb46538fc264936c0eed825c28eab9eef47288c9eb1d2d4d078bccad5d1 name: gdbm evr: 1:1.18-2.el8 - - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/source/SRPMS/Packages/g/glib2-2.56.4-167.el8_10.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/source/SRPMS/Packages/g/glib2-2.56.4-168.el8_10.src.rpm repoid: rhel-8-for-s390x-baseos-source-rpms - size: 7164394 - checksum: sha256:80ee50b39aa478e1503dbd18626df91a023d30e3f9b6fb588fa82e6ce2b5972e + size: 7169961 + checksum: sha256:6b67584ae03d06c58331b29141f63b0b86e256ddef78ec9c48f80bdfcdb76890 name: glib2 - evr: 2.56.4-167.el8_10 + evr: 2.56.4-168.el8_10 - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/source/SRPMS/Packages/g/glibc-2.28-251.el8_10.27.src.rpm repoid: rhel-8-for-s390x-baseos-source-rpms size: 18525139 @@ -5521,12 +5521,12 @@ arches: checksum: sha256:764fa61f3a6678bf93d94351468e49863176420688ab4e8c1aa6a5eb84ecf23d name: lua evr: 5.3.4-12.el8 - - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/source/SRPMS/Packages/l/lvm2-2.03.14-15.el8_10.2.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/source/SRPMS/Packages/l/lvm2-2.03.14-15.el8_10.3.src.rpm repoid: rhel-8-for-s390x-baseos-source-rpms - size: 3185875 - checksum: sha256:c464de2287071dd2c498af02ce271d6153a354bfbce16b271c7131f98a55f8c8 + size: 3196004 + checksum: sha256:350e26dbc6f830fdb3548319013264cb2049858907c093a90b84a5945f6a4835 name: lvm2 - evr: 8:2.03.14-15.el8_10.2 
+ evr: 8:2.03.14-15.el8_10.3 - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/source/SRPMS/Packages/l/lz4-1.8.3-5.el8_10.src.rpm repoid: rhel-8-for-s390x-baseos-source-rpms size: 347074 @@ -5797,12 +5797,12 @@ arches: checksum: sha256:31ae9c84f36f7d4e51b0e945e5d12210594defd3ea16cf5645c21d42fd6332fa name: python-setuptools evr: 39.2.0-9.el8_10 - - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/source/SRPMS/Packages/p/python3-3.6.8-71.el8_10.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/source/SRPMS/Packages/p/python3-3.6.8-72.el8_10.src.rpm repoid: rhel-8-for-s390x-baseos-source-rpms - size: 19241145 - checksum: sha256:e7012ea23f3816772d64357f7db534f83c55279bcbefdecaefe2573d4734c6a3 + size: 19243590 + checksum: sha256:55193ea63a0505c0beb8f705ce3edb4299c661044695ce22b8321b51485341d5 name: python3 - evr: 3.6.8-71.el8_10 + evr: 3.6.8-72.el8_10 - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/source/SRPMS/Packages/r/readline-7.0-10.el8.src.rpm repoid: rhel-8-for-s390x-baseos-source-rpms size: 2937518 @@ -5863,12 +5863,12 @@ arches: checksum: sha256:26dc49ea369dc145166e0a3959cc132f45e3345b99a75420c8932af24f44668c name: sqlite evr: 3.26.0-20.el8_10 - - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/source/SRPMS/Packages/s/systemd-239-82.el8_10.8.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/source/SRPMS/Packages/s/systemd-239-82.el8_10.13.src.rpm repoid: rhel-8-for-s390x-baseos-source-rpms - size: 9188443 - checksum: sha256:a3ade60f73bb3137b94ac38205c321511b70e2bf61b79e2a25e31015fb415844 + size: 9204931 + checksum: sha256:6176d1736de4a4ff55021f7ad56e8cf5fdb459514f2ac45422c41bbfd5957a7b name: systemd - evr: 239-82.el8_10.8 + evr: 239-82.el8_10.13 - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/source/SRPMS/Packages/t/tar-1.30-11.el8_10.src.rpm repoid: rhel-8-for-s390x-baseos-source-rpms size: 2173356 @@ -5918,10 +5918,10 @@ arches: name: zstd evr: 1.4.4-1.el8 module_metadata: - - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/os/repodata/2aab1f5f2b7b1ae8a7f1106bb2ffc435b97804e418e7c63776b5dc8dca03888b-modules.yaml.gz + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/os/repodata/d817cfb2041d9563617f1459186cd4ee7aa2c8c94fe9ef9922df8145a0f77ff4-modules.yaml.gz repoid: rhel-8-for-s390x-appstream-rpms - size: 755049 - checksum: sha256:2aab1f5f2b7b1ae8a7f1106bb2ffc435b97804e418e7c63776b5dc8dca03888b + size: 759639 + checksum: sha256:d817cfb2041d9563617f1459186cd4ee7aa2c8c94fe9ef9922df8145a0f77ff4 - arch: x86_64 packages: - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/os/Packages/j/jq-1.6-11.el8_10.x86_64.rpm @@ -5945,20 +5945,20 @@ arches: name: oniguruma evr: 6.8.2-3.el8 sourcerpm: oniguruma-6.8.2-3.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/os/Packages/p/postgresql-15.14-1.module+el8.10.0+23423+5a199198.x86_64.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/os/Packages/p/postgresql-15.15-1.module+el8.10.0+23782+2d6b2a31.x86_64.rpm repoid: rhel-8-for-x86_64-appstream-rpms - size: 1813083 - checksum: sha256:c2cf4c01ed074e4516957fb5f846e667b47a5743d99897713906f0d45290477f + size: 1825339 + checksum: sha256:f2a10392b2b79daaa1d16f10792f51abe4d1a2bbb1478827fd96a2d70b943d52 name: postgresql - evr: 15.14-1.module+el8.10.0+23423+5a199198 - sourcerpm: postgresql-15.14-1.module+el8.10.0+23423+5a199198.src.rpm - - url: 
https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/os/Packages/p/postgresql-private-libs-15.14-1.module+el8.10.0+23423+5a199198.x86_64.rpm + evr: 15.15-1.module+el8.10.0+23782+2d6b2a31 + sourcerpm: postgresql-15.15-1.module+el8.10.0+23782+2d6b2a31.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/os/Packages/p/postgresql-private-libs-15.15-1.module+el8.10.0+23782+2d6b2a31.x86_64.rpm repoid: rhel-8-for-x86_64-appstream-rpms - size: 135811 - checksum: sha256:81a9c927ccdca57c8087daebdda3727b4324089c86da2f748b567d200b4b17a4 + size: 136383 + checksum: sha256:895d79b877c063d33ef9806403e5a70815e7c5720556dd93b1aa3d041b1703e8 name: postgresql-private-libs - evr: 15.14-1.module+el8.10.0+23423+5a199198 - sourcerpm: postgresql-15.14-1.module+el8.10.0+23423+5a199198.src.rpm + evr: 15.15-1.module+el8.10.0+23782+2d6b2a31 + sourcerpm: postgresql-15.15-1.module+el8.10.0+23782+2d6b2a31.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/os/Packages/x/xkeyboard-config-2.28-1.el8.noarch.rpm repoid: rhel-8-for-x86_64-appstream-rpms size: 801000 @@ -6127,20 +6127,20 @@ arches: name: dbus-tools evr: 1:1.12.8-27.el8_10 sourcerpm: dbus-1.12.8-27.el8_10.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/d/device-mapper-1.02.181-15.el8_10.2.x86_64.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/d/device-mapper-1.02.181-15.el8_10.3.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms - size: 388312 - checksum: sha256:27d2bc6fa33c8b98a37e29161a78ed505c27ecc7daaa10517cdcacc2f99ebbbf + size: 387384 + checksum: sha256:05cb08497da4dbfb3074826936eabb3ef0eee0f8908021efa2a88157951de82b name: device-mapper - evr: 8:1.02.181-15.el8_10.2 - sourcerpm: lvm2-2.03.14-15.el8_10.2.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/d/device-mapper-libs-1.02.181-15.el8_10.2.x86_64.rpm + evr: 8:1.02.181-15.el8_10.3 + sourcerpm: lvm2-2.03.14-15.el8_10.3.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/d/device-mapper-libs-1.02.181-15.el8_10.3.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms - size: 421472 - checksum: sha256:87b14770a42ae859889e69bd29b9d368e080e0635b86d6d651d84aa0949255d5 + size: 420456 + checksum: sha256:c47db9dc78ddb13dae0be22de6ea1d5df56c2f0754d99c5b45e6642717ed3508 name: device-mapper-libs - evr: 8:1.02.181-15.el8_10.2 - sourcerpm: lvm2-2.03.14-15.el8_10.2.src.rpm + evr: 8:1.02.181-15.el8_10.3 + sourcerpm: lvm2-2.03.14-15.el8_10.3.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/d/diffutils-3.6-6.el8.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms size: 367420 @@ -6253,13 +6253,13 @@ arches: name: gettext-libs evr: 0.19.8.1-17.el8 sourcerpm: gettext-0.19.8.1-17.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/g/glib2-2.56.4-167.el8_10.x86_64.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/g/glib2-2.56.4-168.el8_10.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms - size: 2614852 - checksum: sha256:c3f59a03d02b1ee00cca137485a66b63842e395eca465f0663426ac6933bd6ff + size: 2615096 + checksum: sha256:5cf4e3858b66203dab484bb768c4e59c7c5c5061dd9d635c60406e9369f9a7d3 name: glib2 - evr: 2.56.4-167.el8_10 - sourcerpm: glib2-2.56.4-167.el8_10.src.rpm + evr: 2.56.4-168.el8_10 + sourcerpm: glib2-2.56.4-168.el8_10.src.rpm - url: 
https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/g/glibc-2.28-251.el8_10.27.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms size: 2307356 @@ -6806,13 +6806,13 @@ arches: name: openssl-pkcs11 evr: 0.4.10-3.el8 sourcerpm: openssl-pkcs11-0.4.10-3.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/o/os-prober-1.74-9.el8.x86_64.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/o/os-prober-1.74-11.el8_10.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms - size: 52600 - checksum: sha256:2711faf7ba62de2e1b8254f1787be9be2e1354cc43a64af2744f32f16877ebfd + size: 52056 + checksum: sha256:e39e4cd7ded77fdb9832d05884a5c8cd911493613ca51788f58bfc2f87951241 name: os-prober - evr: 1.74-9.el8 - sourcerpm: os-prober-1.74-9.el8.src.rpm + evr: 1.74-11.el8_10 + sourcerpm: os-prober-1.74-11.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/p/p11-kit-0.23.22-2.el8.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms size: 334852 @@ -6855,13 +6855,13 @@ arches: name: pigz evr: 2.4-4.el8 sourcerpm: pigz-2.4-4.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/p/platform-python-3.6.8-71.el8_10.x86_64.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/p/platform-python-3.6.8-72.el8_10.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms - size: 89788 - checksum: sha256:225f9e81e7ff60618c43bdb2fd9d46b43c1ec7d195faf7704dacead5f6bbffe4 + size: 89900 + checksum: sha256:65927e53c1bc6aa3de47556ab68d5fef84dea6043545c493824ff3bb7a263b09 name: platform-python - evr: 3.6.8-71.el8_10 - sourcerpm: python3-3.6.8-71.el8_10.src.rpm + evr: 3.6.8-72.el8_10 + sourcerpm: python3-3.6.8-72.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/p/platform-python-pip-9.0.3-24.el8.noarch.rpm repoid: rhel-8-for-x86_64-baseos-rpms size: 1633024 @@ -6897,13 +6897,13 @@ arches: name: publicsuffix-list-dafsa evr: 20180723-1.el8 sourcerpm: publicsuffix-list-20180723-1.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/p/python3-libs-3.6.8-71.el8_10.x86_64.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/p/python3-libs-3.6.8-72.el8_10.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms - size: 8250824 - checksum: sha256:b80ef80e565941803678ce69506358c269cb2d02a862642199a5b22d20ca52a4 + size: 8250656 + checksum: sha256:894b54365c7cd91ba029efcb98714e6ac3886971554baeb16cef883d5687b473 name: python3-libs - evr: 3.6.8-71.el8_10 - sourcerpm: python3-3.6.8-71.el8_10.src.rpm + evr: 3.6.8-72.el8_10 + sourcerpm: python3-3.6.8-72.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/p/python3-pip-wheel-9.0.3-24.el8.noarch.rpm repoid: rhel-8-for-x86_64-baseos-rpms size: 886996 @@ -6988,34 +6988,34 @@ arches: name: sqlite-libs evr: 3.26.0-20.el8_10 sourcerpm: sqlite-3.26.0-20.el8_10.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/s/systemd-239-82.el8_10.8.x86_64.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/s/systemd-239-82.el8_10.13.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms - size: 3828188 - checksum: sha256:f4610daaffe36789deafdead6dfb621e1e722b151541bf259bffcaf892ce9bb2 + size: 3830800 + checksum: sha256:30e7904ba7d991dd821d4be6cfee6dc6db5a65a8bd6e482502e6d2d6e71c58e9 name: systemd - evr: 239-82.el8_10.8 - sourcerpm: 
systemd-239-82.el8_10.8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/s/systemd-libs-239-82.el8_10.8.x86_64.rpm + evr: 239-82.el8_10.13 + sourcerpm: systemd-239-82.el8_10.13.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/s/systemd-libs-239-82.el8_10.13.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms - size: 1197600 - checksum: sha256:d77b012f317bb7736f25e2b03bc912f983bb265c4447890eb41a009ce3f5fa56 + size: 1199360 + checksum: sha256:af1bc1cd605f1e08f6e7a3e073338565f8608b2a74232be9892a047f88524128 name: systemd-libs - evr: 239-82.el8_10.8 - sourcerpm: systemd-239-82.el8_10.8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/s/systemd-pam-239-82.el8_10.8.x86_64.rpm + evr: 239-82.el8_10.13 + sourcerpm: systemd-239-82.el8_10.13.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/s/systemd-pam-239-82.el8_10.13.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms - size: 527476 - checksum: sha256:41a9be02b2a65f87bdf1546a3a034c5cd5aea85e187e9e1bc45efbe30897b3ee + size: 529240 + checksum: sha256:b1d738e31d1db8c3759f5e9913fcc7ec7b1fbfd90a2f271d18f8a30ca050e57a name: systemd-pam - evr: 239-82.el8_10.8 - sourcerpm: systemd-239-82.el8_10.8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/s/systemd-udev-239-82.el8_10.8.x86_64.rpm + evr: 239-82.el8_10.13 + sourcerpm: systemd-239-82.el8_10.13.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/s/systemd-udev-239-82.el8_10.13.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms - size: 1665148 - checksum: sha256:c6b70e68144dc63475ef6bc43eb1483dd9b06b56929d1d22f660ebfe871f487b + size: 1666576 + checksum: sha256:c0a90b3e83143b2160a1e850502e54299986fe29e0382c297f7d0121e1aed4f7 name: systemd-udev - evr: 239-82.el8_10.8 - sourcerpm: systemd-239-82.el8_10.8.src.rpm + evr: 239-82.el8_10.13 + sourcerpm: systemd-239-82.el8_10.13.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/t/trousers-0.3.15-2.el8.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms size: 156324 @@ -7091,12 +7091,12 @@ arches: checksum: sha256:31cd372131f6eb404ce90285210fd74021914b4eb52e933b2aeebfa955099faa name: oniguruma evr: 6.8.2-3.el8 - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/source/SRPMS/Packages/p/postgresql-15.14-1.module+el8.10.0+23423+5a199198.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/source/SRPMS/Packages/p/postgresql-15.15-1.module+el8.10.0+23782+2d6b2a31.src.rpm repoid: rhel-8-for-x86_64-appstream-source-rpms - size: 53572917 - checksum: sha256:4a2c66b6b48cbf761ed5d454022f80fd6e63f89a84f095dac2683663960e9272 + size: 45654458 + checksum: sha256:30795de4ed7a01becc64ee50796e7c76b9195ff1eed0a341b279aeb3e4b15527 name: postgresql - evr: 15.14-1.module+el8.10.0+23423+5a199198 + evr: 15.15-1.module+el8.10.0+23782+2d6b2a31 - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/source/SRPMS/Packages/x/xkeyboard-config-2.28-1.el8.src.rpm repoid: rhel-8-for-x86_64-appstream-source-rpms size: 1699339 @@ -7283,12 +7283,12 @@ arches: checksum: sha256:114be9b072a7726f2ac557fda6b8a86254ae3b7ed984ed14cfa7733bea9005d4 name: gettext evr: 0.19.8.1-17.el8 - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/source/SRPMS/Packages/g/glib2-2.56.4-167.el8_10.src.rpm + - url: 
https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/source/SRPMS/Packages/g/glib2-2.56.4-168.el8_10.src.rpm repoid: rhel-8-for-x86_64-baseos-source-rpms - size: 7164394 - checksum: sha256:80ee50b39aa478e1503dbd18626df91a023d30e3f9b6fb588fa82e6ce2b5972e + size: 7169961 + checksum: sha256:6b67584ae03d06c58331b29141f63b0b86e256ddef78ec9c48f80bdfcdb76890 name: glib2 - evr: 2.56.4-167.el8_10 + evr: 2.56.4-168.el8_10 - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/source/SRPMS/Packages/g/glibc-2.28-251.el8_10.27.src.rpm repoid: rhel-8-for-x86_64-baseos-source-rpms size: 18525139 @@ -7529,12 +7529,12 @@ arches: checksum: sha256:764fa61f3a6678bf93d94351468e49863176420688ab4e8c1aa6a5eb84ecf23d name: lua evr: 5.3.4-12.el8 - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/source/SRPMS/Packages/l/lvm2-2.03.14-15.el8_10.2.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/source/SRPMS/Packages/l/lvm2-2.03.14-15.el8_10.3.src.rpm repoid: rhel-8-for-x86_64-baseos-source-rpms - size: 3185875 - checksum: sha256:c464de2287071dd2c498af02ce271d6153a354bfbce16b271c7131f98a55f8c8 + size: 3196004 + checksum: sha256:350e26dbc6f830fdb3548319013264cb2049858907c093a90b84a5945f6a4835 name: lvm2 - evr: 8:2.03.14-15.el8_10.2 + evr: 8:2.03.14-15.el8_10.3 - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/source/SRPMS/Packages/l/lz4-1.8.3-5.el8_10.src.rpm repoid: rhel-8-for-x86_64-baseos-source-rpms size: 347074 @@ -7589,12 +7589,12 @@ arches: checksum: sha256:a737e7fe890c5f53c1bc0c5925375791d8890f9d51c4a509091b41efa3f92861 name: openssl-pkcs11 evr: 0.4.10-3.el8 - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/source/SRPMS/Packages/o/os-prober-1.74-9.el8.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/source/SRPMS/Packages/o/os-prober-1.74-11.el8_10.src.rpm repoid: rhel-8-for-x86_64-baseos-source-rpms - size: 55171 - checksum: sha256:0577008638e1644fed230d55b221b485e6cdc702cda9c27cf74ab7adcb8b8f00 + size: 55952 + checksum: sha256:602a6f146d9b36de4d52f744ad8d4084b87515e16a04c98c7d21368e7351194e name: os-prober - evr: 1.74-9.el8 + evr: 1.74-11.el8_10 - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/source/SRPMS/Packages/p/p11-kit-0.23.22-2.el8.src.rpm repoid: rhel-8-for-x86_64-baseos-source-rpms size: 909983 @@ -7655,12 +7655,12 @@ arches: checksum: sha256:31ae9c84f36f7d4e51b0e945e5d12210594defd3ea16cf5645c21d42fd6332fa name: python-setuptools evr: 39.2.0-9.el8_10 - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/source/SRPMS/Packages/p/python3-3.6.8-71.el8_10.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/source/SRPMS/Packages/p/python3-3.6.8-72.el8_10.src.rpm repoid: rhel-8-for-x86_64-baseos-source-rpms - size: 19241145 - checksum: sha256:e7012ea23f3816772d64357f7db534f83c55279bcbefdecaefe2573d4734c6a3 + size: 19243590 + checksum: sha256:55193ea63a0505c0beb8f705ce3edb4299c661044695ce22b8321b51485341d5 name: python3 - evr: 3.6.8-71.el8_10 + evr: 3.6.8-72.el8_10 - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/source/SRPMS/Packages/r/readline-7.0-10.el8.src.rpm repoid: rhel-8-for-x86_64-baseos-source-rpms size: 2937518 @@ -7709,12 +7709,12 @@ arches: checksum: sha256:26dc49ea369dc145166e0a3959cc132f45e3345b99a75420c8932af24f44668c name: sqlite evr: 3.26.0-20.el8_10 - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/source/SRPMS/Packages/s/systemd-239-82.el8_10.8.src.rpm + - url: 
https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/source/SRPMS/Packages/s/systemd-239-82.el8_10.13.src.rpm repoid: rhel-8-for-x86_64-baseos-source-rpms - size: 9188443 - checksum: sha256:a3ade60f73bb3137b94ac38205c321511b70e2bf61b79e2a25e31015fb415844 + size: 9204931 + checksum: sha256:6176d1736de4a4ff55021f7ad56e8cf5fdb459514f2ac45422c41bbfd5957a7b name: systemd - evr: 239-82.el8_10.8 + evr: 239-82.el8_10.13 - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/source/SRPMS/Packages/t/texinfo-6.5-7.el8.src.rpm repoid: rhel-8-for-x86_64-baseos-source-rpms size: 4544531 @@ -7764,7 +7764,7 @@ arches: name: zstd evr: 1.4.4-1.el8 module_metadata: - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/os/repodata/c93f84d60dd3f9517e7fb58a40d7b88b99fca36bfc2b19cfdb461a6b511993d9-modules.yaml.gz + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/os/repodata/8a32578434a9fadf8e1c3fcc5507caa7952479a54fe8042c996b50fea309cd30-modules.yaml.gz repoid: rhel-8-for-x86_64-appstream-rpms - size: 779063 - checksum: sha256:c93f84d60dd3f9517e7fb58a40d7b88b99fca36bfc2b19cfdb461a6b511993d9 + size: 784123 + checksum: sha256:8a32578434a9fadf8e1c3fcc5507caa7952479a54fe8042c996b50fea309cd30 diff --git a/scanner/image/db/konflux.Dockerfile b/scanner/image/db/konflux.Dockerfile index 20e9af6d20e25..d91c722b4a27b 100644 --- a/scanner/image/db/konflux.Dockerfile +++ b/scanner/image/db/konflux.Dockerfile @@ -1,4 +1,4 @@ -FROM registry.redhat.io/rhel8/postgresql-15:latest@sha256:042f6efe0f16e94ffb2d0a3bede852bb026b6dce661ac5b339e6f63846467b9d +FROM registry.redhat.io/rhel8/postgresql-15:latest@sha256:103fd3b9deeea2a7c7d16af246ee5274bcd0f8b9e508485530c5b42ea2b9916c ARG BUILD_TAG RUN if [[ "$BUILD_TAG" == "" ]]; then >&2 echo "error: required BUILD_TAG arg is unset"; exit 6; fi diff --git a/scanner/image/scanner/konflux.Dockerfile b/scanner/image/scanner/konflux.Dockerfile index b0c4519d56dbd..17b1dee562a1e 100644 --- a/scanner/image/scanner/konflux.Dockerfile +++ b/scanner/image/scanner/konflux.Dockerfile @@ -17,7 +17,7 @@ WORKDIR /src RUN make -C scanner NODEPS=1 CGO_ENABLED=1 image/scanner/bin/scanner copy-scripts -FROM registry.access.redhat.com/ubi8-minimal:latest@sha256:a670c5b613280e17a666c858c9263a50aafe1a023a8d5730c7a83cb53771487b +FROM registry.access.redhat.com/ubi8-minimal:latest@sha256:5dc6ba426ccbeb3954ead6b015f36b4a2d22320e5b356b074198d08422464ed2 ARG BUILD_TAG From 5fad0a47e66e16986582e3b130bdb45bee5ff632 Mon Sep 17 00:00:00 2001 From: David Shrewsberry <99685630+dashrews78@users.noreply.github.com> Date: Tue, 3 Feb 2026 15:18:33 -0500 Subject: [PATCH 108/232] ROX-32814: auto apply sac to select queries (#18753) --- .../checkresults/datastore/datastore_impl.go | 30 ------------ .../integration/datastore/datastore_impl.go | 7 --- .../internal/store/postgres/full_store.go | 7 +-- central/views/deployments/view_impl.go | 9 +--- central/views/imagecomponentflat/view_impl.go | 15 +----- central/views/imagecve/view_impl.go | 46 +++---------------- central/views/imagecveflat/view_impl.go | 17 +------ central/views/images/view_impl.go | 9 +--- central/views/nodecve/view_impl.go | 37 ++------------- central/views/platformcve/view_impl.go | 46 ++----------------- pkg/search/postgres/common_test.go | 6 +-- pkg/search/postgres/sac.go | 4 ++ pkg/search/postgres/select.go | 5 ++ pkg/search/postgres/testutils.go | 4 +- 14 files changed, 36 insertions(+), 206 deletions(-) diff --git a/central/complianceoperator/v2/checkresults/datastore/datastore_impl.go 
b/central/complianceoperator/v2/checkresults/datastore/datastore_impl.go index ed2471e3ffd2d..2b8acc452c2b8 100644 --- a/central/complianceoperator/v2/checkresults/datastore/datastore_impl.go +++ b/central/complianceoperator/v2/checkresults/datastore/datastore_impl.go @@ -7,7 +7,6 @@ import ( "github.com/pkg/errors" store "github.com/stackrox/rox/central/complianceoperator/v2/checkresults/store/postgres" - complianceUtils "github.com/stackrox/rox/central/complianceoperator/v2/utils" "github.com/stackrox/rox/central/metrics" v1 "github.com/stackrox/rox/generated/api/v1" "github.com/stackrox/rox/generated/storage" @@ -77,12 +76,6 @@ func (d *datastoreImpl) GetComplianceCheckResult(ctx context.Context, compliance func (d *datastoreImpl) ComplianceCheckResultStats(ctx context.Context, query *v1.Query) ([]*ResourceResultCountByClusterScan, error) { defer metrics.SetDatastoreFunctionDuration(time.Now(), "ComplianceOperatorCheckResultV2", "ComplianceCheckResultStats") - var err error - query, err = complianceUtils.WithSACFilter(ctx, resources.Compliance, query) - if err != nil { - return nil, err - } - cloned := query.CloneVT() cloned.Selects = []*v1.QuerySelect{ search.NewQuerySelect(search.ClusterID).Proto(), @@ -127,12 +120,6 @@ func (d *datastoreImpl) ComplianceCheckResultStats(ctx context.Context, query *v func (d *datastoreImpl) ComplianceProfileResultStats(ctx context.Context, query *v1.Query) ([]*ResourceResultCountByProfile, error) { defer metrics.SetDatastoreFunctionDuration(time.Now(), "ComplianceOperatorCheckResultV2", "ComplianceProfileResultStats") - var err error - query, err = complianceUtils.WithSACFilter(ctx, resources.Compliance, query) - if err != nil { - return nil, err - } - cloned := query.CloneVT() cloned.Selects = []*v1.QuerySelect{ search.NewQuerySelect(search.ComplianceOperatorProfileName).Proto(), @@ -167,12 +154,6 @@ func (d *datastoreImpl) ComplianceProfileResultStats(ctx context.Context, query func (d *datastoreImpl) ComplianceProfileResults(ctx context.Context, query *v1.Query) ([]*ResourceResultsByProfile, error) { defer metrics.SetDatastoreFunctionDuration(time.Now(), "ComplianceOperatorCheckResultV2", "ComplianceProfileResultStats") - var err error - query, err = complianceUtils.WithSACFilter(ctx, resources.Compliance, query) - if err != nil { - return nil, err - } - cloned := query.CloneVT() cloned.Selects = []*v1.QuerySelect{ search.NewQuerySelect(search.ComplianceOperatorProfileName).Proto(), @@ -222,12 +203,6 @@ func (d *datastoreImpl) ComplianceProfileResults(ctx context.Context, query *v1. 
func (d *datastoreImpl) ComplianceClusterStats(ctx context.Context, query *v1.Query) ([]*ResultStatusCountByCluster, error) { defer metrics.SetDatastoreFunctionDuration(time.Now(), "ComplianceOperatorCheckResultV2", "ComplianceClusterStats") - var err error - query, err = complianceUtils.WithSACFilter(ctx, resources.Compliance, query) - if err != nil { - return nil, err - } - cloned := query.CloneVT() cloned.Selects = []*v1.QuerySelect{ search.NewQuerySelect(search.ClusterID).Proto(), @@ -267,11 +242,6 @@ func (d *datastoreImpl) ComplianceClusterStats(ctx context.Context, query *v1.Qu // CountByField retrieves the distinct scan result counts specified by query based on specified search field func (d *datastoreImpl) CountByField(ctx context.Context, query *v1.Query, field search.FieldLabel) (int, error) { - var err error - query, err = complianceUtils.WithSACFilter(ctx, resources.Compliance, query) - if err != nil { - return 0, err - } switch field { case search.ClusterID: diff --git a/central/complianceoperator/v2/integration/datastore/datastore_impl.go b/central/complianceoperator/v2/integration/datastore/datastore_impl.go index ef67c1cdf92f0..e6a8a855d3bb4 100644 --- a/central/complianceoperator/v2/integration/datastore/datastore_impl.go +++ b/central/complianceoperator/v2/integration/datastore/datastore_impl.go @@ -5,7 +5,6 @@ import ( "github.com/pkg/errors" store "github.com/stackrox/rox/central/complianceoperator/v2/integration/store/postgres" - complianceUtils "github.com/stackrox/rox/central/complianceoperator/v2/utils" v1 "github.com/stackrox/rox/generated/api/v1" "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/errox" @@ -45,12 +44,6 @@ func (ds *datastoreImpl) GetComplianceIntegrations(ctx context.Context, query *v // GetComplianceIntegrationsView provides an in memory layer on top of the underlying DB based storage. 
func (ds *datastoreImpl) GetComplianceIntegrationsView(ctx context.Context, query *v1.Query) ([]*IntegrationDetails, error) { - var err error - query, err = complianceUtils.WithSACFilter(ctx, resources.Compliance, query) - if err != nil { - return nil, err - } - cloned := query.CloneVT() cloned.Selects = []*v1.QuerySelect{ search.NewQuerySelect(search.Cluster).Proto(), diff --git a/central/deployment/datastore/internal/store/postgres/full_store.go b/central/deployment/datastore/internal/store/postgres/full_store.go index d162412243bde..2fe4155349120 100644 --- a/central/deployment/datastore/internal/store/postgres/full_store.go +++ b/central/deployment/datastore/internal/store/postgres/full_store.go @@ -14,7 +14,6 @@ import ( "github.com/stackrox/rox/pkg/env" "github.com/stackrox/rox/pkg/postgres" pkgSchema "github.com/stackrox/rox/pkg/postgres/schema" - "github.com/stackrox/rox/pkg/sac/resources" pkgSearch "github.com/stackrox/rox/pkg/search" pgSearch "github.com/stackrox/rox/pkg/search/postgres" "gorm.io/gorm" @@ -71,10 +70,6 @@ func (f *fullStoreImpl) GetContainerImageViews(ctx context.Context, q *v1.Query) if err := common.ValidateQuery(q); err != nil { return nil, err } - q, err := common.WithSACFilter(ctx, resources.Deployment, q) - if err != nil { - return nil, err - } q.Selects = []*v1.QuerySelect{ pkgSearch.NewQuerySelect(pkgSearch.ImageID).Proto(), pkgSearch.NewQuerySelect(pkgSearch.ImageSHA).Proto(), @@ -88,7 +83,7 @@ func (f *fullStoreImpl) GetContainerImageViews(ctx context.Context, q *v1.Query) defer cancel() var results []*views.ContainerImageView - err = pgSearch.RunSelectRequestForSchemaFn(queryCtx, f.db, pkgSchema.DeploymentsSchema, q, func(response *views.ContainerImageView) error { + err := pgSearch.RunSelectRequestForSchemaFn(queryCtx, f.db, pkgSchema.DeploymentsSchema, q, func(response *views.ContainerImageView) error { results = append(results, response) return nil }) diff --git a/central/views/deployments/view_impl.go b/central/views/deployments/view_impl.go index 53b789c48fb8d..6b022a46e7e94 100644 --- a/central/views/deployments/view_impl.go +++ b/central/views/deployments/view_impl.go @@ -9,7 +9,6 @@ import ( "github.com/stackrox/rox/pkg/env" "github.com/stackrox/rox/pkg/postgres" "github.com/stackrox/rox/pkg/postgres/walker" - "github.com/stackrox/rox/pkg/sac/resources" "github.com/stackrox/rox/pkg/search" pgSearch "github.com/stackrox/rox/pkg/search/postgres" ) @@ -28,20 +27,14 @@ func (v *deploymentViewImpl) Get(ctx context.Context, query *v1.Query) ([]Deploy return nil, err } - var err error // Update the sort options to use aggregations if necessary as we are grouping by CVEs query = common.UpdateSortAggs(query) - query, err = common.WithSACFilter(ctx, resources.Deployment, query) - if err != nil { - return nil, err - } query = withSelectQuery(query) queryCtx, cancel := contextutil.ContextWithTimeoutIfNotExists(ctx, queryTimeout) defer cancel() - var results []*deploymentResponse - results, err = pgSearch.RunSelectRequestForSchema[deploymentResponse](queryCtx, v.db, v.schema, query) + results, err := pgSearch.RunSelectRequestForSchema[deploymentResponse](queryCtx, v.db, v.schema, query) if err != nil { return nil, err } diff --git a/central/views/imagecomponentflat/view_impl.go b/central/views/imagecomponentflat/view_impl.go index 3c87cc47b9894..40ba739a4b52d 100644 --- a/central/views/imagecomponentflat/view_impl.go +++ b/central/views/imagecomponentflat/view_impl.go @@ -10,7 +10,6 @@ import ( "github.com/stackrox/rox/pkg/env" 
"github.com/stackrox/rox/pkg/postgres" "github.com/stackrox/rox/pkg/postgres/walker" - "github.com/stackrox/rox/pkg/sac/resources" "github.com/stackrox/rox/pkg/search" pgSearch "github.com/stackrox/rox/pkg/search/postgres" "github.com/stackrox/rox/pkg/search/postgres/aggregatefunc" @@ -30,12 +29,6 @@ func (v *imageComponentFlatViewImpl) Count(ctx context.Context, q *v1.Query) (in return 0, err } - var err error - q, err = common.WithSACFilter(ctx, resources.Image, q) - if err != nil { - return 0, err - } - queryCtx, cancel := contextutil.ContextWithTimeoutIfNotExists(ctx, queryTimeout) defer cancel() @@ -56,21 +49,15 @@ func (v *imageComponentFlatViewImpl) Get(ctx context.Context, q *v1.Query) ([]Co return nil, err } - var err error // Avoid changing the passed query cloned := q.CloneVT() // Update the sort options to use aggregations if necessary as we are grouping by CVEs cloned = common.UpdateSortAggs(cloned) - cloned, err = common.WithSACFilter(ctx, resources.Image, cloned) - if err != nil { - return nil, err - } queryCtx, cancel := contextutil.ContextWithTimeoutIfNotExists(ctx, queryTimeout) defer cancel() - var results []*imageComponentFlatResponse - results, err = pgSearch.RunSelectRequestForSchema[imageComponentFlatResponse](queryCtx, v.db, v.schema, withSelectComponentCoreResponseQuery(cloned)) + results, err := pgSearch.RunSelectRequestForSchema[imageComponentFlatResponse](queryCtx, v.db, v.schema, withSelectComponentCoreResponseQuery(cloned)) if err != nil { return nil, err } diff --git a/central/views/imagecve/view_impl.go b/central/views/imagecve/view_impl.go index 24f9873173437..08a746f5333f4 100644 --- a/central/views/imagecve/view_impl.go +++ b/central/views/imagecve/view_impl.go @@ -13,7 +13,6 @@ import ( "github.com/stackrox/rox/pkg/features" "github.com/stackrox/rox/pkg/postgres" "github.com/stackrox/rox/pkg/postgres/walker" - "github.com/stackrox/rox/pkg/sac/resources" "github.com/stackrox/rox/pkg/search" pgSearch "github.com/stackrox/rox/pkg/search/postgres" "github.com/stackrox/rox/pkg/search/postgres/aggregatefunc" @@ -34,17 +33,10 @@ func (v *imageCVECoreViewImpl) Count(ctx context.Context, q *v1.Query) (int, err return 0, err } - var err error - q, err = common.WithSACFilter(ctx, resources.Image, q) - if err != nil { - return 0, err - } - queryCtx, cancel := contextutil.ContextWithTimeoutIfNotExists(ctx, queryTimeout) defer cancel() - var results []*imageCVECoreCount - results, err = pgSearch.RunSelectRequestForSchema[imageCVECoreCount](queryCtx, v.db, v.schema, common.WithCountQuery(q, search.CVE)) + results, err := pgSearch.RunSelectRequestForSchema[imageCVECoreCount](queryCtx, v.db, v.schema, common.WithCountQuery(q, search.CVE)) if err != nil { return 0, err } @@ -64,17 +56,10 @@ func (v *imageCVECoreViewImpl) CountBySeverity(ctx context.Context, q *v1.Query) return nil, err } - var err error - q, err = common.WithSACFilter(ctx, resources.Image, q) - if err != nil { - return nil, err - } - queryCtx, cancel := contextutil.ContextWithTimeoutIfNotExists(ctx, queryTimeout) defer cancel() - var results []*common.ResourceCountByImageCVESeverity - results, err = pgSearch.RunSelectRequestForSchema[common.ResourceCountByImageCVESeverity](queryCtx, v.db, v.schema, common.WithCountBySeverityAndFixabilityQuery(q, search.CVE)) + results, err := pgSearch.RunSelectRequestForSchema[common.ResourceCountByImageCVESeverity](queryCtx, v.db, v.schema, common.WithCountBySeverityAndFixabilityQuery(q, search.CVE)) if err != nil { return nil, err } @@ -110,17 +95,13 @@ func (v 
*imageCVECoreViewImpl) Get(ctx context.Context, q *v1.Query, options vie return nil, err } - var err error // Avoid changing the passed query cloned := q.CloneVT() // Update the sort options to use aggregations if necessary as we are grouping by CVEs cloned = common.UpdateSortAggs(cloned) - cloned, err = common.WithSACFilter(ctx, resources.Image, cloned) - if err != nil { - return nil, err - } var cveIDsToFilter []string + var err error if cloned.GetPagination().GetLimit() > 0 || cloned.GetPagination().GetOffset() > 0 { cveIDsToFilter, err = v.getFilteredCVEs(ctx, cloned) if err != nil { @@ -154,12 +135,6 @@ func (v *imageCVECoreViewImpl) Get(ctx context.Context, q *v1.Query, options vie } func (v *imageCVECoreViewImpl) GetDeploymentIDs(ctx context.Context, q *v1.Query) ([]string, error) { - var err error - q, err = common.WithSACFilter(ctx, resources.Deployment, q) - if err != nil { - return nil, err - } - q.Selects = []*v1.QuerySelect{ search.NewQuerySelect(search.DeploymentID).Distinct().Proto(), } @@ -167,8 +142,7 @@ func (v *imageCVECoreViewImpl) GetDeploymentIDs(ctx context.Context, q *v1.Query queryCtx, cancel := contextutil.ContextWithTimeoutIfNotExists(ctx, queryTimeout) defer cancel() - var results []*deploymentResponse - results, err = pgSearch.RunSelectRequestForSchema[deploymentResponse](queryCtx, v.db, v.schema, q) + results, err := pgSearch.RunSelectRequestForSchema[deploymentResponse](queryCtx, v.db, v.schema, q) if err != nil || len(results) == 0 { return nil, err } @@ -181,12 +155,6 @@ func (v *imageCVECoreViewImpl) GetDeploymentIDs(ctx context.Context, q *v1.Query } func (v *imageCVECoreViewImpl) GetImageIDs(ctx context.Context, q *v1.Query) ([]string, error) { - var err error - q, err = common.WithSACFilter(ctx, resources.Image, q) - if err != nil { - return nil, err - } - searchField := search.ImageSHA if features.FlattenImageData.Enabled() { searchField = search.ImageID @@ -199,8 +167,7 @@ func (v *imageCVECoreViewImpl) GetImageIDs(ctx context.Context, q *v1.Query) ([] defer cancel() if features.FlattenImageData.Enabled() { - var results []*imageV2Response - results, err = pgSearch.RunSelectRequestForSchema[imageV2Response](queryCtx, v.db, v.schema, q) + results, err := pgSearch.RunSelectRequestForSchema[imageV2Response](queryCtx, v.db, v.schema, q) if err != nil || len(results) == 0 { return nil, err } @@ -211,8 +178,7 @@ func (v *imageCVECoreViewImpl) GetImageIDs(ctx context.Context, q *v1.Query) ([] } return ret, nil } - var results []*imageResponse - results, err = pgSearch.RunSelectRequestForSchema[imageResponse](queryCtx, v.db, v.schema, q) + results, err := pgSearch.RunSelectRequestForSchema[imageResponse](queryCtx, v.db, v.schema, q) if err != nil || len(results) == 0 { return nil, err } diff --git a/central/views/imagecveflat/view_impl.go b/central/views/imagecveflat/view_impl.go index f61e866f94072..61734e1a29b78 100644 --- a/central/views/imagecveflat/view_impl.go +++ b/central/views/imagecveflat/view_impl.go @@ -14,7 +14,6 @@ import ( "github.com/stackrox/rox/pkg/logging" "github.com/stackrox/rox/pkg/postgres" "github.com/stackrox/rox/pkg/postgres/walker" - "github.com/stackrox/rox/pkg/sac/resources" "github.com/stackrox/rox/pkg/search" pgSearch "github.com/stackrox/rox/pkg/search/postgres" "github.com/stackrox/rox/pkg/search/postgres/aggregatefunc" @@ -36,17 +35,10 @@ func (v *imageCVEFlatViewImpl) Count(ctx context.Context, q *v1.Query) (int, err return 0, err } - var err error - q, err = common.WithSACFilter(ctx, resources.Image, q) - if err != nil { - 
return 0, err - } - queryCtx, cancel := contextutil.ContextWithTimeoutIfNotExists(ctx, queryTimeout) defer cancel() - var results []*imageCVEFlatCount - results, err = pgSearch.RunSelectRequestForSchema[imageCVEFlatCount](queryCtx, v.db, v.schema, common.WithCountQuery(q, search.CVE)) + results, err := pgSearch.RunSelectRequestForSchema[imageCVEFlatCount](queryCtx, v.db, v.schema, common.WithCountQuery(q, search.CVE)) if err != nil { return 0, err } @@ -65,19 +57,14 @@ func (v *imageCVEFlatViewImpl) Get(ctx context.Context, q *v1.Query, options vie if err := common.ValidateQuery(q); err != nil { return nil, err } - var err error // Avoid changing the passed query cloned := q.CloneVT() // Update the sort options to use aggregations if necessary as we are grouping by CVEs cloned = common.UpdateSortAggs(cloned) - cloned, err = common.WithSACFilter(ctx, resources.Image, cloned) - if err != nil { - log.Error(err) - return nil, err - } // Performance improvements to narrow aggregations performed var cveIDsToFilter []string + var err error if cloned.GetPagination().GetLimit() > 0 || cloned.GetPagination().GetOffset() > 0 { cveIDsToFilter, err = v.getFilteredCVEs(ctx, cloned) if err != nil { diff --git a/central/views/images/view_impl.go b/central/views/images/view_impl.go index f223db006efbb..77f6941b54789 100644 --- a/central/views/images/view_impl.go +++ b/central/views/images/view_impl.go @@ -10,7 +10,6 @@ import ( "github.com/stackrox/rox/pkg/features" "github.com/stackrox/rox/pkg/postgres" "github.com/stackrox/rox/pkg/postgres/walker" - "github.com/stackrox/rox/pkg/sac/resources" "github.com/stackrox/rox/pkg/search" pgSearch "github.com/stackrox/rox/pkg/search/postgres" ) @@ -29,18 +28,12 @@ func (v *imageCoreViewImpl) Get(ctx context.Context, query *v1.Query) ([]ImageCo return nil, err } - var err error - query, err = common.WithSACFilter(ctx, resources.Image, query) - if err != nil { - return nil, err - } query = withSelectQuery(query) queryCtx, cancel := contextutil.ContextWithTimeoutIfNotExists(ctx, queryTimeout) defer cancel() - var results []*imageResponse - results, err = pgSearch.RunSelectRequestForSchema[imageResponse](queryCtx, v.db, v.schema, query) + results, err := pgSearch.RunSelectRequestForSchema[imageResponse](queryCtx, v.db, v.schema, query) if err != nil { return nil, err } diff --git a/central/views/nodecve/view_impl.go b/central/views/nodecve/view_impl.go index e2b3ca109b489..37d2ac5ce0c9f 100644 --- a/central/views/nodecve/view_impl.go +++ b/central/views/nodecve/view_impl.go @@ -9,7 +9,6 @@ import ( v1 "github.com/stackrox/rox/generated/api/v1" "github.com/stackrox/rox/pkg/postgres" "github.com/stackrox/rox/pkg/postgres/walker" - "github.com/stackrox/rox/pkg/sac/resources" "github.com/stackrox/rox/pkg/search" pgSearch "github.com/stackrox/rox/pkg/search/postgres" "github.com/stackrox/rox/pkg/search/postgres/aggregatefunc" @@ -26,14 +25,7 @@ func (n *nodeCVECoreViewImpl) Count(ctx context.Context, q *v1.Query) (int, erro return 0, err } - var err error - q, err = common.WithSACFilter(ctx, resources.Node, q) - if err != nil { - return 0, err - } - - var results []*nodeCVECoreCount - results, err = pgSearch.RunSelectRequestForSchema[nodeCVECoreCount](ctx, n.db, n.schema, common.WithCountQuery(q, search.CVE)) + results, err := pgSearch.RunSelectRequestForSchema[nodeCVECoreCount](ctx, n.db, n.schema, common.WithCountQuery(q, search.CVE)) if err != nil { return 0, err } @@ -52,14 +44,7 @@ func (n *nodeCVECoreViewImpl) Get(ctx context.Context, q *v1.Query) ([]CveCore, return 
nil, err } - var err error - q, err = common.WithSACFilter(ctx, resources.Node, q) - if err != nil { - return nil, err - } - - var results []*nodeCVECoreResponse - results, err = pgSearch.RunSelectRequestForSchema[nodeCVECoreResponse](ctx, n.db, n.schema, withSelectQuery(q)) + results, err := pgSearch.RunSelectRequestForSchema[nodeCVECoreResponse](ctx, n.db, n.schema, withSelectQuery(q)) if err != nil { return nil, err } @@ -83,14 +68,7 @@ func (n *nodeCVECoreViewImpl) CountBySeverity(ctx context.Context, q *v1.Query) return nil, err } - var err error - q, err = common.WithSACFilter(ctx, resources.Node, q) - if err != nil { - return nil, err - } - - var results []*countByNodeCVESeverity - results, err = pgSearch.RunSelectRequestForSchema[countByNodeCVESeverity](ctx, n.db, n.schema, common.WithCountBySeverityAndFixabilityQuery(q, search.CVE)) + results, err := pgSearch.RunSelectRequestForSchema[countByNodeCVESeverity](ctx, n.db, n.schema, common.WithCountBySeverityAndFixabilityQuery(q, search.CVE)) if err != nil { return nil, err } @@ -107,18 +85,11 @@ func (n *nodeCVECoreViewImpl) CountBySeverity(ctx context.Context, q *v1.Query) } func (n *nodeCVECoreViewImpl) GetNodeIDs(ctx context.Context, q *v1.Query) ([]string, error) { - var err error - q, err = common.WithSACFilter(ctx, resources.Node, q) - if err != nil { - return nil, err - } - q.Selects = []*v1.QuerySelect{ search.NewQuerySelect(search.NodeID).Distinct().Proto(), } - var results []*nodeResponse - results, err = pgSearch.RunSelectRequestForSchema[nodeResponse](ctx, n.db, n.schema, q) + results, err := pgSearch.RunSelectRequestForSchema[nodeResponse](ctx, n.db, n.schema, q) if err != nil || len(results) == 0 { return nil, err } diff --git a/central/views/platformcve/view_impl.go b/central/views/platformcve/view_impl.go index f323d04f55fcd..f7c1057b06ddd 100644 --- a/central/views/platformcve/view_impl.go +++ b/central/views/platformcve/view_impl.go @@ -9,7 +9,6 @@ import ( "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/postgres" "github.com/stackrox/rox/pkg/postgres/walker" - "github.com/stackrox/rox/pkg/sac/resources" "github.com/stackrox/rox/pkg/search" pgSearch "github.com/stackrox/rox/pkg/search/postgres" "github.com/stackrox/rox/pkg/search/postgres/aggregatefunc" @@ -26,14 +25,7 @@ func (v *platformCVECoreViewImpl) Count(ctx context.Context, q *v1.Query) (int, return 0, err } - var err error - q, err = common.WithSACFilter(ctx, resources.Cluster, q) - if err != nil { - return 0, err - } - - var results []*platformCVECoreCount - results, err = pgSearch.RunSelectRequestForSchema[platformCVECoreCount](ctx, v.db, v.schema, common.WithCountQuery(q, search.CVEID)) + results, err := pgSearch.RunSelectRequestForSchema[platformCVECoreCount](ctx, v.db, v.schema, common.WithCountQuery(q, search.CVEID)) if err != nil { return 0, err } @@ -53,14 +45,7 @@ func (v *platformCVECoreViewImpl) Get(ctx context.Context, q *v1.Query) ([]CveCo return nil, err } - var err error - q, err = common.WithSACFilter(ctx, resources.Cluster, q) - if err != nil { - return nil, err - } - - var results []*platformCVECoreResponse - results, err = pgSearch.RunSelectRequestForSchema[platformCVECoreResponse](ctx, v.db, v.schema, withSelectQuery(q)) + results, err := pgSearch.RunSelectRequestForSchema[platformCVECoreResponse](ctx, v.db, v.schema, withSelectQuery(q)) if err != nil { return nil, err } @@ -73,18 +58,11 @@ func (v *platformCVECoreViewImpl) Get(ctx context.Context, q *v1.Query) ([]CveCo } func (v *platformCVECoreViewImpl) 
GetClusterIDs(ctx context.Context, q *v1.Query) ([]string, error) { - var err error - q, err = common.WithSACFilter(ctx, resources.Cluster, q) - if err != nil { - return nil, err - } - q.Selects = []*v1.QuerySelect{ search.NewQuerySelect(search.ClusterID).Distinct().Proto(), } - var results []*clusterResponse - results, err = pgSearch.RunSelectRequestForSchema[clusterResponse](ctx, v.db, v.schema, q) + results, err := pgSearch.RunSelectRequestForSchema[clusterResponse](ctx, v.db, v.schema, q) if err != nil || len(results) == 0 { return nil, err } @@ -101,14 +79,7 @@ func (v *platformCVECoreViewImpl) CVECountByType(ctx context.Context, q *v1.Quer return nil, err } - var err error - q, err = common.WithSACFilter(ctx, resources.Cluster, q) - if err != nil { - return nil, err - } - - var results []*cveCountByTypeResponse - results, err = pgSearch.RunSelectRequestForSchema[cveCountByTypeResponse](ctx, v.db, v.schema, withCVECountByTypeQuery(q)) + results, err := pgSearch.RunSelectRequestForSchema[cveCountByTypeResponse](ctx, v.db, v.schema, withCVECountByTypeQuery(q)) if err != nil { return nil, err } @@ -129,14 +100,7 @@ func (v *platformCVECoreViewImpl) CVECountByFixability(ctx context.Context, q *v return nil, err } - var err error - q, err = common.WithSACFilter(ctx, resources.Cluster, q) - if err != nil { - return nil, err - } - - var results []*cveCountByFixabilityResponse - results, err = pgSearch.RunSelectRequestForSchema[cveCountByFixabilityResponse](ctx, v.db, v.schema, withCVECountByFixabilityQuery(q)) + results, err := pgSearch.RunSelectRequestForSchema[cveCountByFixabilityResponse](ctx, v.db, v.schema, withCVECountByFixabilityQuery(q)) if err != nil { return nil, err } diff --git a/pkg/search/postgres/common_test.go b/pkg/search/postgres/common_test.go index 5514f4692974c..194de671f4978 100644 --- a/pkg/search/postgres/common_test.go +++ b/pkg/search/postgres/common_test.go @@ -671,7 +671,7 @@ func TestSelectQueries(t *testing.T) { }, { desc: "base schema; select w/ where; image scope", - ctx: scoped.Context(context.Background(), scoped.Scope{ + ctx: scoped.Context(sac.WithAllAccess(context.Background()), scoped.Scope{ IDs: []string{"fake-image"}, Level: v1.SearchCategory_IMAGES, }), @@ -685,7 +685,7 @@ func TestSelectQueries(t *testing.T) { }, { desc: "base schema; select w/ multiple scopes", - ctx: scoped.Context(context.Background(), scoped.Scope{ + ctx: scoped.Context(sac.WithAllAccess(context.Background()), scoped.Scope{ IDs: []string{uuid.NewV4().String()}, Level: v1.SearchCategory_NAMESPACES, Parent: &scoped.Scope{ @@ -740,7 +740,7 @@ func TestSelectQueries(t *testing.T) { t.Run(c.desc, func(t *testing.T) { ctx := c.ctx if c.ctx == nil { - ctx = context.Background() + ctx = sac.WithAllAccess(context.Background()) } testSchema := c.schema actualQ, err := standardizeSelectQueryAndPopulatePath(ctx, c.q, testSchema, SELECT) diff --git a/pkg/search/postgres/sac.go b/pkg/search/postgres/sac.go index 5c39a6d5fd7f1..afcf21bed7002 100644 --- a/pkg/search/postgres/sac.go +++ b/pkg/search/postgres/sac.go @@ -28,8 +28,10 @@ func enrichQueryWithSACFilter(ctx context.Context, q *v1.Query, schema *walker.S return q, nil } pagination := q.GetPagination() + groupBy := q.GetGroupBy() query := searchPkg.ConjunctionQuery(sacFilter, q) query.Pagination = pagination + query.GroupBy = groupBy return query, nil default: sacFilter, err := GetReadSACQuery(ctx, schema.ScopingResource) @@ -41,9 +43,11 @@ func enrichQueryWithSACFilter(ctx context.Context, q *v1.Query, schema *walker.S } pagination := 
q.GetPagination() selects := q.GetSelects() + groupBy := q.GetGroupBy() query := searchPkg.ConjunctionQuery(sacFilter, q) query.Pagination = pagination query.Selects = selects + query.GroupBy = groupBy return query, nil } } diff --git a/pkg/search/postgres/select.go b/pkg/search/postgres/select.go index b0b41628463eb..bf2673a514641 100644 --- a/pkg/search/postgres/select.go +++ b/pkg/search/postgres/select.go @@ -95,6 +95,11 @@ func standardizeSelectQueryAndPopulatePath(ctx context.Context, q *v1.Query, sch return nil, err } + q, err = enrichQueryWithSACFilter(ctx, q, schema, queryType) + if err != nil { + return nil, err + } + standardizeFieldNamesInQuery(q) joins, dbFields := getJoinsAndFields(schema, q) if len(q.GetSelects()) == 0 && q.GetQuery() == nil { diff --git a/pkg/search/postgres/testutils.go b/pkg/search/postgres/testutils.go index ddf60e73edf50..5ae5c1eef015a 100644 --- a/pkg/search/postgres/testutils.go +++ b/pkg/search/postgres/testutils.go @@ -6,12 +6,14 @@ import ( v1 "github.com/stackrox/rox/generated/api/v1" "github.com/stackrox/rox/pkg/postgres/walker" + "github.com/stackrox/rox/pkg/sac" "github.com/stretchr/testify/assert" ) // AssertSQLQueryString a utility function for test purpose. func AssertSQLQueryString(t testing.TB, q *v1.Query, schema *walker.Schema, expected string) { - actual, err := standardizeSelectQueryAndPopulatePath(context.Background(), q, schema, SELECT) + ctx := sac.WithAllAccess(context.Background()) + actual, err := standardizeSelectQueryAndPopulatePath(ctx, q, schema, SELECT) assert.NoError(t, err) assert.Equal(t, expected, actual.AsSQL()) } From 85b4bfb9d2a555f5360b0a386329adadaf118c23 Mon Sep 17 00:00:00 2001 From: "red-hat-konflux[bot]" <126015336+red-hat-konflux[bot]@users.noreply.github.com> Date: Tue, 3 Feb 2026 20:27:26 +0000 Subject: [PATCH 109/232] chore(deps): refresh rpm lockfiles [SECURITY] (#18639) Signed-off-by: red-hat-konflux <126015336+red-hat-konflux[bot]@users.noreply.github.com> Co-authored-by: red-hat-konflux[bot] <126015336+red-hat-konflux[bot]@users.noreply.github.com> From 20cfa42f68facbae0370a14055b232308f9577f9 Mon Sep 17 00:00:00 2001 From: AJ Heflin <77823405+ajheflin@users.noreply.github.com> Date: Tue, 3 Feb 2026 22:08:52 -0500 Subject: [PATCH 110/232] feat(db): add indexed CVE column to ImageCVEInfo table (#18822) Co-authored-by: Claude Opus 4.5 --- .../info/datastore/datastore_impl_test.go | 17 ++++ .../info/datastore/store/postgres/store.go | 5 +- central/image/datastore/datastore_impl.go | 1 + .../datastore_impl_flat_postgres_test.go | 1 + central/imagev2/datastore/datastore_impl.go | 1 + generated/storage/cve.pb.go | 13 ++- generated/storage/cve_vtproto.pb.go | 83 +++++++++++++++++++ pkg/postgres/schema/image_cve_infos.go | 1 + proto/storage/cve.proto | 1 + proto/storage/proto.lock | 5 ++ 10 files changed, 125 insertions(+), 3 deletions(-) diff --git a/central/cve/image/info/datastore/datastore_impl_test.go b/central/cve/image/info/datastore/datastore_impl_test.go index 5798ed73e5c81..877862cbfcfb8 100644 --- a/central/cve/image/info/datastore/datastore_impl_test.go +++ b/central/cve/image/info/datastore/datastore_impl_test.go @@ -44,6 +44,7 @@ func (s *ImageCVEInfoDataStoreSuite) TestUpdateTimestamps_NilOld() { now := time.Now() newInfo := &storage.ImageCVEInfo{ Id: "test-id", + Cve: "test-cve", FirstSystemOccurrence: timestamppb.New(now), FixAvailableTimestamp: timestamppb.New(now), } @@ -62,10 +63,12 @@ func (s *ImageCVEInfoDataStoreSuite) TestUpdateTimestamps_PreservesEarlierFirstS oldInfo := 
&storage.ImageCVEInfo{ Id: "test-id", + Cve: "test-cve", FirstSystemOccurrence: timestamppb.New(earlier), } newInfo := &storage.ImageCVEInfo{ Id: "test-id", + Cve: "test-cve", FirstSystemOccurrence: timestamppb.New(later), } @@ -82,10 +85,12 @@ func (s *ImageCVEInfoDataStoreSuite) TestUpdateTimestamps_PreservesEarlierFixAva oldInfo := &storage.ImageCVEInfo{ Id: "test-id", + Cve: "test-cve", FixAvailableTimestamp: timestamppb.New(earlier), } newInfo := &storage.ImageCVEInfo{ Id: "test-id", + Cve: "test-cve", FixAvailableTimestamp: timestamppb.New(later), } @@ -101,10 +106,12 @@ func (s *ImageCVEInfoDataStoreSuite) TestUpdateTimestamps_UsesNewWhenOldIsZero() oldInfo := &storage.ImageCVEInfo{ Id: "test-id", + Cve: "test-cve", FirstSystemOccurrence: nil, // Zero timestamp } newInfo := &storage.ImageCVEInfo{ Id: "test-id", + Cve: "test-cve", FirstSystemOccurrence: timestamppb.New(now), } @@ -120,10 +127,12 @@ func (s *ImageCVEInfoDataStoreSuite) TestUpdateTimestamps_UsesOldWhenNewIsZero() oldInfo := &storage.ImageCVEInfo{ Id: "test-id", + Cve: "test-cve", FirstSystemOccurrence: timestamppb.New(earlier), } newInfo := &storage.ImageCVEInfo{ Id: "test-id", + Cve: "test-cve", FirstSystemOccurrence: nil, // Zero timestamp } @@ -141,6 +150,7 @@ func (s *ImageCVEInfoDataStoreSuite) TestUpsert_PreservesTimestamps() { // First, insert an info with an earlier timestamp firstInfo := &storage.ImageCVEInfo{ Id: "test-cve#test-pkg#test-ds", + Cve: "test-cve", FirstSystemOccurrence: timestamppb.New(earlier), FixAvailableTimestamp: timestamppb.New(earlier), } @@ -150,6 +160,7 @@ func (s *ImageCVEInfoDataStoreSuite) TestUpsert_PreservesTimestamps() { // Now upsert with a later timestamp secondInfo := &storage.ImageCVEInfo{ Id: "test-cve#test-pkg#test-ds", + Cve: "test-cve", FirstSystemOccurrence: timestamppb.New(later), FixAvailableTimestamp: timestamppb.New(later), } @@ -175,16 +186,19 @@ func (s *ImageCVEInfoDataStoreSuite) TestUpsertMany_PreservesTimestamps() { firstInfos := []*storage.ImageCVEInfo{ { Id: "test-cve-1#test-pkg#test-ds", + Cve: "test-cve-1", FirstSystemOccurrence: timestamppb.New(earlier), FixAvailableTimestamp: timestamppb.New(earlier), }, { Id: "test-cve-2#test-pkg#test-ds", + Cve: "test-cve-2", FirstSystemOccurrence: timestamppb.New(earlier), FixAvailableTimestamp: timestamppb.New(earlier), }, { Id: "test-cve-1#test-pkg#test-ds", + Cve: "test-cve-1", FirstSystemOccurrence: nil, FixAvailableTimestamp: timestamppb.New(earlier2), }, @@ -196,11 +210,13 @@ func (s *ImageCVEInfoDataStoreSuite) TestUpsertMany_PreservesTimestamps() { secondInfos := []*storage.ImageCVEInfo{ { Id: "test-cve-1#test-pkg#test-ds", + Cve: "test-cve-1", FirstSystemOccurrence: timestamppb.New(later), FixAvailableTimestamp: timestamppb.New(later), }, { Id: "test-cve-2#test-pkg#test-ds", + Cve: "test-cve-2", FirstSystemOccurrence: timestamppb.New(later), FixAvailableTimestamp: timestamppb.New(later), }, @@ -225,6 +241,7 @@ func (s *ImageCVEInfoDataStoreSuite) TestUpsert_NewInfo() { info := &storage.ImageCVEInfo{ Id: "new-cve#new-pkg#new-ds", + Cve: "new-cve", FirstSystemOccurrence: timestamppb.New(now), FixAvailableTimestamp: timestamppb.New(now), } diff --git a/central/cve/image/info/datastore/store/postgres/store.go b/central/cve/image/info/datastore/store/postgres/store.go index 1eb0c84aa1029..a19af9e49b679 100644 --- a/central/cve/image/info/datastore/store/postgres/store.go +++ b/central/cve/image/info/datastore/store/postgres/store.go @@ -108,10 +108,11 @@ func insertIntoImageCveInfos(batch *pgx.Batch, obj 
*storage.ImageCVEInfo) error obj.GetId(), protocompat.NilOrTime(obj.GetFixAvailableTimestamp()), protocompat.NilOrTime(obj.GetFirstSystemOccurrence()), + obj.GetCve(), serialized, } - finalStr := "INSERT INTO image_cve_infos (Id, FixAvailableTimestamp, FirstSystemOccurrence, serialized) VALUES($1, $2, $3, $4) ON CONFLICT(Id) DO UPDATE SET Id = EXCLUDED.Id, FixAvailableTimestamp = EXCLUDED.FixAvailableTimestamp, FirstSystemOccurrence = EXCLUDED.FirstSystemOccurrence, serialized = EXCLUDED.serialized" + finalStr := "INSERT INTO image_cve_infos (Id, FixAvailableTimestamp, FirstSystemOccurrence, Cve, serialized) VALUES($1, $2, $3, $4, $5) ON CONFLICT(Id) DO UPDATE SET Id = EXCLUDED.Id, FixAvailableTimestamp = EXCLUDED.FixAvailableTimestamp, FirstSystemOccurrence = EXCLUDED.FirstSystemOccurrence, Cve = EXCLUDED.Cve, serialized = EXCLUDED.serialized" batch.Queue(finalStr, values...) return nil @@ -121,6 +122,7 @@ var copyColsImageCveInfos = []string{ "id", "fixavailabletimestamp", "firstsystemoccurrence", + "cve", "serialized", } @@ -158,6 +160,7 @@ func copyFromImageCveInfos(ctx context.Context, s pgSearch.Deleter, tx *postgres obj.GetId(), protocompat.NilOrTime(obj.GetFixAvailableTimestamp()), protocompat.NilOrTime(obj.GetFirstSystemOccurrence()), + obj.GetCve(), serialized, }, nil }) diff --git a/central/image/datastore/datastore_impl.go b/central/image/datastore/datastore_impl.go index f0547036f40d7..3d38d3f8d02c2 100644 --- a/central/image/datastore/datastore_impl.go +++ b/central/image/datastore/datastore_impl.go @@ -414,6 +414,7 @@ func (ds *datastoreImpl) upsertImageCVEInfos(ctx context.Context, image *storage info := &storage.ImageCVEInfo{ Id: cve.ImageCVEInfoID(vuln.GetCve(), component.GetName(), vuln.GetDatasource()), + Cve: vuln.GetCve(), FixAvailableTimestamp: fixAvailableTimestamp, FirstSystemOccurrence: now, // Smart upsert in ImageCVEInfo datastore preserves existing } diff --git a/central/image/datastore/datastore_impl_flat_postgres_test.go b/central/image/datastore/datastore_impl_flat_postgres_test.go index 7d14fc7fd2135..80cb5e9c1ade3 100644 --- a/central/image/datastore/datastore_impl_flat_postgres_test.go +++ b/central/image/datastore/datastore_impl_flat_postgres_test.go @@ -704,6 +704,7 @@ func (s *ImageFlatPostgresDataStoreTestSuite) TestImageCVEInfoIntegration_Enrich preExistingInfo := &storage.ImageCVEInfo{ Id: pkgCVE.ImageCVEInfoID("CVE-2021-5678", "curl", "debian-updater::debian:11"), + Cve: "CVE-2021-5678", FixAvailableTimestamp: earlierTime, FirstSystemOccurrence: earlierTime, } diff --git a/central/imagev2/datastore/datastore_impl.go b/central/imagev2/datastore/datastore_impl.go index 08fb6c5f2f903..ab3273447dfee 100644 --- a/central/imagev2/datastore/datastore_impl.go +++ b/central/imagev2/datastore/datastore_impl.go @@ -420,6 +420,7 @@ func (ds *datastoreImpl) upsertImageCVEInfos(ctx context.Context, image *storage info := &storage.ImageCVEInfo{ Id: cve.ImageCVEInfoID(vuln.GetCve(), component.GetName(), vuln.GetDatasource()), + Cve: vuln.GetCve(), FixAvailableTimestamp: fixAvailableTimestamp, FirstSystemOccurrence: now, // Smart upsert in ImageCVEInfo datastore preserves existing } diff --git a/generated/storage/cve.pb.go b/generated/storage/cve.pb.go index 3b164cf577040..5f70d577df0de 100644 --- a/generated/storage/cve.pb.go +++ b/generated/storage/cve.pb.go @@ -2365,6 +2365,7 @@ type ImageCVEInfo struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty" search:"CVE Info" sql:"pk"` // @gotags: search:"CVE Info" sql:"pk" FixAvailableTimestamp 
*timestamppb.Timestamp `protobuf:"bytes,2,opt,name=fix_available_timestamp,json=fixAvailableTimestamp,proto3" json:"fix_available_timestamp,omitempty" search:"CVE Fix Available Timestamp,hidden"` // @gotags: search:"CVE Fix Available Timestamp,hidden" FirstSystemOccurrence *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=first_system_occurrence,json=firstSystemOccurrence,proto3" json:"first_system_occurrence,omitempty" search:"First System Occurrence Timestamp,hidden"` // @gotags: search:"First System Occurrence Timestamp,hidden" + Cve string `protobuf:"bytes,4,opt,name=cve,proto3" json:"cve,omitempty" search:"CVE" sql:"index"` // @gotags: search:"CVE" sql:"index" unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -2420,6 +2421,13 @@ func (x *ImageCVEInfo) GetFirstSystemOccurrence() *timestamppb.Timestamp { return nil } +func (x *ImageCVEInfo) GetCve() string { + if x != nil { + return x.Cve + } + return "" +} + type CVE_DistroSpecific struct { state protoimpl.MessageState `protogen:"open.v1"` Severity VulnerabilitySeverity `protobuf:"varint,1,opt,name=severity,proto3,enum=storage.VulnerabilitySeverity" json:"severity,omitempty"` @@ -2836,11 +2844,12 @@ const file_storage_cve_proto_rawDesc = "" + "\n" + "\x06MEDIUM\x10\x03\x12\b\n" + "\x04HIGH\x10\x04\x12\f\n" + - "\bCRITICAL\x10\x05\"\xc6\x01\n" + + "\bCRITICAL\x10\x05\"\xd8\x01\n" + "\fImageCVEInfo\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12R\n" + "\x17fix_available_timestamp\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\x15fixAvailableTimestamp\x12R\n" + - "\x17first_system_occurrence\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\x15firstSystemOccurrence*D\n" + + "\x17first_system_occurrence\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\x15firstSystemOccurrence\x12\x10\n" + + "\x03cve\x18\x04 \x01(\tR\x03cve*D\n" + "\x12VulnerabilityState\x12\f\n" + "\bOBSERVED\x10\x00\x12\f\n" + "\bDEFERRED\x10\x01\x12\x12\n" + diff --git a/generated/storage/cve_vtproto.pb.go b/generated/storage/cve_vtproto.pb.go index e1fdecbbea325..73f01e2f04092 100644 --- a/generated/storage/cve_vtproto.pb.go +++ b/generated/storage/cve_vtproto.pb.go @@ -451,6 +451,7 @@ func (m *ImageCVEInfo) CloneVT() *ImageCVEInfo { r.Id = m.Id r.FixAvailableTimestamp = (*timestamppb.Timestamp)((*timestamppb1.Timestamp)(m.FixAvailableTimestamp).CloneVT()) r.FirstSystemOccurrence = (*timestamppb.Timestamp)((*timestamppb1.Timestamp)(m.FirstSystemOccurrence).CloneVT()) + r.Cve = m.Cve if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -1237,6 +1238,9 @@ func (this *ImageCVEInfo) EqualVT(that *ImageCVEInfo) bool { if !(*timestamppb1.Timestamp)(this.FirstSystemOccurrence).EqualVT((*timestamppb1.Timestamp)(that.FirstSystemOccurrence)) { return false } + if this.Cve != that.Cve { + return false + } return string(this.unknownFields) == string(that.unknownFields) } @@ -2732,6 +2736,13 @@ func (m *ImageCVEInfo) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Cve) > 0 { + i -= len(m.Cve) + copy(dAtA[i:], m.Cve) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Cve))) + i-- + dAtA[i] = 0x22 + } if m.FirstSystemOccurrence != nil { size, err := (*timestamppb1.Timestamp)(m.FirstSystemOccurrence).MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -3402,6 +3413,10 @@ func (m *ImageCVEInfo) SizeVT() (n int) { l = (*timestamppb1.Timestamp)(m.FirstSystemOccurrence).SizeVT() n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) } + l = len(m.Cve) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } n += len(m.unknownFields) return n } @@ -7485,6 +7500,38 @@ func (m *ImageCVEInfo) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cve", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cve = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -11698,6 +11745,42 @@ func (m *ImageCVEInfo) UnmarshalVTUnsafe(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cve", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var stringValue string + if intStringLen > 0 { + stringValue = unsafe.String(&dAtA[iNdEx], intStringLen) + } + m.Cve = stringValue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) diff --git a/pkg/postgres/schema/image_cve_infos.go b/pkg/postgres/schema/image_cve_infos.go index a1c3fefdda1e2..90166bc6228f0 100644 --- a/pkg/postgres/schema/image_cve_infos.go +++ b/pkg/postgres/schema/image_cve_infos.go @@ -42,5 +42,6 @@ type ImageCveInfos struct { ID string `gorm:"column:id;type:varchar;primaryKey"` FixAvailableTimestamp *time.Time `gorm:"column:fixavailabletimestamp;type:timestamp"` FirstSystemOccurrence *time.Time `gorm:"column:firstsystemoccurrence;type:timestamp"` + Cve string `gorm:"column:cve;type:varchar;index:imagecveinfos_cve,type:btree"` Serialized []byte `gorm:"column:serialized;type:bytea"` } diff --git a/proto/storage/cve.proto b/proto/storage/cve.proto index 7ec3972237500..8705fa75ed5e1 100644 --- a/proto/storage/cve.proto +++ b/proto/storage/cve.proto @@ -342,4 +342,5 @@ message ImageCVEInfo { string id = 1; // @gotags: search:"CVE Info" sql:"pk" google.protobuf.Timestamp fix_available_timestamp = 2; // @gotags: search:"CVE Fix Available Timestamp,hidden" google.protobuf.Timestamp first_system_occurrence = 3; // @gotags: search:"First System Occurrence Timestamp,hidden" + string cve = 4; // @gotags: search:"CVE" sql:"index" } diff --git a/proto/storage/proto.lock b/proto/storage/proto.lock index 983e6d118f52d..d777a4ae0e09c 100644 --- a/proto/storage/proto.lock +++ b/proto/storage/proto.lock @@ -6895,6 +6895,11 @@ "id": 3, "name": "first_system_occurrence", "type": "google.protobuf.Timestamp" + }, + { + "id": 4, + "name": "cve", + "type": "string" } ] } From 
8f9c493bc8b09fcbc560609c94e01d03639fc1eb Mon Sep 17 00:00:00 2001 From: "red-hat-konflux[bot]" <126015336+red-hat-konflux[bot]@users.noreply.github.com> Date: Wed, 4 Feb 2026 07:12:15 +0000 Subject: [PATCH 111/232] chore(deps): update konflux references (#18430) Signed-off-by: red-hat-konflux <126015336+red-hat-konflux[bot]@users.noreply.github.com> Co-authored-by: red-hat-konflux[bot] <126015336+red-hat-konflux[bot]@users.noreply.github.com> --- .tekton/basic-component-pipeline.yaml | 32 +++++++++++++-------------- .tekton/create-custom-snapshot.yaml | 2 +- .tekton/main-pipeline.yaml | 32 +++++++++++++-------------- .tekton/operator-bundle-pipeline.yaml | 30 ++++++++++++------------- .tekton/retag-pipeline.yaml | 2 +- .tekton/scanner-v4-pipeline.yaml | 32 +++++++++++++-------------- 6 files changed, 65 insertions(+), 65 deletions(-) diff --git a/.tekton/basic-component-pipeline.yaml b/.tekton/basic-component-pipeline.yaml index d44396956e29e..2fc8efa916168 100644 --- a/.tekton/basic-component-pipeline.yaml +++ b/.tekton/basic-component-pipeline.yaml @@ -35,7 +35,7 @@ spec: - name: name value: show-sbom - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:beb0616db051952b4b861dd8c3e00fa1c0eccbd926feddf71194d3bb3ace9ce7 + value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:e2c1b4eac642f32e91f3bc5d3cb48c5c70888aaf45c3650d9ea34573de7a7fd5 - name: kind value: task resolver: bundles @@ -175,7 +175,7 @@ spec: - name: name value: init - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:b349d24cb896573695802d6913d311640b44675ec082b3ad167721946a6a0a71 + value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:ebf06778aeacbbeb081f9231eafbdfdb8e380ad04e211d7ed80ae9101e37fd82 - name: kind value: task resolver: bundles @@ -199,7 +199,7 @@ spec: - name: name value: git-clone-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-git-clone-oci-ta:0.1@sha256:56f65a16d3d0485c64ad85af2c1f3e9b0bb4d02d63f2fd0ebb9498d219ca723d + value: quay.io/konflux-ci/tekton-catalog/task-git-clone-oci-ta:0.1@sha256:306b69e6db435ad4a7cf258b6219d9b998eb37da44f5e9ac882ac86a08109154 - name: kind value: task resolver: bundles @@ -258,7 +258,7 @@ spec: - name: name value: prefetch-dependencies-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:8eac535f436874ca27c70434ff356ba941d7b649c0e861bca6f4e9fc5e215478 + value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:c664a6df6514b59c3ce53570b0994b45af66ecc89ba2a8e41834eae0622addf6 - name: kind value: task resolver: bundles @@ -311,7 +311,7 @@ spec: - name: name value: buildah-remote-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.7@sha256:fe66734cde6713f530b19e222680acc5e0d6a533866429659ea8561b746d9ce7 + value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.8@sha256:6de65de9e78397d4f3be81b0ed7cef4b7b2cb6dceee059bcc741c8ff9d6e128a - name: kind value: task resolver: bundles @@ -338,7 +338,7 @@ spec: - name: name value: build-image-index - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.2@sha256:985d1efe861b02524a7679ecd855624b3d4e3a2e835b6f8a97ec7d135898ec0b + value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.2@sha256:8c422a5380a3d877257003dee153190322af84fe6f4f25e9eee7d8bf61a62577 - name: kind value: task resolver: bundles @@ -362,7 +362,7 @@ spec: - name: name value: apply-tags - name: bundle - 
value: quay.io/konflux-ci/tekton-catalog/task-apply-tags:0.2@sha256:c89cd10b2a3f4c43789c5f06ef2b86f528b28f156c20af5e751fa8c0facd457d + value: quay.io/konflux-ci/tekton-catalog/task-apply-tags:0.3@sha256:510b6d2a3b188adeb716e49566b57d611ab36bd69a2794b5ddfc11dbf014c2ca - name: kind value: task resolver: bundles @@ -409,7 +409,7 @@ spec: - name: name value: deprecated-image-check - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-deprecated-image-check:0.5@sha256:808fe09bb5b8503de569de097ae5dd619a7488110f79e8e215e69862ee3fce6d + value: quay.io/konflux-ci/tekton-catalog/task-deprecated-image-check:0.5@sha256:e3a55ccdf1091b4a35507f9ee2d1918d8e89a5f96babcb5486b491226da03d6f - name: kind value: task resolver: bundles @@ -434,7 +434,7 @@ spec: - name: name value: clair-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:7c2a32de9021f16f6e8df08a55f539f12e00ea4d96f6fb37f9ea04167032c61f + value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:dadfea7633d82e4303ba73d5e9c7e2bc16834bde0fd7688880453b26452067eb - name: kind value: task resolver: bundles @@ -457,7 +457,7 @@ spec: - name: name value: ecosystem-cert-preflight-checks - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:04f75593558f79a27da2336400bc63d460bf0c5669e3c13f40ee2fb650b1ad1e + value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:33b7133c0c132c361295c30947f73bd45a3a3b62a24b83f3d8cd7c71f757828c - name: kind value: task resolver: bundles @@ -481,7 +481,7 @@ spec: - name: name value: sast-shell-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:d44336d7bcbd1f7cedee639357a493bd1f661e2859e49e11a34644bdf6819c4e + value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:1f67b661458c549ab299bcdddb5e2b799af8c89d3c594567eb654d870000b5ec - name: kind value: task resolver: bundles @@ -505,7 +505,7 @@ spec: - name: name value: sast-unicode-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.3@sha256:e5a8d3e8e7be7246a1460385b95c084ea6e8fe7520d40fe4389deb90f1bf5176 + value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.4@sha256:9d2ab1bcd65f56ce32fe366a955abc8ac76228734a3f3642ac9af8af86fbb4d1 - name: kind value: task resolver: bundles @@ -529,7 +529,7 @@ spec: - name: name value: sast-snyk-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:0eca130f289a1a1069a1b92943479f79aa7324e4e68d6396fd777ccd97058f50 + value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:0fc62e57ab2c75adf5eaa5c3e5aaeb4845dbf029ddd159b688bc5804579b639f - name: kind value: task resolver: bundles @@ -554,7 +554,7 @@ spec: - name: name value: clamav-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:f3d2d179cddcc07d0228d9f52959a233037a3afa2619d0a8b2effbb467db80c3 + value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:4f5ccf2324ecef92aaad6e2adb46c0bb15be49b4869b5b407346c514b764404f - name: kind value: task resolver: bundles @@ -574,7 +574,7 @@ spec: - name: name value: rpms-signature-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-rpms-signature-scan:0.2@sha256:0b10508c82ccb0f5a06a66ce7af56e9bfd40651ddefdf0f499988e897771ee28 + value: 
quay.io/konflux-ci/tekton-catalog/task-rpms-signature-scan:0.2@sha256:ccd087c879899b4c7fe2e05c5a2fa5b9829f4826fa2bd60e0db5b1d4bf1a716e - name: kind value: task resolver: bundles @@ -600,7 +600,7 @@ spec: - name: name value: push-dockerfile-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:08bba4a659ecd48f871bef00b80af58954e5a09fcbb28a1783ddd640c4f6535e + value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:6fb61bec5ef161225a850005233db68cfdc03ad54e1a54cc49cc98d98ea3d259 - name: kind value: task resolver: bundles diff --git a/.tekton/create-custom-snapshot.yaml b/.tekton/create-custom-snapshot.yaml index 4e865b0fd322b..77a7bdf70e5d1 100644 --- a/.tekton/create-custom-snapshot.yaml +++ b/.tekton/create-custom-snapshot.yaml @@ -142,7 +142,7 @@ spec: - name: name value: git-clone-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-git-clone-oci-ta:0.1@sha256:56f65a16d3d0485c64ad85af2c1f3e9b0bb4d02d63f2fd0ebb9498d219ca723d + value: quay.io/konflux-ci/tekton-catalog/task-git-clone-oci-ta:0.1@sha256:306b69e6db435ad4a7cf258b6219d9b998eb37da44f5e9ac882ac86a08109154 - name: kind value: task resolver: bundles diff --git a/.tekton/main-pipeline.yaml b/.tekton/main-pipeline.yaml index 223d6e9194a92..d1773908211d2 100644 --- a/.tekton/main-pipeline.yaml +++ b/.tekton/main-pipeline.yaml @@ -35,7 +35,7 @@ spec: - name: name value: show-sbom - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:beb0616db051952b4b861dd8c3e00fa1c0eccbd926feddf71194d3bb3ace9ce7 + value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:e2c1b4eac642f32e91f3bc5d3cb48c5c70888aaf45c3650d9ea34573de7a7fd5 - name: kind value: task resolver: bundles @@ -176,7 +176,7 @@ spec: - name: name value: init - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:b349d24cb896573695802d6913d311640b44675ec082b3ad167721946a6a0a71 + value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:ebf06778aeacbbeb081f9231eafbdfdb8e380ad04e211d7ed80ae9101e37fd82 - name: kind value: task resolver: bundles @@ -200,7 +200,7 @@ spec: - name: name value: git-clone-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-git-clone-oci-ta:0.1@sha256:56f65a16d3d0485c64ad85af2c1f3e9b0bb4d02d63f2fd0ebb9498d219ca723d + value: quay.io/konflux-ci/tekton-catalog/task-git-clone-oci-ta:0.1@sha256:306b69e6db435ad4a7cf258b6219d9b998eb37da44f5e9ac882ac86a08109154 - name: kind value: task resolver: bundles @@ -284,7 +284,7 @@ spec: - name: name value: prefetch-dependencies-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:8eac535f436874ca27c70434ff356ba941d7b649c0e861bca6f4e9fc5e215478 + value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:c664a6df6514b59c3ce53570b0994b45af66ecc89ba2a8e41834eae0622addf6 - name: kind value: task resolver: bundles @@ -337,7 +337,7 @@ spec: - name: name value: buildah-remote-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.7@sha256:fe66734cde6713f530b19e222680acc5e0d6a533866429659ea8561b746d9ce7 + value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.8@sha256:6de65de9e78397d4f3be81b0ed7cef4b7b2cb6dceee059bcc741c8ff9d6e128a - name: kind value: task resolver: bundles @@ -365,7 +365,7 @@ spec: - name: name value: build-image-index - name: bundle - value: 
quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.2@sha256:985d1efe861b02524a7679ecd855624b3d4e3a2e835b6f8a97ec7d135898ec0b + value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.2@sha256:8c422a5380a3d877257003dee153190322af84fe6f4f25e9eee7d8bf61a62577 - name: kind value: task resolver: bundles @@ -389,7 +389,7 @@ spec: - name: name value: apply-tags - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-apply-tags:0.2@sha256:c89cd10b2a3f4c43789c5f06ef2b86f528b28f156c20af5e751fa8c0facd457d + value: quay.io/konflux-ci/tekton-catalog/task-apply-tags:0.3@sha256:510b6d2a3b188adeb716e49566b57d611ab36bd69a2794b5ddfc11dbf014c2ca - name: kind value: task resolver: bundles @@ -436,7 +436,7 @@ spec: - name: name value: deprecated-image-check - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-deprecated-image-check:0.5@sha256:808fe09bb5b8503de569de097ae5dd619a7488110f79e8e215e69862ee3fce6d + value: quay.io/konflux-ci/tekton-catalog/task-deprecated-image-check:0.5@sha256:e3a55ccdf1091b4a35507f9ee2d1918d8e89a5f96babcb5486b491226da03d6f - name: kind value: task resolver: bundles @@ -461,7 +461,7 @@ spec: - name: name value: clair-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:7c2a32de9021f16f6e8df08a55f539f12e00ea4d96f6fb37f9ea04167032c61f + value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:dadfea7633d82e4303ba73d5e9c7e2bc16834bde0fd7688880453b26452067eb - name: kind value: task resolver: bundles @@ -484,7 +484,7 @@ spec: - name: name value: ecosystem-cert-preflight-checks - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:04f75593558f79a27da2336400bc63d460bf0c5669e3c13f40ee2fb650b1ad1e + value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:33b7133c0c132c361295c30947f73bd45a3a3b62a24b83f3d8cd7c71f757828c - name: kind value: task resolver: bundles @@ -508,7 +508,7 @@ spec: - name: name value: sast-shell-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:d44336d7bcbd1f7cedee639357a493bd1f661e2859e49e11a34644bdf6819c4e + value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:1f67b661458c549ab299bcdddb5e2b799af8c89d3c594567eb654d870000b5ec - name: kind value: task resolver: bundles @@ -532,7 +532,7 @@ spec: - name: name value: sast-unicode-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.3@sha256:e5a8d3e8e7be7246a1460385b95c084ea6e8fe7520d40fe4389deb90f1bf5176 + value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.4@sha256:9d2ab1bcd65f56ce32fe366a955abc8ac76228734a3f3642ac9af8af86fbb4d1 - name: kind value: task resolver: bundles @@ -556,7 +556,7 @@ spec: - name: name value: sast-snyk-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:0eca130f289a1a1069a1b92943479f79aa7324e4e68d6396fd777ccd97058f50 + value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:0fc62e57ab2c75adf5eaa5c3e5aaeb4845dbf029ddd159b688bc5804579b639f - name: kind value: task resolver: bundles @@ -581,7 +581,7 @@ spec: - name: name value: clamav-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:f3d2d179cddcc07d0228d9f52959a233037a3afa2619d0a8b2effbb467db80c3 + value: 
quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:4f5ccf2324ecef92aaad6e2adb46c0bb15be49b4869b5b407346c514b764404f - name: kind value: task resolver: bundles @@ -601,7 +601,7 @@ spec: - name: name value: rpms-signature-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-rpms-signature-scan:0.2@sha256:0b10508c82ccb0f5a06a66ce7af56e9bfd40651ddefdf0f499988e897771ee28 + value: quay.io/konflux-ci/tekton-catalog/task-rpms-signature-scan:0.2@sha256:ccd087c879899b4c7fe2e05c5a2fa5b9829f4826fa2bd60e0db5b1d4bf1a716e - name: kind value: task resolver: bundles @@ -627,7 +627,7 @@ spec: - name: name value: push-dockerfile-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:08bba4a659ecd48f871bef00b80af58954e5a09fcbb28a1783ddd640c4f6535e + value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:6fb61bec5ef161225a850005233db68cfdc03ad54e1a54cc49cc98d98ea3d259 - name: kind value: task resolver: bundles diff --git a/.tekton/operator-bundle-pipeline.yaml b/.tekton/operator-bundle-pipeline.yaml index 2194f0f6023c5..10f7ce5c63f61 100644 --- a/.tekton/operator-bundle-pipeline.yaml +++ b/.tekton/operator-bundle-pipeline.yaml @@ -35,7 +35,7 @@ spec: - name: name value: show-sbom - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:beb0616db051952b4b861dd8c3e00fa1c0eccbd926feddf71194d3bb3ace9ce7 + value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:e2c1b4eac642f32e91f3bc5d3cb48c5c70888aaf45c3650d9ea34573de7a7fd5 - name: kind value: task resolver: bundles @@ -272,7 +272,7 @@ spec: - name: name value: init - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:b349d24cb896573695802d6913d311640b44675ec082b3ad167721946a6a0a71 + value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:ebf06778aeacbbeb081f9231eafbdfdb8e380ad04e211d7ed80ae9101e37fd82 - name: kind value: task resolver: bundles @@ -296,7 +296,7 @@ spec: - name: name value: git-clone-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-git-clone-oci-ta:0.1@sha256:56f65a16d3d0485c64ad85af2c1f3e9b0bb4d02d63f2fd0ebb9498d219ca723d + value: quay.io/konflux-ci/tekton-catalog/task-git-clone-oci-ta:0.1@sha256:306b69e6db435ad4a7cf258b6219d9b998eb37da44f5e9ac882ac86a08109154 - name: kind value: task resolver: bundles @@ -355,7 +355,7 @@ spec: - name: name value: prefetch-dependencies-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:8eac535f436874ca27c70434ff356ba941d7b649c0e861bca6f4e9fc5e215478 + value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:c664a6df6514b59c3ce53570b0994b45af66ecc89ba2a8e41834eae0622addf6 - name: kind value: task resolver: bundles @@ -518,7 +518,7 @@ spec: - name: name value: buildah-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-buildah-oci-ta:0.7@sha256:eeb86e8739801237bbefa84e1ab7873c887ca598ef859dd6d06548948b11b95f + value: quay.io/konflux-ci/tekton-catalog/task-buildah-oci-ta:0.8@sha256:90cee1e47e3221f2bf1090e25228179ab7ac37dde0ffc8791805e63be2d3b4ab - name: kind value: task resolver: bundles @@ -541,7 +541,7 @@ spec: - name: name value: apply-tags - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-apply-tags:0.2@sha256:c89cd10b2a3f4c43789c5f06ef2b86f528b28f156c20af5e751fa8c0facd457d + value: quay.io/konflux-ci/tekton-catalog/task-apply-tags:0.3@sha256:510b6d2a3b188adeb716e49566b57d611ab36bd69a2794b5ddfc11dbf014c2ca - 
name: kind value: task resolver: bundles @@ -584,7 +584,7 @@ spec: - name: name value: deprecated-image-check - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-deprecated-image-check:0.5@sha256:808fe09bb5b8503de569de097ae5dd619a7488110f79e8e215e69862ee3fce6d + value: quay.io/konflux-ci/tekton-catalog/task-deprecated-image-check:0.5@sha256:e3a55ccdf1091b4a35507f9ee2d1918d8e89a5f96babcb5486b491226da03d6f - name: kind value: task resolver: bundles @@ -604,7 +604,7 @@ spec: - name: name value: clair-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:7c2a32de9021f16f6e8df08a55f539f12e00ea4d96f6fb37f9ea04167032c61f + value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:dadfea7633d82e4303ba73d5e9c7e2bc16834bde0fd7688880453b26452067eb - name: kind value: task resolver: bundles @@ -626,7 +626,7 @@ spec: - name: name value: fips-operator-bundle-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-fips-operator-bundle-check-oci-ta:0.1@sha256:d3b0730dac6a72db1de4a90f3f2703fb261365b2202cb79a9cf7cc56cec0671f + value: quay.io/konflux-ci/tekton-catalog/task-fips-operator-bundle-check-oci-ta:0.1@sha256:056742bc951d5154ddce92accfe450360b7f3a19ec515dd7635a9f2824a76423 - name: kind value: task resolver: bundles @@ -650,7 +650,7 @@ spec: - name: name value: sast-shell-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:d44336d7bcbd1f7cedee639357a493bd1f661e2859e49e11a34644bdf6819c4e + value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:1f67b661458c549ab299bcdddb5e2b799af8c89d3c594567eb654d870000b5ec - name: kind value: task resolver: bundles @@ -674,7 +674,7 @@ spec: - name: name value: sast-unicode-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.3@sha256:e5a8d3e8e7be7246a1460385b95c084ea6e8fe7520d40fe4389deb90f1bf5176 + value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.4@sha256:9d2ab1bcd65f56ce32fe366a955abc8ac76228734a3f3642ac9af8af86fbb4d1 - name: kind value: task resolver: bundles @@ -698,7 +698,7 @@ spec: - name: name value: sast-snyk-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:0eca130f289a1a1069a1b92943479f79aa7324e4e68d6396fd777ccd97058f50 + value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:0fc62e57ab2c75adf5eaa5c3e5aaeb4845dbf029ddd159b688bc5804579b639f - name: kind value: task resolver: bundles @@ -718,7 +718,7 @@ spec: - name: name value: clamav-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:f3d2d179cddcc07d0228d9f52959a233037a3afa2619d0a8b2effbb467db80c3 + value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:4f5ccf2324ecef92aaad6e2adb46c0bb15be49b4869b5b407346c514b764404f - name: kind value: task resolver: bundles @@ -738,7 +738,7 @@ spec: - name: name value: rpms-signature-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-rpms-signature-scan:0.2@sha256:0b10508c82ccb0f5a06a66ce7af56e9bfd40651ddefdf0f499988e897771ee28 + value: quay.io/konflux-ci/tekton-catalog/task-rpms-signature-scan:0.2@sha256:ccd087c879899b4c7fe2e05c5a2fa5b9829f4826fa2bd60e0db5b1d4bf1a716e - name: kind value: task resolver: bundles @@ -764,7 +764,7 @@ spec: - name: name value: push-dockerfile-oci-ta - name: bundle - value: 
quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:08bba4a659ecd48f871bef00b80af58954e5a09fcbb28a1783ddd640c4f6535e + value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:6fb61bec5ef161225a850005233db68cfdc03ad54e1a54cc49cc98d98ea3d259 - name: kind value: task resolver: bundles diff --git a/.tekton/retag-pipeline.yaml b/.tekton/retag-pipeline.yaml index 9124aa506d5a4..fa9b9c6c388ab 100644 --- a/.tekton/retag-pipeline.yaml +++ b/.tekton/retag-pipeline.yaml @@ -117,7 +117,7 @@ spec: - name: name value: git-clone-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-git-clone-oci-ta:0.1@sha256:56f65a16d3d0485c64ad85af2c1f3e9b0bb4d02d63f2fd0ebb9498d219ca723d + value: quay.io/konflux-ci/tekton-catalog/task-git-clone-oci-ta:0.1@sha256:306b69e6db435ad4a7cf258b6219d9b998eb37da44f5e9ac882ac86a08109154 - name: kind value: task resolver: bundles diff --git a/.tekton/scanner-v4-pipeline.yaml b/.tekton/scanner-v4-pipeline.yaml index de426d4da0bc1..9cba57d86fac0 100644 --- a/.tekton/scanner-v4-pipeline.yaml +++ b/.tekton/scanner-v4-pipeline.yaml @@ -35,7 +35,7 @@ spec: - name: name value: show-sbom - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:beb0616db051952b4b861dd8c3e00fa1c0eccbd926feddf71194d3bb3ace9ce7 + value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:e2c1b4eac642f32e91f3bc5d3cb48c5c70888aaf45c3650d9ea34573de7a7fd5 - name: kind value: task resolver: bundles @@ -175,7 +175,7 @@ spec: - name: name value: init - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:b349d24cb896573695802d6913d311640b44675ec082b3ad167721946a6a0a71 + value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:ebf06778aeacbbeb081f9231eafbdfdb8e380ad04e211d7ed80ae9101e37fd82 - name: kind value: task resolver: bundles @@ -199,7 +199,7 @@ spec: - name: name value: git-clone-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-git-clone-oci-ta:0.1@sha256:56f65a16d3d0485c64ad85af2c1f3e9b0bb4d02d63f2fd0ebb9498d219ca723d + value: quay.io/konflux-ci/tekton-catalog/task-git-clone-oci-ta:0.1@sha256:306b69e6db435ad4a7cf258b6219d9b998eb37da44f5e9ac882ac86a08109154 - name: kind value: task resolver: bundles @@ -278,7 +278,7 @@ spec: - name: name value: prefetch-dependencies-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:8eac535f436874ca27c70434ff356ba941d7b649c0e861bca6f4e9fc5e215478 + value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:c664a6df6514b59c3ce53570b0994b45af66ecc89ba2a8e41834eae0622addf6 - name: kind value: task resolver: bundles @@ -331,7 +331,7 @@ spec: - name: name value: buildah-remote-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.7@sha256:fe66734cde6713f530b19e222680acc5e0d6a533866429659ea8561b746d9ce7 + value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.8@sha256:6de65de9e78397d4f3be81b0ed7cef4b7b2cb6dceee059bcc741c8ff9d6e128a - name: kind value: task resolver: bundles @@ -358,7 +358,7 @@ spec: - name: name value: build-image-index - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.2@sha256:985d1efe861b02524a7679ecd855624b3d4e3a2e835b6f8a97ec7d135898ec0b + value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.2@sha256:8c422a5380a3d877257003dee153190322af84fe6f4f25e9eee7d8bf61a62577 - name: kind value: task resolver: bundles @@ -382,7 +382,7 @@ spec: - 
name: name value: apply-tags - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-apply-tags:0.2@sha256:c89cd10b2a3f4c43789c5f06ef2b86f528b28f156c20af5e751fa8c0facd457d + value: quay.io/konflux-ci/tekton-catalog/task-apply-tags:0.3@sha256:510b6d2a3b188adeb716e49566b57d611ab36bd69a2794b5ddfc11dbf014c2ca - name: kind value: task resolver: bundles @@ -429,7 +429,7 @@ spec: - name: name value: deprecated-image-check - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-deprecated-image-check:0.5@sha256:808fe09bb5b8503de569de097ae5dd619a7488110f79e8e215e69862ee3fce6d + value: quay.io/konflux-ci/tekton-catalog/task-deprecated-image-check:0.5@sha256:e3a55ccdf1091b4a35507f9ee2d1918d8e89a5f96babcb5486b491226da03d6f - name: kind value: task resolver: bundles @@ -454,7 +454,7 @@ spec: - name: name value: clair-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:7c2a32de9021f16f6e8df08a55f539f12e00ea4d96f6fb37f9ea04167032c61f + value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:dadfea7633d82e4303ba73d5e9c7e2bc16834bde0fd7688880453b26452067eb - name: kind value: task resolver: bundles @@ -477,7 +477,7 @@ spec: - name: name value: ecosystem-cert-preflight-checks - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:04f75593558f79a27da2336400bc63d460bf0c5669e3c13f40ee2fb650b1ad1e + value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:33b7133c0c132c361295c30947f73bd45a3a3b62a24b83f3d8cd7c71f757828c - name: kind value: task resolver: bundles @@ -501,7 +501,7 @@ spec: - name: name value: sast-shell-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:d44336d7bcbd1f7cedee639357a493bd1f661e2859e49e11a34644bdf6819c4e + value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:1f67b661458c549ab299bcdddb5e2b799af8c89d3c594567eb654d870000b5ec - name: kind value: task resolver: bundles @@ -525,7 +525,7 @@ spec: - name: name value: sast-unicode-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.3@sha256:e5a8d3e8e7be7246a1460385b95c084ea6e8fe7520d40fe4389deb90f1bf5176 + value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.4@sha256:9d2ab1bcd65f56ce32fe366a955abc8ac76228734a3f3642ac9af8af86fbb4d1 - name: kind value: task resolver: bundles @@ -549,7 +549,7 @@ spec: - name: name value: sast-snyk-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:0eca130f289a1a1069a1b92943479f79aa7324e4e68d6396fd777ccd97058f50 + value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:0fc62e57ab2c75adf5eaa5c3e5aaeb4845dbf029ddd159b688bc5804579b639f - name: kind value: task resolver: bundles @@ -574,7 +574,7 @@ spec: - name: name value: clamav-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:f3d2d179cddcc07d0228d9f52959a233037a3afa2619d0a8b2effbb467db80c3 + value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:4f5ccf2324ecef92aaad6e2adb46c0bb15be49b4869b5b407346c514b764404f - name: kind value: task resolver: bundles @@ -594,7 +594,7 @@ spec: - name: name value: rpms-signature-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-rpms-signature-scan:0.2@sha256:0b10508c82ccb0f5a06a66ce7af56e9bfd40651ddefdf0f499988e897771ee28 + value: 
quay.io/konflux-ci/tekton-catalog/task-rpms-signature-scan:0.2@sha256:ccd087c879899b4c7fe2e05c5a2fa5b9829f4826fa2bd60e0db5b1d4bf1a716e - name: kind value: task resolver: bundles @@ -620,7 +620,7 @@ spec: - name: name value: push-dockerfile-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:08bba4a659ecd48f871bef00b80af58954e5a09fcbb28a1783ddd640c4f6535e + value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:6fb61bec5ef161225a850005233db68cfdc03ad54e1a54cc49cc98d98ea3d259 - name: kind value: task resolver: bundles From 7446e85c0f926ee75d341d537793640c7926d12b Mon Sep 17 00:00:00 2001 From: "red-hat-konflux[bot]" <126015336+red-hat-konflux[bot]@users.noreply.github.com> Date: Wed, 4 Feb 2026 07:13:04 +0000 Subject: [PATCH 112/232] chore(deps): update quay.io/rhacs-eng/konflux-tasks:latest docker digest to c31dabb (#18431) Signed-off-by: red-hat-konflux <126015336+red-hat-konflux[bot]@users.noreply.github.com> Co-authored-by: red-hat-konflux[bot] <126015336+red-hat-konflux[bot]@users.noreply.github.com> --- .tekton/basic-component-pipeline.yaml | 6 +++--- .tekton/create-custom-snapshot.yaml | 8 ++++---- .tekton/main-pipeline.yaml | 8 ++++---- .tekton/operator-bundle-pipeline.yaml | 8 ++++---- .tekton/retag-pipeline.yaml | 10 +++++----- .tekton/scanner-v4-pipeline.yaml | 8 ++++---- 6 files changed, 24 insertions(+), 24 deletions(-) diff --git a/.tekton/basic-component-pipeline.yaml b/.tekton/basic-component-pipeline.yaml index 2fc8efa916168..d7dc0dab2dde3 100644 --- a/.tekton/basic-component-pipeline.yaml +++ b/.tekton/basic-component-pipeline.yaml @@ -49,7 +49,7 @@ spec: - name: name value: post-bigquery-metrics - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c3b069313d4de5d67dc662e31cbcdde02e134be5fe7dcdb5bceeb670f89fcf52 + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:728527ed22e3527c0b7fead4657d0e889d2506d32383351abdf93eb34b496a83 - name: kind value: task resolver: bundles @@ -222,7 +222,7 @@ spec: - name: name value: determine-image-expiration - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c3b069313d4de5d67dc662e31cbcdde02e134be5fe7dcdb5bceeb670f89fcf52 + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:728527ed22e3527c0b7fead4657d0e889d2506d32383351abdf93eb34b496a83 - name: kind value: task resolver: bundles @@ -238,7 +238,7 @@ spec: - name: name value: determine-image-tag - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c3b069313d4de5d67dc662e31cbcdde02e134be5fe7dcdb5bceeb670f89fcf52 + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:728527ed22e3527c0b7fead4657d0e889d2506d32383351abdf93eb34b496a83 - name: kind value: task resolver: bundles diff --git a/.tekton/create-custom-snapshot.yaml b/.tekton/create-custom-snapshot.yaml index 77a7bdf70e5d1..4f1bab67c91f9 100644 --- a/.tekton/create-custom-snapshot.yaml +++ b/.tekton/create-custom-snapshot.yaml @@ -78,7 +78,7 @@ spec: - name: name value: post-bigquery-metrics - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c3b069313d4de5d67dc662e31cbcdde02e134be5fe7dcdb5bceeb670f89fcf52 + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:728527ed22e3527c0b7fead4657d0e889d2506d32383351abdf93eb34b496a83 - name: kind value: task resolver: bundles @@ -161,7 +161,7 @@ spec: - name: name value: determine-image-tag - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c3b069313d4de5d67dc662e31cbcdde02e134be5fe7dcdb5bceeb670f89fcf52 + value: 
quay.io/rhacs-eng/konflux-tasks:latest@sha256:728527ed22e3527c0b7fead4657d0e889d2506d32383351abdf93eb34b496a83 - name: kind value: task resolver: bundles @@ -175,7 +175,7 @@ spec: - name: name value: wait-for-image - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c3b069313d4de5d67dc662e31cbcdde02e134be5fe7dcdb5bceeb670f89fcf52 + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:728527ed22e3527c0b7fead4657d0e889d2506d32383351abdf93eb34b496a83 - name: kind value: task resolver: bundles @@ -259,7 +259,7 @@ spec: - name: name value: create-snapshot-from-bundle - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c3b069313d4de5d67dc662e31cbcdde02e134be5fe7dcdb5bceeb670f89fcf52 + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:728527ed22e3527c0b7fead4657d0e889d2506d32383351abdf93eb34b496a83 - name: kind value: task resolver: bundles diff --git a/.tekton/main-pipeline.yaml b/.tekton/main-pipeline.yaml index d1773908211d2..780263d8ec5c8 100644 --- a/.tekton/main-pipeline.yaml +++ b/.tekton/main-pipeline.yaml @@ -49,7 +49,7 @@ spec: - name: name value: post-bigquery-metrics - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c3b069313d4de5d67dc662e31cbcdde02e134be5fe7dcdb5bceeb670f89fcf52 + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:728527ed22e3527c0b7fead4657d0e889d2506d32383351abdf93eb34b496a83 - name: kind value: task resolver: bundles @@ -223,7 +223,7 @@ spec: - name: name value: determine-image-expiration - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c3b069313d4de5d67dc662e31cbcdde02e134be5fe7dcdb5bceeb670f89fcf52 + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:728527ed22e3527c0b7fead4657d0e889d2506d32383351abdf93eb34b496a83 - name: kind value: task resolver: bundles @@ -239,7 +239,7 @@ spec: - name: name value: determine-image-tag - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c3b069313d4de5d67dc662e31cbcdde02e134be5fe7dcdb5bceeb670f89fcf52 + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:728527ed22e3527c0b7fead4657d0e889d2506d32383351abdf93eb34b496a83 - name: kind value: task resolver: bundles @@ -259,7 +259,7 @@ spec: - name: name value: fetch-external-networks - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c3b069313d4de5d67dc662e31cbcdde02e134be5fe7dcdb5bceeb670f89fcf52 + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:728527ed22e3527c0b7fead4657d0e889d2506d32383351abdf93eb34b496a83 - name: kind value: task resolver: bundles diff --git a/.tekton/operator-bundle-pipeline.yaml b/.tekton/operator-bundle-pipeline.yaml index 10f7ce5c63f61..1ec79c87223d5 100644 --- a/.tekton/operator-bundle-pipeline.yaml +++ b/.tekton/operator-bundle-pipeline.yaml @@ -49,7 +49,7 @@ spec: - name: name value: post-bigquery-metrics - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c3b069313d4de5d67dc662e31cbcdde02e134be5fe7dcdb5bceeb670f89fcf52 + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:728527ed22e3527c0b7fead4657d0e889d2506d32383351abdf93eb34b496a83 - name: kind value: task resolver: bundles @@ -319,7 +319,7 @@ spec: - name: name value: determine-image-expiration - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c3b069313d4de5d67dc662e31cbcdde02e134be5fe7dcdb5bceeb670f89fcf52 + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:728527ed22e3527c0b7fead4657d0e889d2506d32383351abdf93eb34b496a83 - name: kind value: task resolver: bundles @@ -335,7 +335,7 @@ spec: - name: name value: determine-image-tag - 
name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c3b069313d4de5d67dc662e31cbcdde02e134be5fe7dcdb5bceeb670f89fcf52 + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:728527ed22e3527c0b7fead4657d0e889d2506d32383351abdf93eb34b496a83 - name: kind value: task resolver: bundles @@ -372,7 +372,7 @@ spec: - name: name value: wait-for-image - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c3b069313d4de5d67dc662e31cbcdde02e134be5fe7dcdb5bceeb670f89fcf52 + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:728527ed22e3527c0b7fead4657d0e889d2506d32383351abdf93eb34b496a83 - name: kind value: task resolver: bundles diff --git a/.tekton/retag-pipeline.yaml b/.tekton/retag-pipeline.yaml index fa9b9c6c388ab..0338536076562 100644 --- a/.tekton/retag-pipeline.yaml +++ b/.tekton/retag-pipeline.yaml @@ -35,7 +35,7 @@ spec: - name: name value: post-bigquery-metrics - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c925228a20e1a87c93bccbe66444436ad0bdc88e2a20cbd576e7bad1686525af + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:728527ed22e3527c0b7fead4657d0e889d2506d32383351abdf93eb34b496a83 - name: kind value: task resolver: bundles @@ -136,7 +136,7 @@ spec: - name: name value: determine-image-tag - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c925228a20e1a87c93bccbe66444436ad0bdc88e2a20cbd576e7bad1686525af + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:728527ed22e3527c0b7fead4657d0e889d2506d32383351abdf93eb34b496a83 - name: kind value: task resolver: bundles @@ -154,7 +154,7 @@ spec: - name: name value: determine-dependency-image-tag - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c925228a20e1a87c93bccbe66444436ad0bdc88e2a20cbd576e7bad1686525af + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:728527ed22e3527c0b7fead4657d0e889d2506d32383351abdf93eb34b496a83 - name: kind value: task resolver: bundles @@ -170,7 +170,7 @@ spec: - name: name value: wait-for-image - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c925228a20e1a87c93bccbe66444436ad0bdc88e2a20cbd576e7bad1686525af + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:728527ed22e3527c0b7fead4657d0e889d2506d32383351abdf93eb34b496a83 - name: kind value: task resolver: bundles @@ -195,7 +195,7 @@ spec: - name: name value: retag-image - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c925228a20e1a87c93bccbe66444436ad0bdc88e2a20cbd576e7bad1686525af + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:728527ed22e3527c0b7fead4657d0e889d2506d32383351abdf93eb34b496a83 - name: kind value: task resolver: bundles diff --git a/.tekton/scanner-v4-pipeline.yaml b/.tekton/scanner-v4-pipeline.yaml index 9cba57d86fac0..dd1f79054e652 100644 --- a/.tekton/scanner-v4-pipeline.yaml +++ b/.tekton/scanner-v4-pipeline.yaml @@ -49,7 +49,7 @@ spec: - name: name value: post-bigquery-metrics - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c3b069313d4de5d67dc662e31cbcdde02e134be5fe7dcdb5bceeb670f89fcf52 + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:728527ed22e3527c0b7fead4657d0e889d2506d32383351abdf93eb34b496a83 - name: kind value: task resolver: bundles @@ -222,7 +222,7 @@ spec: - name: name value: determine-image-expiration - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c3b069313d4de5d67dc662e31cbcdde02e134be5fe7dcdb5bceeb670f89fcf52 + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:728527ed22e3527c0b7fead4657d0e889d2506d32383351abdf93eb34b496a83 - name: 
kind value: task resolver: bundles @@ -238,7 +238,7 @@ spec: - name: name value: determine-image-tag - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c3b069313d4de5d67dc662e31cbcdde02e134be5fe7dcdb5bceeb670f89fcf52 + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:728527ed22e3527c0b7fead4657d0e889d2506d32383351abdf93eb34b496a83 - name: kind value: task resolver: bundles @@ -258,7 +258,7 @@ spec: - name: name value: fetch-scanner-v4-vuln-mappings - name: bundle - value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:c3b069313d4de5d67dc662e31cbcdde02e134be5fe7dcdb5bceeb670f89fcf52 + value: quay.io/rhacs-eng/konflux-tasks:latest@sha256:728527ed22e3527c0b7fead4657d0e889d2506d32383351abdf93eb34b496a83 - name: kind value: task resolver: bundles From 7ec23fb757f2010cd75986350352111781a1b5b9 Mon Sep 17 00:00:00 2001 From: Misha Sugakov <537715+msugakov@users.noreply.github.com> Date: Wed, 4 Feb 2026 14:40:33 +0100 Subject: [PATCH 113/232] build: Use PyYAML dependency as RPM in bundle build (#18778) --- .tekton/operator-bundle-build.yaml | 2 +- .tekton/operator-bundle-pipeline.yaml | 2 + operator/Makefile | 2 +- operator/bundle_helpers/.gitignore | 1 + operator/bundle_helpers/README.md | 53 -- .../bundle_helpers/requirements-build.txt | 86 --- operator/bundle_helpers/requirements-gha.txt | 5 - operator/bundle_helpers/requirements.in | 1 - operator/bundle_helpers/requirements.txt | 65 +- operator/konflux.bundle.Dockerfile | 10 +- rpms.in.yaml | 2 + rpms.lock.yaml | 576 +++++++++++++----- 12 files changed, 446 insertions(+), 359 deletions(-) delete mode 100644 operator/bundle_helpers/README.md delete mode 100644 operator/bundle_helpers/requirements-build.txt delete mode 100644 operator/bundle_helpers/requirements-gha.txt delete mode 100644 operator/bundle_helpers/requirements.in diff --git a/.tekton/operator-bundle-build.yaml b/.tekton/operator-bundle-build.yaml index 08f39fc7e2c11..e729253421329 100644 --- a/.tekton/operator-bundle-build.yaml +++ b/.tekton/operator-bundle-build.yaml @@ -49,7 +49,7 @@ spec: - name: prefetch-input value: | [ - { "type": "pip", "path": "operator/bundle_helpers" } + { "type": "rpm", "path": "." } ] - name: build-source-image value: 'true' diff --git a/.tekton/operator-bundle-pipeline.yaml b/.tekton/operator-bundle-pipeline.yaml index 1ec79c87223d5..0d17c968af159 100644 --- a/.tekton/operator-bundle-pipeline.yaml +++ b/.tekton/operator-bundle-pipeline.yaml @@ -350,6 +350,8 @@ spec: value: $(params.output-image-repo):konflux-$(params.revision).prefetch - name: ociArtifactExpiresAfter value: $(params.oci-artifact-expires-after) + - name: ACTIVATION_KEY + value: subscription-manager-activation-key-prod taskRef: params: - name: name diff --git a/operator/Makefile b/operator/Makefile index e19dec7c080a0..a25efe522b009 100644 --- a/operator/Makefile +++ b/operator/Makefile @@ -434,7 +434,7 @@ upgrade-dirty-tag-via-olm: kuttl ACTIVATE_PYTHON = $(PYTHON) -m venv bundle_helpers/.venv ;\ . bundle_helpers/.venv/bin/activate ;\ pip3 install --upgrade pip==21.3.1 setuptools==59.6.0 ;\ - pip3 install -r bundle_helpers/requirements-gha.txt + pip3 install -r bundle_helpers/requirements.txt .PHONY: bundle bundle: yq manifests kustomize operator-sdk ## Generate bundle manifests and metadata, then validate generated files. 
diff --git a/operator/bundle_helpers/.gitignore b/operator/bundle_helpers/.gitignore
index dc4a262c3d60f..8bda2111e1e21 100644
--- a/operator/bundle_helpers/.gitignore
+++ b/operator/bundle_helpers/.gitignore
@@ -1,4 +1,5 @@
 /__pycache__/
+# Remnants of the old way we pulled Python dependencies in the operator-bundle build.
 requirements-build.in
 pip_find_builddeps.py
diff --git a/operator/bundle_helpers/README.md b/operator/bundle_helpers/README.md
deleted file mode 100644
index aa8f2a94c2af7..0000000000000
--- a/operator/bundle_helpers/README.md
+++ /dev/null
@@ -1,53 +0,0 @@
-# Bundle Helpers
-
-For hermetic builds with Konflux, we need to provide the full list of resolved dependencies in `requirements.txt`.
-The dependency source files will be prefetched with Cachi2 and made available to the container image build.
-Follow the procedure below after any dependencies change for successful builds in Konflux.
-
-## Prepare the fully resolved requirements files for Cachi2
-
-### Prerequisite
-
-Run the steps inside a container of the same image as the [operator-bundle builder stage](../konflux.bundle.Dockerfile).
-
-```bash
-docker run -it -v "$(git rev-parse --show-toplevel)/operator/bundle_helpers:/src" --entrypoint /bin/bash -w /src registry.access.redhat.com/ubi9/python-39:latest
-# inside the container
-python3 -m pip install pip-tools
-```
-
-### Instructions
-
-1. Generate a fully resolved requirements.txt:
-
-```bash
-pip-compile requirements.in --generate-hashes
-```
-
-2. Download pip_find_builddeps.py:
-
-```bash
-curl -fO https://raw.githubusercontent.com/containerbuildsystem/cachito/master/bin/pip_find_builddeps.py
-chmod +x pip_find_builddeps.py
-```
-
-3. Generate a fully resolved `requirements-build.txt`:
-
-```bash
-./pip_find_builddeps.py requirements.txt \
-  -o requirements-build.in
-
-pip-compile requirements-build.in --allow-unsafe --generate-hashes
-```
-
-4. Exit the container and commit the changes.
-
-For more information, consult the [Cachi2 docs](https://github.com/containerbuildsystem/cachi2/blob/main/docs/pip.md#building-from-source).
-
-### What does each requirements file do?
-
-* `requirements.in`: List of project dependencies.
-* `requirements-gha.txt`: The list of project dependencies as required by the build process on GHA and locally. This file exists as a workaround due to a different Python version in this context. Any changes in this or the `requirements.in` file should be synced manually to `requirements-gha.txt`. This file will be deleted after ROX-26860.
-* `requirements.txt`: Fully resolved list of all transitive project dependencies.
-* `requirements-build.txt`: Fully resolved list of all dependencies required to _build_ the project dependencies from sources in Konflux.
-* `requirements-build.in` (not commited): Intermediate result for the generation of `requirements.txt`.
diff --git a/operator/bundle_helpers/requirements-build.txt b/operator/bundle_helpers/requirements-build.txt deleted file mode 100644 index 5f7bb0f4c1dc5..0000000000000 --- a/operator/bundle_helpers/requirements-build.txt +++ /dev/null @@ -1,86 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --allow-unsafe --generate-hashes requirements-build.in -# -cython==3.0.12 \ - --hash=sha256:0038c9bae46c459669390e53a1ec115f8096b2e4647ae007ff1bf4e6dee92806 \ - --hash=sha256:0faa5e39e5c8cdf6f9c3b1c3f24972826e45911e7f5b99cf99453fca5432f45e \ - --hash=sha256:120681093772bf3600caddb296a65b352a0d3556e962b9b147efcfb8e8c9801b \ - --hash=sha256:1e5eadef80143026944ea8f9904715a008f5108d1d644a89f63094cc37351e73 \ - --hash=sha256:25529ee948f44d9a165ff960c49d4903267c20b5edf2df79b45924802e4cca6e \ - --hash=sha256:2d53de996ed340e9ab0fc85a88aaa8932f2591a2746e1ab1c06e262bd4ec4be7 \ - --hash=sha256:3083465749911ac3b2ce001b6bf17f404ac9dd35d8b08469d19dc7e717f5877a \ - --hash=sha256:309c081057930bb79dc9ea3061a1af5086c679c968206e9c9c2ec90ab7cb471a \ - --hash=sha256:3109e1d44425a2639e9a677b66cd7711721a5b606b65867cb2d8ef7a97e2237b \ - --hash=sha256:34ce459808f7d8d5d4007bc5486fe50532529096b43957af6cbffcb4d9cc5c8d \ - --hash=sha256:36fcd584dae547de6f095500a380f4a0cce72b7a7e409e9ff03cb9beed6ac7a1 \ - --hash=sha256:398d4576c1e1f6316282aa0b4a55139254fbed965cba7813e6d9900d3092b128 \ - --hash=sha256:3ccd1228cc203b1f1b8a3d403f5a20ad1c40e5879b3fbf5851ce09d948982f2c \ - --hash=sha256:3e4fa855d98bc7bd6a2049e0c7dc0dcf595e2e7f571a26e808f3efd84d2db374 \ - --hash=sha256:43c48b5789398b228ea97499f5b864843ba9b1ab837562a9227c6f58d16ede8b \ - --hash=sha256:4aa255781b093a8401109d8f2104bbb2e52de7639d5896aefafddc85c30e0894 \ - --hash=sha256:4ee6f1ea1bead8e6cbc4e64571505b5d8dbdb3b58e679d31f3a84160cebf1a1a \ - --hash=sha256:54115fcc126840926ff3b53cfd2152eae17b3522ae7f74888f8a41413bd32f25 \ - --hash=sha256:563de1728c8e48869d2380a1b76bbc1b1b1d01aba948480d68c1d05e52d20c92 \ - --hash=sha256:57aefa6d3341109e46ec1a13e3a763aaa2cbeb14e82af2485b318194be1d9170 \ - --hash=sha256:5a93cbda00a5451175b97dea5a9440a3fcee9e54b4cba7a7dbcba9a764b22aec \ - --hash=sha256:5e5f17c48a4f41557fbcc7ee660ccfebe4536a34c557f553b6893c1b3c83df2d \ - --hash=sha256:629db614b9c364596d7c975fa3fb3978e8c5349524353dbe11429896a783fc1e \ - --hash=sha256:62b79dcc0de49efe9e84b9d0e2ae0a6fc9b14691a65565da727aa2e2e63c6a28 \ - --hash=sha256:63d840f2975e44d74512f8f34f1f7cb8121c9428e26a3f6116ff273deb5e60a2 \ - --hash=sha256:680f1d6ed4436ae94805db264d6155ed076d2835d84f20dcb31a7a3ad7f8668c \ - --hash=sha256:712c3f31adec140dc60d064a7f84741f50e2c25a8edd7ae746d5eb4d3ef7072a \ - --hash=sha256:731d719423e041242c9303c80cae4327467299b90ffe62d4cc407e11e9ea3160 \ - --hash=sha256:75c5acd40b97cff16fadcf6901a91586cbca5dcdba81f738efaf1f4c6bc8dccb \ - --hash=sha256:77d48f2d4bab9fe1236eb753d18f03e8b2619af5b6f05d51df0532a92dfb38ab \ - --hash=sha256:7cffc3464f641c8d0dda942c7c53015291beea11ec4d32421bed2f13b386b819 \ - --hash=sha256:86c304b20bd57c727c7357e90d5ba1a2b6f1c45492de2373814d7745ef2e63b4 \ - --hash=sha256:879ae9023958d63c0675015369384642d0afb9c9d1f3473df9186c42f7a9d265 \ - --hash=sha256:8ab9f5198af74eb16502cc143cdde9ca1cbbf66ea2912e67440dd18a36e3b5fa \ - --hash=sha256:8c9efe9a0895abee3cadfdad4130b30f7b5e57f6e6a51ef2a44f9fc66a913880 \ - --hash=sha256:8d32856716c369d01f2385ad9177cdd1a11079ac89ea0932dc4882de1aa19174 \ - --hash=sha256:8ee841c0e114efa1e849c281ac9b8df8aa189af10b4a103b1c5fd71cbb799679 \ - 
--hash=sha256:90cf599372c5a22120609f7d3a963f17814799335d56dd0dcf8fe615980a8ae1 \ - --hash=sha256:9f8c48748a9c94ea5d59c26ab49ad0fad514d36f894985879cf3c3ca0e600bf4 \ - --hash=sha256:a4032e48d4734d2df68235d21920c715c451ac9de15fa14c71b378e8986b83be \ - --hash=sha256:a7fec4f052b8fe173fe70eae75091389955b9a23d5cec3d576d21c5913b49d47 \ - --hash=sha256:af081838b0f9e12a83ec4c3809a00a64c817f489f7c512b0e3ecaf5f90a2a816 \ - --hash=sha256:b588c0a089a9f4dd316d2f9275230bad4a7271e5af04e1dc41d2707c816be44b \ - --hash=sha256:b988bb297ce76c671e28c97d017b95411010f7c77fa6623dd0bb47eed1aee1bc \ - --hash=sha256:ba67eee9413b66dd9fbacd33f0bc2e028a2a120991d77b5fd4b19d0b1e4039b9 \ - --hash=sha256:bee2717e5b5f7d966d0c6e27d2efe3698c357aa4d61bb3201997c7a4f9fe485a \ - --hash=sha256:bfb75123dd4ff767baa37d7036da0de2dfb6781ff256eef69b11b88b9a0691d1 \ - --hash=sha256:c0b91c7ebace030dd558ea28730de8c580680b50768e5af66db2904a3716c3e3 \ - --hash=sha256:c151082884be468f2f405645858a857298ac7f7592729e5b54788b5c572717ba \ - --hash=sha256:c1879c073e2b34924ce9b7ca64c212705dcc416af4337c45f371242b2e5f6d32 \ - --hash=sha256:c3238a29f37999e27494d120983eca90d14896b2887a0bd858a381204549137a \ - --hash=sha256:d3a8f81980ffbd74e52f9186d8f1654e347d0c44bfea6b5997028977f481a179 \ - --hash=sha256:d4b70fc339adba1e2111b074ee6119fe9fd6072c957d8597bce9a0dd1c3c6784 \ - --hash=sha256:d6945694c5b9170cfbd5f2c0d00ef7487a2de7aba83713a64ee4ebce7fad9e05 \ - --hash=sha256:d6c6cd6a75c8393e6805d17f7126b96a894f310a1a9ea91c47d141fb9341bfa8 \ - --hash=sha256:dcdc3e5d4ce0e7a4af6903ed580833015641e968d18d528d8371e2435a34132c \ - --hash=sha256:dfdbea486e702c328338314adb8e80f5f9741f06a0ae83aaec7463bc166d12e8 \ - --hash=sha256:e62564457851db1c40399bd95a5346b9bb99e17a819bf583b362f418d8f3457a \ - --hash=sha256:ea3a0e19ab77266c738aa110684a753a04da4e709472cadeff487133354d6ab8 \ - --hash=sha256:ebc24609613fa06d0d896309f7164ba168f7e8d71c1e490ed2a08d23351c3f41 \ - --hash=sha256:f39640f8df0400cde6882e23c734f15bb8196de0a008ae5dc6c8d1ec5957d7c8 \ - --hash=sha256:fe030d4a00afb2844f5f70896b7f2a1a0d7da09bf3aa3d884cbe5f73fff5d310 \ - --hash=sha256:feb86122a823937cc06e4c029d80ff69f082ebb0b959ab52a5af6cdd271c5dc3 \ - --hash=sha256:ff5c0b6a65b08117d0534941d404833d516dac422eee88c6b4fd55feb409a5ed - # via -r requirements-build.in -flit-core==3.10.1 \ - --hash=sha256:66e5b87874a0d6e39691f0e22f09306736b633548670ad3c09ec9db03c5662f7 \ - --hash=sha256:cb31a76e8b31ad3351bb89e531f64ef2b05d1e65bd939183250bf81ddf4922a8 - # via -r requirements-build.in -wheel==0.45.1 \ - --hash=sha256:661e1abd9198507b1409a20c02106d9670b2576e916d58f520316666abca6729 \ - --hash=sha256:708e7481cc80179af0e556bbf0cc00b8444c7321e2700b8d8580231d13017248 - # via -r requirements-build.in - -# The following packages are considered to be unsafe in a requirements file: -setuptools==78.1.1 \ - --hash=sha256:c3a9c4211ff4c309edb8b8c4f1cbfa7ae324c4ba9f91ff254e3d305b9fd54561 \ - --hash=sha256:fcc17fd9cd898242f6b4adfaca46137a9edef687f43e6f78469692a5e70d851d - # via -r requirements-build.in diff --git a/operator/bundle_helpers/requirements-gha.txt b/operator/bundle_helpers/requirements-gha.txt deleted file mode 100644 index a513b595dfd02..0000000000000 --- a/operator/bundle_helpers/requirements-gha.txt +++ /dev/null @@ -1,5 +0,0 @@ -# TODO(ROX-26860): remove this file and use just requirements.txt once the GHA operator build runs with Python 3.9. -# PyYAML > 6.0 requires Python > 3.6. -PyYAML==6.0 -# pytest==7.0.1 is the latest available for the quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 job container's Python. 
-pytest==7.0.1 diff --git a/operator/bundle_helpers/requirements.in b/operator/bundle_helpers/requirements.in deleted file mode 100644 index aa4d4d0668b61..0000000000000 --- a/operator/bundle_helpers/requirements.in +++ /dev/null @@ -1 +0,0 @@ -pyyaml==6.0.2 diff --git a/operator/bundle_helpers/requirements.txt b/operator/bundle_helpers/requirements.txt index 4184dd797496d..afab1d7091db5 100644 --- a/operator/bundle_helpers/requirements.txt +++ b/operator/bundle_helpers/requirements.txt @@ -1,61 +1,4 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --generate-hashes requirements.in -# -pyyaml==6.0.2 \ - --hash=sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff \ - --hash=sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48 \ - --hash=sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086 \ - --hash=sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e \ - --hash=sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133 \ - --hash=sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5 \ - --hash=sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484 \ - --hash=sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee \ - --hash=sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5 \ - --hash=sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68 \ - --hash=sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a \ - --hash=sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf \ - --hash=sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99 \ - --hash=sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8 \ - --hash=sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85 \ - --hash=sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19 \ - --hash=sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc \ - --hash=sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a \ - --hash=sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1 \ - --hash=sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317 \ - --hash=sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c \ - --hash=sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631 \ - --hash=sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d \ - --hash=sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652 \ - --hash=sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5 \ - --hash=sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e \ - --hash=sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b \ - --hash=sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8 \ - --hash=sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476 \ - --hash=sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706 \ - --hash=sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563 \ - --hash=sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237 \ - --hash=sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b \ - --hash=sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083 \ - 
--hash=sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180 \ - --hash=sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425 \ - --hash=sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e \ - --hash=sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f \ - --hash=sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725 \ - --hash=sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183 \ - --hash=sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab \ - --hash=sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774 \ - --hash=sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725 \ - --hash=sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e \ - --hash=sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5 \ - --hash=sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d \ - --hash=sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290 \ - --hash=sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44 \ - --hash=sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed \ - --hash=sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4 \ - --hash=sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba \ - --hash=sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12 \ - --hash=sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4 - # via -r requirements.in +# PyYAML > 6.0 requires Python > 3.6. +PyYAML==6.0 +# pytest==7.0.1 is the latest available for the quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 job container's Python. +pytest==7.0.1 diff --git a/operator/konflux.bundle.Dockerfile b/operator/konflux.bundle.Dockerfile index ef18a782ad16c..44da03fe694ac 100644 --- a/operator/konflux.bundle.Dockerfile +++ b/operator/konflux.bundle.Dockerfile @@ -1,11 +1,7 @@ -FROM registry.access.redhat.com/ubi9/python-39:latest@sha256:c2112827949a0f2deb040bc8f2a57631daaddd453db2198258275668996dd65f AS builder +FROM registry.access.redhat.com/ubi8/ubi-minimal:latest@sha256:5dc6ba426ccbeb3954ead6b015f36b4a2d22320e5b356b074198d08422464ed2 AS builder -# Because 'default' user cannot create build/ directory and errrors like: -# mkdir: cannot create directory ‘build/’: Permission denied -USER root - -COPY ./operator/bundle_helpers/requirements.txt /tmp/requirements.txt -RUN pip3 install --no-cache-dir -r /tmp/requirements.txt +# This installs both PyYAML and Python. +RUN microdnf -y install python3.12-pyyaml COPY . 
/stackrox WORKDIR /stackrox/operator diff --git a/rpms.in.yaml b/rpms.in.yaml index ece356584c5b3..ea9f137d25850 100644 --- a/rpms.in.yaml +++ b/rpms.in.yaml @@ -7,6 +7,8 @@ packages: # final stage in image/rhel/konflux.Dockerfile - findutils - postgresql +# builder stage in operator/konflux.bundle.Dockerfile +- python3.12-pyyaml moduleEnable: # final stage in image/rhel/konflux.Dockerfile - postgresql:15 diff --git a/rpms.lock.yaml b/rpms.lock.yaml index a4d67cdc387fe..1a220344be53d 100644 --- a/rpms.lock.yaml +++ b/rpms.lock.yaml @@ -18,6 +18,13 @@ arches: name: libxkbcommon evr: 0.9.1-1.el8 sourcerpm: libxkbcommon-0.9.1-1.el8.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/appstream/os/Packages/m/mpdecimal-2.5.1-3.el8.aarch64.rpm + repoid: rhel-8-for-aarch64-appstream-rpms + size: 95196 + checksum: sha256:02fdb7a4c616e2705ee31f6f834fbbb7aed91ca3cedf0ed3d5fd4c771ca46336 + name: mpdecimal + evr: 2.5.1-3.el8 + sourcerpm: mpdecimal-2.5.1-3.el8.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/appstream/os/Packages/o/oniguruma-6.8.2-3.el8.aarch64.rpm repoid: rhel-8-for-aarch64-appstream-rpms size: 185652 @@ -39,6 +46,34 @@ arches: name: postgresql-private-libs evr: 15.15-1.module+el8.10.0+23782+2d6b2a31 sourcerpm: postgresql-15.15-1.module+el8.10.0+23782+2d6b2a31.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/appstream/os/Packages/p/python3.12-3.12.12-1.el8_10.aarch64.rpm + repoid: rhel-8-for-aarch64-appstream-rpms + size: 31172 + checksum: sha256:3196808862e0ca0de2e4f0e4320ce542168c24894405a87ae516eccb83d037af + name: python3.12 + evr: 3.12.12-1.el8_10 + sourcerpm: python3.12-3.12.12-1.el8_10.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/appstream/os/Packages/p/python3.12-libs-3.12.12-1.el8_10.aarch64.rpm + repoid: rhel-8-for-aarch64-appstream-rpms + size: 10268632 + checksum: sha256:76108f4835e417d3b9e6f07edf65c992e27255281a09736d591abc67d7b43049 + name: python3.12-libs + evr: 3.12.12-1.el8_10 + sourcerpm: python3.12-3.12.12-1.el8_10.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/appstream/os/Packages/p/python3.12-pip-wheel-23.2.1-4.el8.noarch.rpm + repoid: rhel-8-for-aarch64-appstream-rpms + size: 1539820 + checksum: sha256:c5daaf3d7ef6c52f178ec6c20e49fa9ddb4506f1b0ee5cd1688046c28eb5e1cb + name: python3.12-pip-wheel + evr: 23.2.1-4.el8 + sourcerpm: python3.12-pip-23.2.1-4.el8.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/appstream/os/Packages/p/python3.12-pyyaml-6.0.1-2.el8.aarch64.rpm + repoid: rhel-8-for-aarch64-appstream-rpms + size: 198784 + checksum: sha256:6a4945457be6e77d81f9dd7dce182d501d7fe90277b351db15f2b95589545711 + name: python3.12-pyyaml + evr: 6.0.1-2.el8 + sourcerpm: python3.12-pyyaml-6.0.1-2.el8.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/appstream/os/Packages/x/xkeyboard-config-2.28-1.el8.noarch.rpm repoid: rhel-8-for-aarch64-appstream-rpms size: 801000 @@ -522,13 +557,13 @@ arches: name: libattr evr: 2.4.48-3.el8 sourcerpm: attr-2.4.48-3.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/l/libblkid-2.32.1-47.el8_10.aarch64.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/l/libblkid-2.32.1-48.el8_10.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms - size: 218828 - checksum: sha256:f095a01eb9eb5d47a41beebfb0f9069365ca0a67ab2e137bc8501c748344b3ea + size: 219120 + checksum: sha256:02202c15390d7ecf9c48e4bce8b0a81b5eff270ffcfbec998c4546bf9483d293 
name: libblkid - evr: 2.32.1-47.el8_10 - sourcerpm: util-linux-2.32.1-47.el8_10.src.rpm + evr: 2.32.1-48.el8_10 + sourcerpm: util-linux-2.32.1-48.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/l/libcap-2.48-6.el8_9.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms size: 75344 @@ -578,13 +613,13 @@ arches: name: libdb-utils evr: 5.3.28-42.el8_4 sourcerpm: libdb-5.3.28-42.el8_4.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/l/libfdisk-2.32.1-47.el8_10.aarch64.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/l/libfdisk-2.32.1-48.el8_10.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms - size: 249284 - checksum: sha256:77480a0068650a9b639f0d8ce7875be243e8577c85d1453b17a7360bc30999cf + size: 249448 + checksum: sha256:71b9420a10ef6d32a46068dd0a8f8cd44b3eefabe02bde00346d6cc72cb7d5cc name: libfdisk - evr: 2.32.1-47.el8_10 - sourcerpm: util-linux-2.32.1-47.el8_10.src.rpm + evr: 2.32.1-48.el8_10 + sourcerpm: util-linux-2.32.1-48.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/l/libffi-3.1-24.el8.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms size: 37560 @@ -641,13 +676,13 @@ arches: name: libkcapi-hmaccalc evr: 1.4.0-2.el8 sourcerpm: libkcapi-1.4.0-2.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/l/libmount-2.32.1-47.el8_10.aarch64.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/l/libmount-2.32.1-48.el8_10.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms - size: 234412 - checksum: sha256:9515a7b004a80109e8d7097403cb85f93d2ea886be7663fd7df7371e3cf3cb4b + size: 234604 + checksum: sha256:d94b2c54047114f914301266ca4185dcb2b90e32b3feb7fcb88c8777f992ed4b name: libmount - evr: 2.32.1-47.el8_10 - sourcerpm: util-linux-2.32.1-47.el8_10.src.rpm + evr: 2.32.1-48.el8_10 + sourcerpm: util-linux-2.32.1-48.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/l/libnghttp2-1.33.0-6.el8_10.1.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms size: 77144 @@ -711,13 +746,13 @@ arches: name: libsigsegv evr: 2.11-5.el8 sourcerpm: libsigsegv-2.11-5.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/l/libsmartcols-2.32.1-47.el8_10.aarch64.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/l/libsmartcols-2.32.1-48.el8_10.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms - size: 178592 - checksum: sha256:c0038da4290bbbad241f36f5f7f9b16b0bb11baf108973eef0b9a9dc41301e65 + size: 178700 + checksum: sha256:55da9f33dd305e6d6e156b96fde1f0a2b96b184507b80575b832033aaaaece1b name: libsmartcols - evr: 2.32.1-47.el8_10 - sourcerpm: util-linux-2.32.1-47.el8_10.src.rpm + evr: 2.32.1-48.el8_10 + sourcerpm: util-linux-2.32.1-48.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/l/libssh-0.9.6-16.el8_10.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms size: 214584 @@ -767,13 +802,13 @@ arches: name: libutempter evr: 1.1.6-14.el8 sourcerpm: libutempter-1.1.6-14.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/l/libuuid-2.32.1-47.el8_10.aarch64.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/l/libuuid-2.32.1-48.el8_10.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms - size: 100032 - checksum: 
sha256:24ecbd85e9f6a07435abd85815526ad7d3f2292451424b41685d377a3088fd29 + size: 100212 + checksum: sha256:0cd068fb65f72bd3be463f4aed931340bad45ebdb1adf2a0ed1b9a3f3d9b96d5 name: libuuid - evr: 2.32.1-47.el8_10 - sourcerpm: util-linux-2.32.1-47.el8_10.src.rpm + evr: 2.32.1-48.el8_10 + sourcerpm: util-linux-2.32.1-48.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/l/libverto-0.3.2-2.el8.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms size: 24168 @@ -795,6 +830,13 @@ arches: name: libxml2 evr: 2.9.7-21.el8_10.3 sourcerpm: libxml2-2.9.7-21.el8_10.3.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/l/libyaml-0.1.7-5.el8.aarch64.rpm + repoid: rhel-8-for-aarch64-baseos-rpms + size: 58456 + checksum: sha256:7864fbc866ae5a3e59b4f0f114b77ff52b55e76c5388a917f82a6097f02a4db7 + name: libyaml + evr: 0.1.7-5.el8 + sourcerpm: libyaml-0.1.7-5.el8.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/l/libzstd-1.4.4-1.el8.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms size: 246104 @@ -1117,13 +1159,13 @@ arches: name: tzdata evr: 2025c-1.el8 sourcerpm: tzdata-2025c-1.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/u/util-linux-2.32.1-47.el8_10.aarch64.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/u/util-linux-2.32.1-48.el8_10.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms - size: 2587504 - checksum: sha256:5632571c56cabce61cd6c7f88411e0f77743a9cc163f677a73a6b603b8f15044 + size: 2587976 + checksum: sha256:8a737712dfe00ec954e1f959c1b25faaf02700548efd4dfa37539a3dcaa9022e name: util-linux - evr: 2.32.1-47.el8_10 - sourcerpm: util-linux-2.32.1-47.el8_10.src.rpm + evr: 2.32.1-48.el8_10 + sourcerpm: util-linux-2.32.1-48.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/os/Packages/w/which-2.21-21.el8_10.aarch64.rpm repoid: rhel-8-for-aarch64-baseos-rpms size: 50372 @@ -1165,6 +1207,12 @@ arches: checksum: sha256:ca72f33bbbdd245bf1d2385e5f934d36b0ebdc9854b242fce7be0bb56bfa8255 name: libxkbcommon evr: 0.9.1-1.el8 + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/appstream/source/SRPMS/Packages/m/mpdecimal-2.5.1-3.el8.src.rpm + repoid: rhel-8-for-aarch64-appstream-source-rpms + size: 3333112 + checksum: sha256:36811086e6cf10de04a5cfcbc3599228e073a44e032fac74d72857c970f06cae + name: mpdecimal + evr: 2.5.1-3.el8 - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/appstream/source/SRPMS/Packages/o/oniguruma-6.8.2-3.el8.src.rpm repoid: rhel-8-for-aarch64-appstream-source-rpms size: 982385 @@ -1177,6 +1225,24 @@ arches: checksum: sha256:30795de4ed7a01becc64ee50796e7c76b9195ff1eed0a341b279aeb3e4b15527 name: postgresql evr: 15.15-1.module+el8.10.0+23782+2d6b2a31 + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/appstream/source/SRPMS/Packages/p/python3.12-3.12.12-1.el8_10.src.rpm + repoid: rhel-8-for-aarch64-appstream-source-rpms + size: 20869970 + checksum: sha256:0f8e31ed00a577ddbf493a04b70ea364dd0b35fd2a30f2a0e5719b68aeb244b9 + name: python3.12 + evr: 3.12.12-1.el8_10 + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/appstream/source/SRPMS/Packages/p/python3.12-pip-23.2.1-4.el8.src.rpm + repoid: rhel-8-for-aarch64-appstream-source-rpms + size: 9393232 + checksum: sha256:5661eadc28225da228c8a053fe6606cd2159238a469bc9a55fce9dbebb5e2232 + name: python3.12-pip + evr: 23.2.1-4.el8 + - url: 
https://cdn.redhat.com/content/dist/rhel8/8/aarch64/appstream/source/SRPMS/Packages/p/python3.12-pyyaml-6.0.1-2.el8.src.rpm + repoid: rhel-8-for-aarch64-appstream-source-rpms + size: 131090 + checksum: sha256:4254452a01f54622981f2716889690c281d1a5352ec07dbc919c96c3724ff7dc + name: python3.12-pyyaml + evr: 6.0.1-2.el8 - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/appstream/source/SRPMS/Packages/x/xkeyboard-config-2.28-1.el8.src.rpm repoid: rhel-8-for-aarch64-appstream-source-rpms size: 1699339 @@ -1603,6 +1669,12 @@ arches: checksum: sha256:a236b9807436c13e06c88926d7d3b25c2746f5b7fb12fadc41462a00d448a1f7 name: libxml2 evr: 2.9.7-21.el8_10.3 + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/source/SRPMS/Packages/l/libyaml-0.1.7-5.el8.src.rpm + repoid: rhel-8-for-aarch64-baseos-source-rpms + size: 540142 + checksum: sha256:5053fdb2c1384f513795f67c997eef3bc41290958ed8da17cf19edebc3dfdc83 + name: libyaml + evr: 0.1.7-5.el8 - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/source/SRPMS/Packages/l/lua-5.3.4-12.el8.src.rpm repoid: rhel-8-for-aarch64-baseos-source-rpms size: 437265 @@ -1813,12 +1885,12 @@ arches: checksum: sha256:c9798a08b98344921713d3183bda98727df494d83f96924604b6b755ddc30f61 name: tzdata evr: 2025c-1.el8 - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/source/SRPMS/Packages/u/util-linux-2.32.1-47.el8_10.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/source/SRPMS/Packages/u/util-linux-2.32.1-48.el8_10.src.rpm repoid: rhel-8-for-aarch64-baseos-source-rpms - size: 4817466 - checksum: sha256:e10e379f1386bdd6315e20cf735616747690c137ada562f47da85ca90ea966ee + size: 4820524 + checksum: sha256:c3ef99808b2afcaf8a5cf2dfef379980779bc1bee42688006fa22b5806b3471d name: util-linux - evr: 2.32.1-47.el8_10 + evr: 2.32.1-48.el8_10 - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/baseos/source/SRPMS/Packages/w/which-2.21-21.el8_10.src.rpm repoid: rhel-8-for-aarch64-baseos-source-rpms size: 171834 @@ -1844,10 +1916,10 @@ arches: name: zstd evr: 1.4.4-1.el8 module_metadata: - - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/appstream/os/repodata/25be956846b5cef874bf04027dd634d1f2ab0e79fc2273ade1b8ddde58acb20f-modules.yaml.gz + - url: https://cdn.redhat.com/content/dist/rhel8/8/aarch64/appstream/os/repodata/99ee9d15ef425ab46ff91056aa839108249d2918ce58cf257344cd15c0836335-modules.yaml.gz repoid: rhel-8-for-aarch64-appstream-rpms size: 760949 - checksum: sha256:25be956846b5cef874bf04027dd634d1f2ab0e79fc2273ade1b8ddde58acb20f + checksum: sha256:99ee9d15ef425ab46ff91056aa839108249d2918ce58cf257344cd15c0836335 - arch: ppc64le packages: - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/os/Packages/j/jq-1.6-11.el8_10.ppc64le.rpm @@ -1864,6 +1936,13 @@ arches: name: libxkbcommon evr: 0.9.1-1.el8 sourcerpm: libxkbcommon-0.9.1-1.el8.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/os/Packages/m/mpdecimal-2.5.1-3.el8.ppc64le.rpm + repoid: rhel-8-for-ppc64le-appstream-rpms + size: 107920 + checksum: sha256:becf9d9d37f87c08958351a21b6e83adb3a80beb1d3e68b4dab4e409e8167dc6 + name: mpdecimal + evr: 2.5.1-3.el8 + sourcerpm: mpdecimal-2.5.1-3.el8.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/os/Packages/o/oniguruma-6.8.2-3.el8.ppc64le.rpm repoid: rhel-8-for-ppc64le-appstream-rpms size: 204564 @@ -1885,6 +1964,34 @@ arches: name: postgresql-private-libs evr: 15.15-1.module+el8.10.0+23782+2d6b2a31 sourcerpm: 
postgresql-15.15-1.module+el8.10.0+23782+2d6b2a31.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/os/Packages/p/python3.12-3.12.12-1.el8_10.ppc64le.rpm + repoid: rhel-8-for-ppc64le-appstream-rpms + size: 31364 + checksum: sha256:4d6ec6c8c07ba2e884184c2f76d28bd4d8ab5fec56fdb5635ba417c4576fccc2 + name: python3.12 + evr: 3.12.12-1.el8_10 + sourcerpm: python3.12-3.12.12-1.el8_10.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/os/Packages/p/python3.12-libs-3.12.12-1.el8_10.ppc64le.rpm + repoid: rhel-8-for-ppc64le-appstream-rpms + size: 10779132 + checksum: sha256:ceb7cd166122b72964673c77a5a6bb32a28caec8479d926c41bdafa1f8b91aa9 + name: python3.12-libs + evr: 3.12.12-1.el8_10 + sourcerpm: python3.12-3.12.12-1.el8_10.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/os/Packages/p/python3.12-pip-wheel-23.2.1-4.el8.noarch.rpm + repoid: rhel-8-for-ppc64le-appstream-rpms + size: 1539820 + checksum: sha256:c5daaf3d7ef6c52f178ec6c20e49fa9ddb4506f1b0ee5cd1688046c28eb5e1cb + name: python3.12-pip-wheel + evr: 23.2.1-4.el8 + sourcerpm: python3.12-pip-23.2.1-4.el8.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/os/Packages/p/python3.12-pyyaml-6.0.1-2.el8.ppc64le.rpm + repoid: rhel-8-for-ppc64le-appstream-rpms + size: 206180 + checksum: sha256:c7c68c07ce5ffe935b07cc4c886c95707ad8e05be1c9ba32e2371bab2d4182ff + name: python3.12-pyyaml + evr: 6.0.1-2.el8 + sourcerpm: python3.12-pyyaml-6.0.1-2.el8.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/os/Packages/x/xkeyboard-config-2.28-1.el8.noarch.rpm repoid: rhel-8-for-ppc64le-appstream-rpms size: 801000 @@ -2368,13 +2475,13 @@ arches: name: libattr evr: 2.4.48-3.el8 sourcerpm: attr-2.4.48-3.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/l/libblkid-2.32.1-47.el8_10.ppc64le.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/l/libblkid-2.32.1-48.el8_10.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms - size: 247084 - checksum: sha256:0588a0f4ec7a55aaee28a3c34f2c02fc90d01af3a2a9b2a3e10bffc371b239c1 + size: 247288 + checksum: sha256:b147c1a3933ee6249a3ec6bd8b531532bc13cdf7c62edef811e8b933f36cc227 name: libblkid - evr: 2.32.1-47.el8_10 - sourcerpm: util-linux-2.32.1-47.el8_10.src.rpm + evr: 2.32.1-48.el8_10 + sourcerpm: util-linux-2.32.1-48.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/l/libcap-2.48-6.el8_9.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms size: 81276 @@ -2424,13 +2531,13 @@ arches: name: libdb-utils evr: 5.3.28-42.el8_4 sourcerpm: libdb-5.3.28-42.el8_4.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/l/libfdisk-2.32.1-47.el8_10.ppc64le.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/l/libfdisk-2.32.1-48.el8_10.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms - size: 275884 - checksum: sha256:3fee492a9a891261d9a38749ceece9ee6511549033ec7ab6c9a5d7c9b28803fb + size: 276132 + checksum: sha256:4403752475cb3641193b4edb130a98436bb184eaa46820645baefd61c4bc5c47 name: libfdisk - evr: 2.32.1-47.el8_10 - sourcerpm: util-linux-2.32.1-47.el8_10.src.rpm + evr: 2.32.1-48.el8_10 + sourcerpm: util-linux-2.32.1-48.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/l/libffi-3.1-24.el8.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms size: 40004 @@ -2487,13 +2594,13 @@ arches: name: 
libkcapi-hmaccalc evr: 1.4.0-2.el8 sourcerpm: libkcapi-1.4.0-2.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/l/libmount-2.32.1-47.el8_10.ppc64le.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/l/libmount-2.32.1-48.el8_10.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms - size: 265396 - checksum: sha256:d8652ce55eff19ee1b6be7521bac17fa0b0a3f4db1feeaf9be8fd85b9a3336af + size: 265624 + checksum: sha256:a4e0cf5c1b0a92a399f88a2ae484b8e3bd14163d93b942e5a2a642fe0044c99f name: libmount - evr: 2.32.1-47.el8_10 - sourcerpm: util-linux-2.32.1-47.el8_10.src.rpm + evr: 2.32.1-48.el8_10 + sourcerpm: util-linux-2.32.1-48.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/l/libnghttp2-1.33.0-6.el8_10.1.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms size: 87792 @@ -2564,13 +2671,13 @@ arches: name: libsigsegv evr: 2.11-5.el8 sourcerpm: libsigsegv-2.11-5.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/l/libsmartcols-2.32.1-47.el8_10.ppc64le.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/l/libsmartcols-2.32.1-48.el8_10.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms - size: 195556 - checksum: sha256:0dc6f9098a57ee71436a48337927da6b47e2d5312e52b725ce99d9a05d311d6e + size: 195728 + checksum: sha256:8b907b6793e7a1451e8fe3dab0a68447de80f7dc3bd16975e857910ae46676f5 name: libsmartcols - evr: 2.32.1-47.el8_10 - sourcerpm: util-linux-2.32.1-47.el8_10.src.rpm + evr: 2.32.1-48.el8_10 + sourcerpm: util-linux-2.32.1-48.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/l/libssh-0.9.6-16.el8_10.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms size: 245868 @@ -2620,13 +2727,13 @@ arches: name: libutempter evr: 1.1.6-14.el8 sourcerpm: libutempter-1.1.6-14.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/l/libuuid-2.32.1-47.el8_10.ppc64le.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/l/libuuid-2.32.1-48.el8_10.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms - size: 102548 - checksum: sha256:0b3bbe3ae4996e754868de28635ff8a696987ab2e316e75b4389608edf4348fa + size: 102736 + checksum: sha256:215aed0a7da0ff753c3b5a7b0f828744b0dbff2ed944f9ce46e4a4368d7c9d3b name: libuuid - evr: 2.32.1-47.el8_10 - sourcerpm: util-linux-2.32.1-47.el8_10.src.rpm + evr: 2.32.1-48.el8_10 + sourcerpm: util-linux-2.32.1-48.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/l/libverto-0.3.2-2.el8.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms size: 26016 @@ -2648,6 +2755,13 @@ arches: name: libxml2 evr: 2.9.7-21.el8_10.3 sourcerpm: libxml2-2.9.7-21.el8_10.3.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/l/libyaml-0.1.7-5.el8.ppc64le.rpm + repoid: rhel-8-for-ppc64le-baseos-rpms + size: 69348 + checksum: sha256:9f1810ee304c2827027a4dadb0142f6940c28991b5371cbe302593eece6c25e4 + name: libyaml + evr: 0.1.7-5.el8 + sourcerpm: libyaml-0.1.7-5.el8.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/l/libzstd-1.4.4-1.el8.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms size: 282940 @@ -2970,13 +3084,13 @@ arches: name: tzdata evr: 2025c-1.el8 sourcerpm: tzdata-2025c-1.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/u/util-linux-2.32.1-47.el8_10.ppc64le.rpm + - 
url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/u/util-linux-2.32.1-48.el8_10.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms - size: 2700484 - checksum: sha256:b48627739d83c990f30f2fb460a9aff87182e9a3160704e2a818c00a65dc7f03 + size: 2701164 + checksum: sha256:53a056e9e249281bef1227be4150f5d92d6fcf7efa5b347ed200d7b1d995e823 name: util-linux - evr: 2.32.1-47.el8_10 - sourcerpm: util-linux-2.32.1-47.el8_10.src.rpm + evr: 2.32.1-48.el8_10 + sourcerpm: util-linux-2.32.1-48.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/os/Packages/w/which-2.21-21.el8_10.ppc64le.rpm repoid: rhel-8-for-ppc64le-baseos-rpms size: 52168 @@ -3018,6 +3132,12 @@ arches: checksum: sha256:ca72f33bbbdd245bf1d2385e5f934d36b0ebdc9854b242fce7be0bb56bfa8255 name: libxkbcommon evr: 0.9.1-1.el8 + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/source/SRPMS/Packages/m/mpdecimal-2.5.1-3.el8.src.rpm + repoid: rhel-8-for-ppc64le-appstream-source-rpms + size: 3333112 + checksum: sha256:36811086e6cf10de04a5cfcbc3599228e073a44e032fac74d72857c970f06cae + name: mpdecimal + evr: 2.5.1-3.el8 - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/source/SRPMS/Packages/o/oniguruma-6.8.2-3.el8.src.rpm repoid: rhel-8-for-ppc64le-appstream-source-rpms size: 982385 @@ -3030,6 +3150,24 @@ arches: checksum: sha256:30795de4ed7a01becc64ee50796e7c76b9195ff1eed0a341b279aeb3e4b15527 name: postgresql evr: 15.15-1.module+el8.10.0+23782+2d6b2a31 + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/source/SRPMS/Packages/p/python3.12-3.12.12-1.el8_10.src.rpm + repoid: rhel-8-for-ppc64le-appstream-source-rpms + size: 20869970 + checksum: sha256:0f8e31ed00a577ddbf493a04b70ea364dd0b35fd2a30f2a0e5719b68aeb244b9 + name: python3.12 + evr: 3.12.12-1.el8_10 + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/source/SRPMS/Packages/p/python3.12-pip-23.2.1-4.el8.src.rpm + repoid: rhel-8-for-ppc64le-appstream-source-rpms + size: 9393232 + checksum: sha256:5661eadc28225da228c8a053fe6606cd2159238a469bc9a55fce9dbebb5e2232 + name: python3.12-pip + evr: 23.2.1-4.el8 + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/source/SRPMS/Packages/p/python3.12-pyyaml-6.0.1-2.el8.src.rpm + repoid: rhel-8-for-ppc64le-appstream-source-rpms + size: 131090 + checksum: sha256:4254452a01f54622981f2716889690c281d1a5352ec07dbc919c96c3724ff7dc + name: python3.12-pyyaml + evr: 6.0.1-2.el8 - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/source/SRPMS/Packages/x/xkeyboard-config-2.28-1.el8.src.rpm repoid: rhel-8-for-ppc64le-appstream-source-rpms size: 1699339 @@ -3462,6 +3600,12 @@ arches: checksum: sha256:a236b9807436c13e06c88926d7d3b25c2746f5b7fb12fadc41462a00d448a1f7 name: libxml2 evr: 2.9.7-21.el8_10.3 + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/source/SRPMS/Packages/l/libyaml-0.1.7-5.el8.src.rpm + repoid: rhel-8-for-ppc64le-baseos-source-rpms + size: 540142 + checksum: sha256:5053fdb2c1384f513795f67c997eef3bc41290958ed8da17cf19edebc3dfdc83 + name: libyaml + evr: 0.1.7-5.el8 - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/source/SRPMS/Packages/l/lua-5.3.4-12.el8.src.rpm repoid: rhel-8-for-ppc64le-baseos-source-rpms size: 437265 @@ -3672,12 +3816,12 @@ arches: checksum: sha256:c9798a08b98344921713d3183bda98727df494d83f96924604b6b755ddc30f61 name: tzdata evr: 2025c-1.el8 - - url: 
https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/source/SRPMS/Packages/u/util-linux-2.32.1-47.el8_10.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/source/SRPMS/Packages/u/util-linux-2.32.1-48.el8_10.src.rpm repoid: rhel-8-for-ppc64le-baseos-source-rpms - size: 4817466 - checksum: sha256:e10e379f1386bdd6315e20cf735616747690c137ada562f47da85ca90ea966ee + size: 4820524 + checksum: sha256:c3ef99808b2afcaf8a5cf2dfef379980779bc1bee42688006fa22b5806b3471d name: util-linux - evr: 2.32.1-47.el8_10 + evr: 2.32.1-48.el8_10 - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/baseos/source/SRPMS/Packages/w/which-2.21-21.el8_10.src.rpm repoid: rhel-8-for-ppc64le-baseos-source-rpms size: 171834 @@ -3703,10 +3847,10 @@ arches: name: zstd evr: 1.4.4-1.el8 module_metadata: - - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/os/repodata/df2eeda2b11e5c9b9f11a7a97a9a2c4725f099aa334f08e77e59bcc8055068a6-modules.yaml.gz + - url: https://cdn.redhat.com/content/dist/rhel8/8/ppc64le/appstream/os/repodata/d9a4652fdaffa79cccb4bdb9abcc531af0cdd945e3eb06164e040c185de0e362-modules.yaml.gz repoid: rhel-8-for-ppc64le-appstream-rpms size: 758113 - checksum: sha256:df2eeda2b11e5c9b9f11a7a97a9a2c4725f099aa334f08e77e59bcc8055068a6 + checksum: sha256:d9a4652fdaffa79cccb4bdb9abcc531af0cdd945e3eb06164e040c185de0e362 - arch: s390x packages: - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/os/Packages/j/jq-1.6-11.el8_10.s390x.rpm @@ -3723,6 +3867,13 @@ arches: name: libxkbcommon evr: 0.9.1-1.el8 sourcerpm: libxkbcommon-0.9.1-1.el8.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/os/Packages/m/mpdecimal-2.5.1-3.el8.s390x.rpm + repoid: rhel-8-for-s390x-appstream-rpms + size: 97316 + checksum: sha256:68089fe2afcbc5ce486d6e233de53df547052182afc88ac47c539d2841d55dab + name: mpdecimal + evr: 2.5.1-3.el8 + sourcerpm: mpdecimal-2.5.1-3.el8.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/os/Packages/o/oniguruma-6.8.2-3.el8.s390x.rpm repoid: rhel-8-for-s390x-appstream-rpms size: 188988 @@ -3800,6 +3951,34 @@ arches: name: postgresql-private-libs evr: 15.15-1.module+el8.10.0+23782+2d6b2a31 sourcerpm: postgresql-15.15-1.module+el8.10.0+23782+2d6b2a31.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/os/Packages/p/python3.12-3.12.12-1.el8_10.s390x.rpm + repoid: rhel-8-for-s390x-appstream-rpms + size: 30920 + checksum: sha256:e0ee3c56e277aedd7d012000b6fd7aff9dc2089cafdba18e798e15375e983985 + name: python3.12 + evr: 3.12.12-1.el8_10 + sourcerpm: python3.12-3.12.12-1.el8_10.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/os/Packages/p/python3.12-libs-3.12.12-1.el8_10.s390x.rpm + repoid: rhel-8-for-s390x-appstream-rpms + size: 10205140 + checksum: sha256:bf57c4f939ea41bf95af15f8377664842d27af5eb8d51f85f42a9d5f4bbf0d12 + name: python3.12-libs + evr: 3.12.12-1.el8_10 + sourcerpm: python3.12-3.12.12-1.el8_10.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/os/Packages/p/python3.12-pip-wheel-23.2.1-4.el8.noarch.rpm + repoid: rhel-8-for-s390x-appstream-rpms + size: 1539820 + checksum: sha256:c5daaf3d7ef6c52f178ec6c20e49fa9ddb4506f1b0ee5cd1688046c28eb5e1cb + name: python3.12-pip-wheel + evr: 23.2.1-4.el8 + sourcerpm: python3.12-pip-23.2.1-4.el8.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/os/Packages/p/python3.12-pyyaml-6.0.1-2.el8.s390x.rpm + repoid: rhel-8-for-s390x-appstream-rpms + size: 198108 + 
checksum: sha256:23a5eead78f86d5f5b4382e6c6fed3eca6a4fcd21e6620a3f2c590d97e8354db + name: python3.12-pyyaml + evr: 6.0.1-2.el8 + sourcerpm: python3.12-pyyaml-6.0.1-2.el8.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/os/Packages/x/xkeyboard-config-2.28-1.el8.noarch.rpm repoid: rhel-8-for-s390x-appstream-rpms size: 801000 @@ -4213,13 +4392,13 @@ arches: name: libattr evr: 2.4.48-3.el8 sourcerpm: attr-2.4.48-3.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/l/libblkid-2.32.1-47.el8_10.s390x.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/l/libblkid-2.32.1-48.el8_10.s390x.rpm repoid: rhel-8-for-s390x-baseos-rpms - size: 217464 - checksum: sha256:8a29b8ce7cafb898bcc8a3998eb1faf948538d4e293902eb36cd8caeaa732ca8 + size: 217612 + checksum: sha256:8d5c68fd87e697e0e309212426e4addf8902433cbc6286a85f195a766f711dc4 name: libblkid - evr: 2.32.1-47.el8_10 - sourcerpm: util-linux-2.32.1-47.el8_10.src.rpm + evr: 2.32.1-48.el8_10 + sourcerpm: util-linux-2.32.1-48.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/l/libcap-2.48-6.el8_9.s390x.rpm repoid: rhel-8-for-s390x-baseos-rpms size: 75024 @@ -4262,13 +4441,13 @@ arches: name: libdb-utils evr: 5.3.28-42.el8_4 sourcerpm: libdb-5.3.28-42.el8_4.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/l/libfdisk-2.32.1-47.el8_10.s390x.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/l/libfdisk-2.32.1-48.el8_10.s390x.rpm repoid: rhel-8-for-s390x-baseos-rpms - size: 249252 - checksum: sha256:ace74b38dd4fc20e28ac047ac97b895189cfba9f7f6af1fa19ad3883c4a294e4 + size: 249432 + checksum: sha256:e7afee088419dacd15b7213670771daa3fb2943ce7e43f188f18c7cdad17a6b0 name: libfdisk - evr: 2.32.1-47.el8_10 - sourcerpm: util-linux-2.32.1-47.el8_10.src.rpm + evr: 2.32.1-48.el8_10 + sourcerpm: util-linux-2.32.1-48.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/l/libffi-3.1-24.el8.s390x.rpm repoid: rhel-8-for-s390x-baseos-rpms size: 36696 @@ -4311,13 +4490,13 @@ arches: name: libmnl evr: 1.0.4-6.el8 sourcerpm: libmnl-1.0.4-6.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/l/libmount-2.32.1-47.el8_10.s390x.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/l/libmount-2.32.1-48.el8_10.s390x.rpm repoid: rhel-8-for-s390x-baseos-rpms - size: 233224 - checksum: sha256:8ad944179fb0335b551cc6d7de8da6186b9b6f13f89898c6dbb71103f923776d + size: 233312 + checksum: sha256:f9831477b70439ad68c5dd863291dbbc6c534067735d1cc472f375c288ab7f50 name: libmount - evr: 2.32.1-47.el8_10 - sourcerpm: util-linux-2.32.1-47.el8_10.src.rpm + evr: 2.32.1-48.el8_10 + sourcerpm: util-linux-2.32.1-48.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/l/libnghttp2-1.33.0-6.el8_10.1.s390x.rpm repoid: rhel-8-for-s390x-baseos-rpms size: 76440 @@ -4381,13 +4560,13 @@ arches: name: libsigsegv evr: 2.11-5.el8 sourcerpm: libsigsegv-2.11-5.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/l/libsmartcols-2.32.1-47.el8_10.s390x.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/l/libsmartcols-2.32.1-48.el8_10.s390x.rpm repoid: rhel-8-for-s390x-baseos-rpms - size: 178524 - checksum: sha256:f9e67d9af7a2e93b3ff169d4998828956ac51c55b85405282913bcfcb322f501 + size: 178708 + checksum: 
sha256:05d1310fa27caa5dd4be91f9384ca68cfa4505fcc859408b628fbfe0bc8f9b6d name: libsmartcols - evr: 2.32.1-47.el8_10 - sourcerpm: util-linux-2.32.1-47.el8_10.src.rpm + evr: 2.32.1-48.el8_10 + sourcerpm: util-linux-2.32.1-48.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/l/libssh-0.9.6-16.el8_10.s390x.rpm repoid: rhel-8-for-s390x-baseos-rpms size: 208564 @@ -4437,13 +4616,13 @@ arches: name: libutempter evr: 1.1.6-14.el8 sourcerpm: libutempter-1.1.6-14.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/l/libuuid-2.32.1-47.el8_10.s390x.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/l/libuuid-2.32.1-48.el8_10.s390x.rpm repoid: rhel-8-for-s390x-baseos-rpms - size: 100760 - checksum: sha256:187bf3a3f84fcb072e650ad7319304cc266965046f522b010345291b2fa6f551 + size: 100932 + checksum: sha256:7828876cc5878259daf38047c037cf7edd8835f7a571ad570d52bee27fef7fc8 name: libuuid - evr: 2.32.1-47.el8_10 - sourcerpm: util-linux-2.32.1-47.el8_10.src.rpm + evr: 2.32.1-48.el8_10 + sourcerpm: util-linux-2.32.1-48.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/l/libverto-0.3.2-2.el8.s390x.rpm repoid: rhel-8-for-s390x-baseos-rpms size: 23952 @@ -4465,6 +4644,13 @@ arches: name: libxml2 evr: 2.9.7-21.el8_10.3 sourcerpm: libxml2-2.9.7-21.el8_10.3.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/l/libyaml-0.1.7-5.el8.s390x.rpm + repoid: rhel-8-for-s390x-baseos-rpms + size: 56044 + checksum: sha256:998276e153886e014ce37c429a0f22b76f3ca955c1c9ba89999ce3dface1cf10 + name: libyaml + evr: 0.1.7-5.el8 + sourcerpm: libyaml-0.1.7-5.el8.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/l/libzstd-1.4.4-1.el8.s390x.rpm repoid: rhel-8-for-s390x-baseos-rpms size: 248556 @@ -5025,13 +5211,13 @@ arches: name: tzdata evr: 2025c-1.el8 sourcerpm: tzdata-2025c-1.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/u/util-linux-2.32.1-47.el8_10.s390x.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/u/util-linux-2.32.1-48.el8_10.s390x.rpm repoid: rhel-8-for-s390x-baseos-rpms - size: 2497388 - checksum: sha256:b3adf172d10ca587a37806ba7f0ae4a65caaee9836014498b998a2a214e24d6b + size: 2498196 + checksum: sha256:fcd7fa1b6fcc7e68831ad1fa49b14cd5f908fc5d47d0e80bb7dcf1ca38c5ea86 name: util-linux - evr: 2.32.1-47.el8_10 - sourcerpm: util-linux-2.32.1-47.el8_10.src.rpm + evr: 2.32.1-48.el8_10 + sourcerpm: util-linux-2.32.1-48.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/os/Packages/x/xz-libs-5.2.4-4.el8_6.s390x.rpm repoid: rhel-8-for-s390x-baseos-rpms size: 95736 @@ -5059,6 +5245,12 @@ arches: checksum: sha256:ca72f33bbbdd245bf1d2385e5f934d36b0ebdc9854b242fce7be0bb56bfa8255 name: libxkbcommon evr: 0.9.1-1.el8 + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/source/SRPMS/Packages/m/mpdecimal-2.5.1-3.el8.src.rpm + repoid: rhel-8-for-s390x-appstream-source-rpms + size: 3333112 + checksum: sha256:36811086e6cf10de04a5cfcbc3599228e073a44e032fac74d72857c970f06cae + name: mpdecimal + evr: 2.5.1-3.el8 - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/source/SRPMS/Packages/o/oniguruma-6.8.2-3.el8.src.rpm repoid: rhel-8-for-s390x-appstream-source-rpms size: 982385 @@ -5119,6 +5311,24 @@ arches: checksum: sha256:30795de4ed7a01becc64ee50796e7c76b9195ff1eed0a341b279aeb3e4b15527 name: postgresql 
evr: 15.15-1.module+el8.10.0+23782+2d6b2a31 + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/source/SRPMS/Packages/p/python3.12-3.12.12-1.el8_10.src.rpm + repoid: rhel-8-for-s390x-appstream-source-rpms + size: 20869970 + checksum: sha256:0f8e31ed00a577ddbf493a04b70ea364dd0b35fd2a30f2a0e5719b68aeb244b9 + name: python3.12 + evr: 3.12.12-1.el8_10 + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/source/SRPMS/Packages/p/python3.12-pip-23.2.1-4.el8.src.rpm + repoid: rhel-8-for-s390x-appstream-source-rpms + size: 9393232 + checksum: sha256:5661eadc28225da228c8a053fe6606cd2159238a469bc9a55fce9dbebb5e2232 + name: python3.12-pip + evr: 23.2.1-4.el8 + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/source/SRPMS/Packages/p/python3.12-pyyaml-6.0.1-2.el8.src.rpm + repoid: rhel-8-for-s390x-appstream-source-rpms + size: 131090 + checksum: sha256:4254452a01f54622981f2716889690c281d1a5352ec07dbc919c96c3724ff7dc + name: python3.12-pyyaml + evr: 6.0.1-2.el8 - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/source/SRPMS/Packages/x/xkeyboard-config-2.28-1.el8.src.rpm repoid: rhel-8-for-s390x-appstream-source-rpms size: 1699339 @@ -5515,6 +5725,12 @@ arches: checksum: sha256:a236b9807436c13e06c88926d7d3b25c2746f5b7fb12fadc41462a00d448a1f7 name: libxml2 evr: 2.9.7-21.el8_10.3 + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/source/SRPMS/Packages/l/libyaml-0.1.7-5.el8.src.rpm + repoid: rhel-8-for-s390x-baseos-source-rpms + size: 540142 + checksum: sha256:5053fdb2c1384f513795f67c997eef3bc41290958ed8da17cf19edebc3dfdc83 + name: libyaml + evr: 0.1.7-5.el8 - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/source/SRPMS/Packages/l/lua-5.3.4-12.el8.src.rpm repoid: rhel-8-for-s390x-baseos-source-rpms size: 437265 @@ -5893,12 +6109,12 @@ arches: checksum: sha256:c9798a08b98344921713d3183bda98727df494d83f96924604b6b755ddc30f61 name: tzdata evr: 2025c-1.el8 - - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/source/SRPMS/Packages/u/util-linux-2.32.1-47.el8_10.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/source/SRPMS/Packages/u/util-linux-2.32.1-48.el8_10.src.rpm repoid: rhel-8-for-s390x-baseos-source-rpms - size: 4817466 - checksum: sha256:e10e379f1386bdd6315e20cf735616747690c137ada562f47da85ca90ea966ee + size: 4820524 + checksum: sha256:c3ef99808b2afcaf8a5cf2dfef379980779bc1bee42688006fa22b5806b3471d name: util-linux - evr: 2.32.1-47.el8_10 + evr: 2.32.1-48.el8_10 - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/baseos/source/SRPMS/Packages/x/xz-5.2.4-4.el8_6.src.rpm repoid: rhel-8-for-s390x-baseos-source-rpms size: 1077113 @@ -5918,10 +6134,10 @@ arches: name: zstd evr: 1.4.4-1.el8 module_metadata: - - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/os/repodata/d817cfb2041d9563617f1459186cd4ee7aa2c8c94fe9ef9922df8145a0f77ff4-modules.yaml.gz + - url: https://cdn.redhat.com/content/dist/rhel8/8/s390x/appstream/os/repodata/e68681399aa8ed3b5eb734bfa9c6fa22fdd0171fad35faa9626064b2d1b648a2-modules.yaml.gz repoid: rhel-8-for-s390x-appstream-rpms size: 759639 - checksum: sha256:d817cfb2041d9563617f1459186cd4ee7aa2c8c94fe9ef9922df8145a0f77ff4 + checksum: sha256:e68681399aa8ed3b5eb734bfa9c6fa22fdd0171fad35faa9626064b2d1b648a2 - arch: x86_64 packages: - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/os/Packages/j/jq-1.6-11.el8_10.x86_64.rpm @@ -5938,6 +6154,13 @@ arches: name: libxkbcommon evr: 0.9.1-1.el8 sourcerpm: 
libxkbcommon-0.9.1-1.el8.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/os/Packages/m/mpdecimal-2.5.1-3.el8.x86_64.rpm + repoid: rhel-8-for-x86_64-appstream-rpms + size: 95204 + checksum: sha256:fd75319ca80fbfbf4ef5abe886033586aed718ed10e56ec42a345f56c1ca72b6 + name: mpdecimal + evr: 2.5.1-3.el8 + sourcerpm: mpdecimal-2.5.1-3.el8.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/os/Packages/o/oniguruma-6.8.2-3.el8.x86_64.rpm repoid: rhel-8-for-x86_64-appstream-rpms size: 192632 @@ -5959,6 +6182,34 @@ arches: name: postgresql-private-libs evr: 15.15-1.module+el8.10.0+23782+2d6b2a31 sourcerpm: postgresql-15.15-1.module+el8.10.0+23782+2d6b2a31.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/os/Packages/p/python3.12-3.12.12-1.el8_10.x86_64.rpm + repoid: rhel-8-for-x86_64-appstream-rpms + size: 30956 + checksum: sha256:e7ceeceeadc49a91aae41fadd4c1f363003d423043ec97124884023682e0231b + name: python3.12 + evr: 3.12.12-1.el8_10 + sourcerpm: python3.12-3.12.12-1.el8_10.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/os/Packages/p/python3.12-libs-3.12.12-1.el8_10.x86_64.rpm + repoid: rhel-8-for-x86_64-appstream-rpms + size: 10502688 + checksum: sha256:e41dd02b13469b669a97fa9be509003e2df9b010af94630a025adde7e31fbddb + name: python3.12-libs + evr: 3.12.12-1.el8_10 + sourcerpm: python3.12-3.12.12-1.el8_10.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/os/Packages/p/python3.12-pip-wheel-23.2.1-4.el8.noarch.rpm + repoid: rhel-8-for-x86_64-appstream-rpms + size: 1539820 + checksum: sha256:c5daaf3d7ef6c52f178ec6c20e49fa9ddb4506f1b0ee5cd1688046c28eb5e1cb + name: python3.12-pip-wheel + evr: 23.2.1-4.el8 + sourcerpm: python3.12-pip-23.2.1-4.el8.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/os/Packages/p/python3.12-pyyaml-6.0.1-2.el8.x86_64.rpm + repoid: rhel-8-for-x86_64-appstream-rpms + size: 208020 + checksum: sha256:9ccc9476927eba0e14c39897bb0a7b5e4b3d5fc95586871b579cdb39341c3cc8 + name: python3.12-pyyaml + evr: 6.0.1-2.el8 + sourcerpm: python3.12-pyyaml-6.0.1-2.el8.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/os/Packages/x/xkeyboard-config-2.28-1.el8.noarch.rpm repoid: rhel-8-for-x86_64-appstream-rpms size: 801000 @@ -6442,13 +6693,13 @@ arches: name: libattr evr: 2.4.48-3.el8 sourcerpm: attr-2.4.48-3.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/l/libblkid-2.32.1-47.el8_10.x86_64.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/l/libblkid-2.32.1-48.el8_10.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms - size: 225348 - checksum: sha256:d20de50e05c6c7a6a3232c57ba69a2e62d0aebe4ebe5540f6b4776eb762465a8 + size: 225512 + checksum: sha256:1ea002cffcfbccaa258fffebb05bee62ec92b6ca6c3204b2eece0d3142122912 name: libblkid - evr: 2.32.1-47.el8_10 - sourcerpm: util-linux-2.32.1-47.el8_10.src.rpm + evr: 2.32.1-48.el8_10 + sourcerpm: util-linux-2.32.1-48.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/l/libcap-2.48-6.el8_9.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms size: 76264 @@ -6498,13 +6749,13 @@ arches: name: libdb-utils evr: 5.3.28-42.el8_4 sourcerpm: libdb-5.3.28-42.el8_4.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/l/libfdisk-2.32.1-47.el8_10.x86_64.rpm + - url: 
https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/l/libfdisk-2.32.1-48.el8_10.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms - size: 259176 - checksum: sha256:93d94607b800a70cffe242fdaf13ebcf9a62eb77aa98564bab7087f86a8e0832 + size: 259376 + checksum: sha256:59733655dc4a424ab2314895e5c2f7e274180bb092b65167cbf23e5634662a0a name: libfdisk - evr: 2.32.1-47.el8_10 - sourcerpm: util-linux-2.32.1-47.el8_10.src.rpm + evr: 2.32.1-48.el8_10 + sourcerpm: util-linux-2.32.1-48.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/l/libffi-3.1-24.el8.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms size: 38584 @@ -6561,13 +6812,13 @@ arches: name: libkcapi-hmaccalc evr: 1.4.0-2.el8 sourcerpm: libkcapi-1.4.0-2.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/l/libmount-2.32.1-47.el8_10.x86_64.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/l/libmount-2.32.1-48.el8_10.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms - size: 241732 - checksum: sha256:c92289f2e195e15fece08617be1d675abfd513109a0bd14c5cf45fcd68fb84a9 + size: 241976 + checksum: sha256:41bf9c5ca51eee9e986e5abf81d10fa3e5abe93b0a11d1cd9e0ca4f59f6d7322 name: libmount - evr: 2.32.1-47.el8_10 - sourcerpm: util-linux-2.32.1-47.el8_10.src.rpm + evr: 2.32.1-48.el8_10 + sourcerpm: util-linux-2.32.1-48.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/l/libnghttp2-1.33.0-6.el8_10.1.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms size: 80224 @@ -6631,13 +6882,13 @@ arches: name: libsigsegv evr: 2.11-5.el8 sourcerpm: libsigsegv-2.11-5.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/l/libsmartcols-2.32.1-47.el8_10.x86_64.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/l/libsmartcols-2.32.1-48.el8_10.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms - size: 183072 - checksum: sha256:7203046a7bbf0c72965933901614a682a220800c43f69748f8a4cb209193061c + size: 183356 + checksum: sha256:c33ea3acbc4eaeb7b5b9d72e683769665931e8839d13e9ccb0da4dcfaa1a3e16 name: libsmartcols - evr: 2.32.1-47.el8_10 - sourcerpm: util-linux-2.32.1-47.el8_10.src.rpm + evr: 2.32.1-48.el8_10 + sourcerpm: util-linux-2.32.1-48.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/l/libssh-0.9.6-16.el8_10.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms size: 224400 @@ -6687,13 +6938,13 @@ arches: name: libutempter evr: 1.1.6-14.el8 sourcerpm: libutempter-1.1.6-14.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/l/libuuid-2.32.1-47.el8_10.x86_64.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/l/libuuid-2.32.1-48.el8_10.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms - size: 100768 - checksum: sha256:9ba65072e9949c2c6dfa85b8daa36292264f4c3e6a35a515b6ef572d3405aaba + size: 100956 + checksum: sha256:111e2fa9fc01063230326f03b6fc810ad1f01353b8102ab470b828102a535c0c name: libuuid - evr: 2.32.1-47.el8_10 - sourcerpm: util-linux-2.32.1-47.el8_10.src.rpm + evr: 2.32.1-48.el8_10 + sourcerpm: util-linux-2.32.1-48.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/l/libverto-0.3.2-2.el8.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms size: 24636 @@ -6715,6 +6966,13 @@ arches: name: libxml2 evr: 2.9.7-21.el8_10.3 sourcerpm: libxml2-2.9.7-21.el8_10.3.src.rpm + - url: 
https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/l/libyaml-0.1.7-5.el8.x86_64.rpm + repoid: rhel-8-for-x86_64-baseos-rpms + size: 62872 + checksum: sha256:018409b1eda8be48a11a5b76b95e82ff1d9002569e0644291532d8424dc31edf + name: libyaml + evr: 0.1.7-5.el8 + sourcerpm: libyaml-0.1.7-5.el8.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/l/libzstd-1.4.4-1.el8.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms size: 272364 @@ -7037,13 +7295,13 @@ arches: name: tzdata evr: 2025c-1.el8 sourcerpm: tzdata-2025c-1.el8.src.rpm - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/u/util-linux-2.32.1-47.el8_10.x86_64.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/u/util-linux-2.32.1-48.el8_10.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms - size: 2596592 - checksum: sha256:4861ff37cf00bd0bab78a81004dfed1e7b0ee5355403510b9e78e2a90fc7226c + size: 2597936 + checksum: sha256:16f51c38ab76c0a1bfb1c9da94d48946a48d94367096ef855343db11574f5aca name: util-linux - evr: 2.32.1-47.el8_10 - sourcerpm: util-linux-2.32.1-47.el8_10.src.rpm + evr: 2.32.1-48.el8_10 + sourcerpm: util-linux-2.32.1-48.el8_10.src.rpm - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/Packages/w/which-2.21-21.el8_10.x86_64.rpm repoid: rhel-8-for-x86_64-baseos-rpms size: 51220 @@ -7085,6 +7343,12 @@ arches: checksum: sha256:ca72f33bbbdd245bf1d2385e5f934d36b0ebdc9854b242fce7be0bb56bfa8255 name: libxkbcommon evr: 0.9.1-1.el8 + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/source/SRPMS/Packages/m/mpdecimal-2.5.1-3.el8.src.rpm + repoid: rhel-8-for-x86_64-appstream-source-rpms + size: 3333112 + checksum: sha256:36811086e6cf10de04a5cfcbc3599228e073a44e032fac74d72857c970f06cae + name: mpdecimal + evr: 2.5.1-3.el8 - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/source/SRPMS/Packages/o/oniguruma-6.8.2-3.el8.src.rpm repoid: rhel-8-for-x86_64-appstream-source-rpms size: 982385 @@ -7097,6 +7361,24 @@ arches: checksum: sha256:30795de4ed7a01becc64ee50796e7c76b9195ff1eed0a341b279aeb3e4b15527 name: postgresql evr: 15.15-1.module+el8.10.0+23782+2d6b2a31 + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/source/SRPMS/Packages/p/python3.12-3.12.12-1.el8_10.src.rpm + repoid: rhel-8-for-x86_64-appstream-source-rpms + size: 20869970 + checksum: sha256:0f8e31ed00a577ddbf493a04b70ea364dd0b35fd2a30f2a0e5719b68aeb244b9 + name: python3.12 + evr: 3.12.12-1.el8_10 + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/source/SRPMS/Packages/p/python3.12-pip-23.2.1-4.el8.src.rpm + repoid: rhel-8-for-x86_64-appstream-source-rpms + size: 9393232 + checksum: sha256:5661eadc28225da228c8a053fe6606cd2159238a469bc9a55fce9dbebb5e2232 + name: python3.12-pip + evr: 23.2.1-4.el8 + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/source/SRPMS/Packages/p/python3.12-pyyaml-6.0.1-2.el8.src.rpm + repoid: rhel-8-for-x86_64-appstream-source-rpms + size: 131090 + checksum: sha256:4254452a01f54622981f2716889690c281d1a5352ec07dbc919c96c3724ff7dc + name: python3.12-pyyaml + evr: 6.0.1-2.el8 - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/source/SRPMS/Packages/x/xkeyboard-config-2.28-1.el8.src.rpm repoid: rhel-8-for-x86_64-appstream-source-rpms size: 1699339 @@ -7523,6 +7805,12 @@ arches: checksum: sha256:a236b9807436c13e06c88926d7d3b25c2746f5b7fb12fadc41462a00d448a1f7 name: libxml2 evr: 2.9.7-21.el8_10.3 + - url: 
https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/source/SRPMS/Packages/l/libyaml-0.1.7-5.el8.src.rpm + repoid: rhel-8-for-x86_64-baseos-source-rpms + size: 540142 + checksum: sha256:5053fdb2c1384f513795f67c997eef3bc41290958ed8da17cf19edebc3dfdc83 + name: libyaml + evr: 0.1.7-5.el8 - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/source/SRPMS/Packages/l/lua-5.3.4-12.el8.src.rpm repoid: rhel-8-for-x86_64-baseos-source-rpms size: 437265 @@ -7733,12 +8021,12 @@ arches: checksum: sha256:c9798a08b98344921713d3183bda98727df494d83f96924604b6b755ddc30f61 name: tzdata evr: 2025c-1.el8 - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/source/SRPMS/Packages/u/util-linux-2.32.1-47.el8_10.src.rpm + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/source/SRPMS/Packages/u/util-linux-2.32.1-48.el8_10.src.rpm repoid: rhel-8-for-x86_64-baseos-source-rpms - size: 4817466 - checksum: sha256:e10e379f1386bdd6315e20cf735616747690c137ada562f47da85ca90ea966ee + size: 4820524 + checksum: sha256:c3ef99808b2afcaf8a5cf2dfef379980779bc1bee42688006fa22b5806b3471d name: util-linux - evr: 2.32.1-47.el8_10 + evr: 2.32.1-48.el8_10 - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/source/SRPMS/Packages/w/which-2.21-21.el8_10.src.rpm repoid: rhel-8-for-x86_64-baseos-source-rpms size: 171834 @@ -7764,7 +8052,7 @@ arches: name: zstd evr: 1.4.4-1.el8 module_metadata: - - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/os/repodata/8a32578434a9fadf8e1c3fcc5507caa7952479a54fe8042c996b50fea309cd30-modules.yaml.gz + - url: https://cdn.redhat.com/content/dist/rhel8/8/x86_64/appstream/os/repodata/550e1b946b2d79d0d0894346109db66cc7af465f5f4f27425a16ffc0cf4dc5cd-modules.yaml.gz repoid: rhel-8-for-x86_64-appstream-rpms size: 784123 - checksum: sha256:8a32578434a9fadf8e1c3fcc5507caa7952479a54fe8042c996b50fea309cd30 + checksum: sha256:550e1b946b2d79d0d0894346109db66cc7af465f5f4f27425a16ffc0cf4dc5cd From 9b5ee9ee77bce8fd7d91cc0bf8b3447686850996 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Luis=20Valero=20Mart=C3=ADn?= Date: Wed, 4 Feb 2026 16:01:36 +0100 Subject: [PATCH 114/232] ROX-32883: Implement concurrent lane (#18788) Co-authored-by: Claude Sonnet 4.5 --- pkg/safe/channel.go | 161 +++++++ pkg/safe/channel_test.go | 394 +++++++++++++++++ pkg/safe/errors.go | 11 + .../pubsub/lane/{default.go => blocking.go} | 71 ++- .../{default_test.go => blocking_test.go} | 62 +-- sensor/common/pubsub/lane/concurrent.go | 169 ++++++++ sensor/common/pubsub/lane/concurrent_test.go | 408 ++++++++++++++++++ sensor/common/pubsub/metrics/metrics.go | 8 +- sensor/kubernetes/sensor/sensor.go | 4 +- 9 files changed, 1207 insertions(+), 81 deletions(-) create mode 100644 pkg/safe/channel.go create mode 100644 pkg/safe/channel_test.go create mode 100644 pkg/safe/errors.go rename sensor/common/pubsub/lane/{default.go => blocking.go} (64%) rename sensor/common/pubsub/lane/{default_test.go => blocking_test.go} (78%) create mode 100644 sensor/common/pubsub/lane/concurrent.go create mode 100644 sensor/common/pubsub/lane/concurrent_test.go diff --git a/pkg/safe/channel.go b/pkg/safe/channel.go new file mode 100644 index 0000000000000..1c9585b062fb7 --- /dev/null +++ b/pkg/safe/channel.go @@ -0,0 +1,161 @@ +package safe + +import ( + "github.com/stackrox/rox/pkg/concurrency" + "github.com/stackrox/rox/pkg/sync" +) + +// Channel provides a thread-safe channel with race-free shutdown semantics. 
+// It encapsulates a channel along with synchronization primitives to ensure +// safe writes and closure even during concurrent shutdown scenarios. +type Channel[T any] struct { + mu sync.RWMutex + ch chan T + closed bool + waitable concurrency.Waitable +} + +// NewChannel creates a new Channel with the specified buffer size. +// The waitable parameter is used to coordinate shutdown - writes will fail +// when the waitable is triggered. +// Panics if waitable is nil or size is negative. +func NewChannel[T any](size int, waitable concurrency.Waitable) *Channel[T] { + if waitable == nil { + panic("waitable must not be nil") + } + if size < 0 { + panic("size must not be negative") + } + return &Channel[T]{ + ch: make(chan T, size), + waitable: waitable, + } +} + +// Write pushes an item to the channel, blocking if the channel is full. +// This operation is safe to call concurrently with Close. +// +// Returns ErrWaitableTriggered if the waitable is triggered before or during the write. +// +// Thread-safety and double-select pattern: +// +// The RLock is required because Write/TryWrite calls may occur in different goroutines +// from the Close call, and not all Write/TryWrite calls are in the same goroutine either. +// RLock is sufficient (rather than full Lock) because writing to a channel is already +// thread-safe in Go; the lock only coordinates shutdown with Close. +// +// The double-select pattern prevents panics when writing to a closed channel: +// +// 1. Caller A: Write() -> acquires RLock +// 2. Caller B: Close() -> waits for lock (blocked by A's RLock) +// 3. Caller A: Write() ends -> releases RLock +// 4. Caller B: Close() acquires lock -> closes channel -> releases lock +// 5. Caller C: Write() -> acquires RLock -> first select detects triggered waitable -> exits early +// +// Without the first select (fast-path check), we would proceed to the second select where +// Go's select would randomly choose between the waitable channel and writing to the closed +// channel, potentially causing a panic. +// +// The second select is needed because if we're blocked waiting to write to a full channel +// and another caller triggers the waitable, we should immediately stop trying to write and exit. +func (s *Channel[T]) Write(item T) error { + s.mu.RLock() + defer s.mu.RUnlock() + + // First select: fast-path exit if waitable is already triggered + select { + case <-s.waitable.Done(): + return ErrWaitableTriggered + default: + } + + // Second select: exit if waitable is triggered while blocked on channel write + select { + case <-s.waitable.Done(): + return ErrWaitableTriggered + case s.ch <- item: + return nil + } +} + +// TryWrite attempts to push an item to the channel without blocking. +// If the channel is full, it returns ErrChannelFull immediately. +// This operation is safe to call concurrently with Close. +// +// Returns: +// - ErrWaitableTriggered if the waitable is triggered before or during the write +// - ErrChannelFull if the channel is full and cannot accept the item +// +// Thread-safety and double-select pattern: +// +// The RLock is required because Write/TryWrite calls may occur in different goroutines +// from the Close call, and not all Write/TryWrite calls are in the same goroutine either. +// RLock is sufficient (rather than full Lock) because writing to a channel is already +// thread-safe in Go; the lock only coordinates shutdown with Close. 
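+//
+// A minimal usage sketch of this contract (illustrative only; the int payload,
+// buffer size, and use of a cancellable context as the waitable are arbitrary
+// choices mirroring the package tests):
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	ch := NewChannel[int](4, ctx)
+//	if err := ch.TryWrite(42); errors.Is(err, ErrChannelFull) {
+//		// Queue full: drop the item or retry later.
+//	} else if err != nil {
+//		// ErrWaitableTriggered: shutdown has begun, stop producing.
+//	}
+//	// Shutdown sequence required by Close (see its documentation):
+//	cancel()     // 1. signal the waitable
+//	<-ctx.Done() // 2. wait for it
+//	ch.Close()   // 3. now safe to close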
+// +// The double-select pattern prevents panics when writing to a closed channel: +// See the Write function documentation for a detailed explanation of the race condition +// this pattern prevents. +func (s *Channel[T]) TryWrite(item T) error { + s.mu.RLock() + defer s.mu.RUnlock() + + // First select: fast-path exit if waitable is already triggered + select { + case <-s.waitable.Done(): + return ErrWaitableTriggered + default: + } + + // Second select: exit if waitable is triggered, or return ErrChannelFull if full + select { + case <-s.waitable.Done(): + return ErrWaitableTriggered + case s.ch <- item: + return nil + default: + return ErrChannelFull + } +} + +// Chan returns a read-only view of the underlying channel. +// This can be used in select statements or to read from the channel. +func (s *Channel[T]) Chan() <-chan T { + return s.ch +} + +// Len returns the number of items currently in the channel. +func (s *Channel[T]) Len() int { + return len(s.ch) +} + +// Cap returns the capacity of the channel. +func (s *Channel[T]) Cap() int { + return cap(s.ch) +} + +// Close safely closes the underlying channel. +// This should be called after the waitable has been triggered. +// It is safe to call Close multiple times - subsequent calls are no-ops. +// Panics if called before the waitable has been triggered. +// +// Proper shutdown sequence: +// 1. Signal the waitable +// 2. Wait for the waitable +// 3. Call Close() +func (s *Channel[T]) Close() { + // Verify the waitable has been triggered to prevent potential deadlocks + select { + case <-s.waitable.Done(): + default: + // Waitable not triggered - this violates the contract + panic("Close() called before waitable was triggered") + } + + concurrency.WithLock(&s.mu, func() { + if !s.closed { + close(s.ch) + s.closed = true + } + }) +} diff --git a/pkg/safe/channel_test.go b/pkg/safe/channel_test.go new file mode 100644 index 0000000000000..f0077c79139c2 --- /dev/null +++ b/pkg/safe/channel_test.go @@ -0,0 +1,394 @@ +package safe + +import ( + "context" + "testing" + "testing/synctest" + + "github.com/stackrox/rox/pkg/concurrency" + "github.com/stackrox/rox/pkg/set" + "github.com/stackrox/rox/pkg/sync" + "github.com/stackrox/rox/pkg/testutils/goleak" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSafeChannel_Write_Success(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ch := NewChannel[int](5, ctx) + + // Write some items + err := ch.Write(1) + require.NoError(t, err) + + err = ch.Write(2) + require.NoError(t, err) + + err = ch.Write(3) + require.NoError(t, err) + + // Read and verify + assert.Equal(t, 1, <-ch.Chan()) + assert.Equal(t, 2, <-ch.Chan()) + assert.Equal(t, 3, <-ch.Chan()) +} + +func TestSafeChannel_Write_BlocksWhenFull(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + synctest.Test(t, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ch := NewChannel[int](2, ctx) + + // Fill the channel + require.NoError(t, ch.Write(1)) + require.NoError(t, ch.Write(2)) + + // Write should block + writeStarted := concurrency.NewSignal() + writeCompleted := concurrency.NewSignal() + go func() { + writeStarted.Signal() + _ = ch.Write(3) + writeCompleted.Signal() + }() + + // Wait for write to start + <-writeStarted.Done() + + // Wait for the goroutine to become blocked + synctest.Wait() + + // Verify write has not completed (still blocked) + 
select { + case <-writeCompleted.Done(): + t.Fatal("Write should have blocked on full channel") + default: + // Expected - write is blocked + } + + // Unblock by reading + assert.Equal(t, 1, <-ch.Chan()) + + // Wait for write to complete + <-writeCompleted.Done() + + // Verify the third item was written + assert.Equal(t, 2, <-ch.Chan()) + assert.Equal(t, 3, <-ch.Chan()) + }) +} + +func TestSafeChannel_Write_FailsAfterWaitableTriggered(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + ctx, cancel := context.WithCancel(context.Background()) + ch := NewChannel[int](5, ctx) + + // Cancel the context + cancel() + + // Write should fail + err := ch.Write(1) + assert.ErrorIs(t, err, ErrWaitableTriggered) +} + +func TestSafeChannel_TryWrite_Success(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ch := NewChannel[int](5, ctx) + + // TryWrite some items + err := ch.TryWrite(1) + require.NoError(t, err) + + err = ch.TryWrite(2) + require.NoError(t, err) + + // Read and verify + assert.Equal(t, 1, <-ch.Chan()) + assert.Equal(t, 2, <-ch.Chan()) +} + +func TestSafeChannel_TryWrite_FailsWhenFull(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ch := NewChannel[int](2, ctx) + + // Fill the channel + require.NoError(t, ch.TryWrite(1)) + require.NoError(t, ch.TryWrite(2)) + + // TryWrite should fail immediately + err := ch.TryWrite(3) + assert.ErrorIs(t, err, ErrChannelFull) + + // Channel should still have the original items + assert.Equal(t, 1, <-ch.Chan()) + assert.Equal(t, 2, <-ch.Chan()) +} + +func TestSafeChannel_TryWrite_FailsAfterWaitableTriggered(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + ctx, cancel := context.WithCancel(context.Background()) + ch := NewChannel[int](5, ctx) + + // Cancel the context + cancel() + + // TryWrite should fail + err := ch.TryWrite(1) + assert.ErrorIs(t, err, ErrWaitableTriggered) +} + +func TestSafeChannel_Len(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ch := NewChannel[int](5, ctx) + + assert.Equal(t, 0, ch.Len()) + + require.NoError(t, ch.Write(1)) + assert.Equal(t, 1, ch.Len()) + + require.NoError(t, ch.Write(2)) + assert.Equal(t, 2, ch.Len()) + + <-ch.Chan() + assert.Equal(t, 1, ch.Len()) + + <-ch.Chan() + assert.Equal(t, 0, ch.Len()) +} + +func TestSafeChannel_Cap(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ch := NewChannel[int](10, ctx) + assert.Equal(t, 10, ch.Cap()) + + // Capacity doesn't change as we write + require.NoError(t, ch.Write(1)) + assert.Equal(t, 10, ch.Cap()) + + require.NoError(t, ch.Write(2)) + assert.Equal(t, 10, ch.Cap()) + + // Capacity doesn't change as we read + <-ch.Chan() + assert.Equal(t, 10, ch.Cap()) +} + +func TestSafeChannel_NegativeSize(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Creating a Channel with negative size should panic + assert.Panics(t, func() { + NewChannel[int](-5, ctx) + }, "NewChannel should panic when size is negative") +} + +func TestSafeChannel_Close_MultipleTimes(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + ctx, cancel := context.WithCancel(context.Background()) + ch := NewChannel[int](5, ctx) + + cancel() + 
<-ctx.Done() + + // Close multiple times should not panic + ch.Close() + ch.Close() + ch.Close() +} + +func TestSafeChannel_Close_ProperShutdownSequence(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + ctx, cancel := context.WithCancel(context.Background()) + ch := NewChannel[int](5, ctx) + + // Write some items + require.NoError(t, ch.Write(1)) + require.NoError(t, ch.Write(2)) + + // Proper shutdown sequence + cancel() + <-ctx.Done() + ch.Close() + + // Should still be able to read existing items + assert.Equal(t, 1, <-ch.Chan()) + assert.Equal(t, 2, <-ch.Chan()) + + // Channel should be closed now + val, ok := <-ch.Chan() + assert.False(t, ok, "channel should be closed") + assert.Equal(t, 0, val) +} + +func TestSafeChannel_ConcurrentWrites(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ch := NewChannel[int](1000, ctx) + + numGoroutines := 10 + numWrites := 100 + + var wg sync.WaitGroup + wg.Add(numGoroutines) + + // Launch multiple goroutines writing concurrently + for i := range numGoroutines { + go func(offset int) { + defer wg.Done() + for j := range numWrites { + err := ch.Write(offset*numWrites + j) + assert.NoError(t, err) + } + }(i) + } + + // Reads are possible after we call Close + wg.Wait() + cancel() + <-ctx.Done() + ch.Close() + + // Read all items in another goroutine + received := set.NewIntSet() + var readerWg sync.WaitGroup + readerWg.Add(1) + go func() { + defer readerWg.Done() + for range numGoroutines * numWrites { + val := <-ch.Chan() + received.Add(val) + } + }() + + readerWg.Wait() + + // Verify all items were received + assert.Len(t, received, numGoroutines*numWrites) +} + +func TestSafeChannel_ConcurrentWritesAndClose(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + // This test ensures there are no panics when writing and closing concurrently + for range 100 { + ctx, cancel := context.WithCancel(context.Background()) + ch := NewChannel[int](10, ctx) + + writeStarted := concurrency.NewSignal() + + var wg sync.WaitGroup + wg.Add(2) + + // Writer goroutine + go func() { + defer wg.Done() + for j := range 100 { + _ = ch.Write(j) + if j == 0 { + writeStarted.Signal() + } + } + }() + + // Closer goroutine - wait for writes to start, then close while writing + go func() { + defer wg.Done() + <-writeStarted.Done() + cancel() + <-ctx.Done() + ch.Close() + }() + + wg.Wait() + } +} + +func TestSafeChannel_WriteBlockedThenWaitableTriggered(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + synctest.Test(t, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + ch := NewChannel[int](1, ctx) + + // Fill the channel + require.NoError(t, ch.Write(1)) + + // Start a write that will block + writeStarted := concurrency.NewSignal() + writeResult := make(chan error, 1) + go func() { + writeStarted.Signal() + err := ch.Write(2) + writeResult <- err + }() + + // Wait for write to start + <-writeStarted.Done() + + // Wait for the goroutine to become blocked + synctest.Wait() + + // Trigger the waitable while write is blocked + cancel() + + // The blocked write should return ErrWaitableTriggered + err := <-writeResult + assert.ErrorIs(t, err, ErrWaitableTriggered) + }) +} + +func TestSafeChannel_NewSafeChannel_PanicsOnNilWaitable(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + // Creating a Channel with a nil waitable should panic + assert.Panics(t, func() { + NewChannel[int](5, nil) + }, "NewChannel should panic when 
waitable is nil") +} + +func TestSafeChannel_Close_PanicsOnUntriggeredWaitable(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ch := NewChannel[int](5, ctx) + + // Calling Close without triggering the waitable should panic + assert.Panics(t, func() { + ch.Close() + }, "Close should panic when waitable has not been triggered") +} diff --git a/pkg/safe/errors.go b/pkg/safe/errors.go new file mode 100644 index 0000000000000..76a36f05c2f86 --- /dev/null +++ b/pkg/safe/errors.go @@ -0,0 +1,11 @@ +package safe + +import "github.com/pkg/errors" + +var ( + // ErrWaitableTriggered is returned when a waitable signal is triggered before or during a channel write operation. + ErrWaitableTriggered = errors.New("waitable was triggered") + + // ErrChannelFull is returned when attempting a non-blocking write to a full channel. + ErrChannelFull = errors.New("channel is full") +) diff --git a/sensor/common/pubsub/lane/default.go b/sensor/common/pubsub/lane/blocking.go similarity index 64% rename from sensor/common/pubsub/lane/default.go rename to sensor/common/pubsub/lane/blocking.go index a30a1a83e0513..70c3a6a09433e 100644 --- a/sensor/common/pubsub/lane/default.go +++ b/sensor/common/pubsub/lane/blocking.go @@ -5,20 +5,20 @@ import ( "github.com/stackrox/rox/pkg/concurrency" "github.com/stackrox/rox/pkg/errorhelpers" - "github.com/stackrox/rox/pkg/sync" + "github.com/stackrox/rox/pkg/safe" "github.com/stackrox/rox/sensor/common/pubsub" "github.com/stackrox/rox/sensor/common/pubsub/consumer" pubsubErrors "github.com/stackrox/rox/sensor/common/pubsub/errors" "github.com/stackrox/rox/sensor/common/pubsub/metrics" ) -type DefaultConfig struct { +type BlockingConfig struct { Config } -func WithDefaultLaneSize(size int) pubsub.LaneOption { +func WithBlockingLaneSize(size int) pubsub.LaneOption { return func(lane pubsub.Lane) { - laneImpl, ok := lane.(*defaultLane) + laneImpl, ok := lane.(*blockingLane) if !ok { panic("cannot use default lane option for this type of lane") } @@ -29,9 +29,9 @@ func WithDefaultLaneSize(size int) pubsub.LaneOption { } } -func WithDefaultLaneConsumer(consumer pubsub.NewConsumer, opts ...pubsub.ConsumerOption) pubsub.LaneOption { +func WithBlockingLaneConsumer(consumer pubsub.NewConsumer, opts ...pubsub.ConsumerOption) pubsub.LaneOption { return func(lane pubsub.Lane) { - laneImpl, ok := lane.(*defaultLane) + laneImpl, ok := lane.(*blockingLane) if !ok { panic("cannot use default lane option for this type of lane") } @@ -43,8 +43,8 @@ func WithDefaultLaneConsumer(consumer pubsub.NewConsumer, opts ...pubsub.Consume } } -func NewDefaultLane(id pubsub.LaneID, opts ...pubsub.LaneOption) *DefaultConfig { - return &DefaultConfig{ +func NewBlockingLane(id pubsub.LaneID, opts ...pubsub.LaneOption) *BlockingConfig { + return &BlockingConfig{ Config: Config{ id: id, opts: opts, @@ -53,8 +53,8 @@ func NewDefaultLane(id pubsub.LaneID, opts ...pubsub.LaneOption) *DefaultConfig } } -func (c *DefaultConfig) NewLane() pubsub.Lane { - lane := &defaultLane{ +func (c *BlockingConfig) NewLane() pubsub.Lane { + lane := &blockingLane{ Lane: Lane{ id: c.LaneID(), newConsumerFn: c.newConsumer, @@ -65,48 +65,35 @@ func (c *DefaultConfig) NewLane() pubsub.Lane { for _, opt := range c.opts { opt(lane) } - lane.ch = make(chan pubsub.Event, lane.size) + lane.ch = safe.NewChannel[pubsub.Event](lane.size, lane.stopper.LowLevel().GetStopRequestSignal()) go lane.run() return lane } -type defaultLane struct { +type blockingLane 
struct { Lane - mu sync.Mutex size int - ch chan pubsub.Event + ch *safe.Channel[pubsub.Event] stopper concurrency.Stopper } -func (l *defaultLane) Publish(event pubsub.Event) error { - // We need to lock here and nest two selects to avoid races stopping and - // publishing events - l.mu.Lock() - defer l.mu.Unlock() - select { - case <-l.stopper.Flow().StopRequested(): +func (l *blockingLane) Publish(event pubsub.Event) error { + if err := l.ch.Write(event); err != nil { metrics.RecordPublishOperation(l.id, event.Topic(), metrics.PublishError) return errors.Wrap(pubsubErrors.NewPublishOnStoppedLaneErr(l.id), "unable to publish event") - default: - } - select { - case <-l.stopper.Flow().StopRequested(): - metrics.RecordPublishOperation(l.id, event.Topic(), metrics.PublishError) - return errors.Wrap(pubsubErrors.NewPublishOnStoppedLaneErr(l.id), "unable to publish event") - case l.ch <- event: - metrics.RecordPublishOperation(l.id, event.Topic(), metrics.Published) - metrics.SetQueueSize(l.id, len(l.ch)) - return nil } + metrics.RecordPublishOperation(l.id, event.Topic(), metrics.Published) + metrics.SetQueueSize(l.id, l.ch.Len()) + return nil } -func (l *defaultLane) run() { +func (l *blockingLane) run() { defer l.stopper.Flow().ReportStopped() for { select { case <-l.stopper.Flow().StopRequested(): return - case event, ok := <-l.ch: + case event, ok := <-l.ch.Chan(): if !ok { return } @@ -117,9 +104,9 @@ func (l *defaultLane) run() { } } -func (l *defaultLane) handleEvent(event pubsub.Event) error { +func (l *blockingLane) handleEvent(event pubsub.Event) error { defer func() { - metrics.SetQueueSize(l.id, len(l.ch)) + metrics.SetQueueSize(l.id, l.ch.Len()) }() l.consumerLock.RLock() @@ -144,7 +131,7 @@ func (l *defaultLane) handleEvent(event pubsub.Event) error { return errList.ToError() } -func (l *defaultLane) RegisterConsumer(consumerID pubsub.ConsumerID, topic pubsub.Topic, callback pubsub.EventCallback) error { +func (l *blockingLane) RegisterConsumer(consumerID pubsub.ConsumerID, topic pubsub.Topic, callback pubsub.EventCallback) error { if callback == nil { return errors.New("cannot register a 'nil' callback") } @@ -159,15 +146,11 @@ func (l *defaultLane) RegisterConsumer(consumerID pubsub.ConsumerID, topic pubsu return nil } -func (l *defaultLane) Stop() { +func (l *blockingLane) Stop() { l.stopper.Client().Stop() + // Wait for the run() goroutine to fully exit before closing the channel. + // This ensures an orderly shutdown where event processing is complete. 
<-l.stopper.Client().Stopped().Done() - concurrency.WithLock(&l.mu, func() { - if l.ch == nil { - return - } - close(l.ch) - l.ch = nil - }) + l.ch.Close() l.Lane.Stop() } diff --git a/sensor/common/pubsub/lane/default_test.go b/sensor/common/pubsub/lane/blocking_test.go similarity index 78% rename from sensor/common/pubsub/lane/default_test.go rename to sensor/common/pubsub/lane/blocking_test.go index c627d77f98ac0..15905aaaecba7 100644 --- a/sensor/common/pubsub/lane/default_test.go +++ b/sensor/common/pubsub/lane/blocking_test.go @@ -13,66 +13,66 @@ import ( "github.com/stretchr/testify/suite" ) -type defaultLaneSuite struct { +type blockingLaneSuite struct { suite.Suite } -func TestDefaultLane(t *testing.T) { - suite.Run(t, new(defaultLaneSuite)) +func TestBlockingLane(t *testing.T) { + suite.Run(t, new(blockingLaneSuite)) } -func (s *defaultLaneSuite) TestNewLaneOptions() { +func (s *blockingLaneSuite) TestNewLaneOptions() { defer goleak.AssertNoGoroutineLeaks(s.T()) s.Run("with default options", func() { - config := NewDefaultLane(pubsub.DefaultLane) + config := NewBlockingLane(pubsub.DefaultLane) assert.Equal(s.T(), pubsub.DefaultLane, config.LaneID()) lane := config.NewLane() assert.NotNil(s.T(), lane) defer lane.Stop() - laneImpl, ok := lane.(*defaultLane) + laneImpl, ok := lane.(*blockingLane) require.True(s.T(), ok) - assert.Equal(s.T(), 0, cap(laneImpl.ch)) + assert.Equal(s.T(), 0, laneImpl.ch.Cap()) }) s.Run("with default lane size", func() { laneSize := 10 - config := NewDefaultLane(pubsub.DefaultLane, WithDefaultLaneSize(laneSize)) + config := NewBlockingLane(pubsub.DefaultLane, WithBlockingLaneSize(laneSize)) assert.Equal(s.T(), pubsub.DefaultLane, config.LaneID()) lane := config.NewLane() assert.NotNil(s.T(), lane) defer lane.Stop() - laneImpl, ok := lane.(*defaultLane) + laneImpl, ok := lane.(*blockingLane) require.True(s.T(), ok) - assert.Equal(s.T(), laneSize, cap(laneImpl.ch)) + assert.Equal(s.T(), laneSize, laneImpl.ch.Cap()) }) s.Run("with negative lane size", func() { laneSize := -1 - config := NewDefaultLane(pubsub.DefaultLane, WithDefaultLaneSize(laneSize)) + config := NewBlockingLane(pubsub.DefaultLane, WithBlockingLaneSize(laneSize)) assert.Equal(s.T(), pubsub.DefaultLane, config.LaneID()) lane := config.NewLane() assert.NotNil(s.T(), lane) defer lane.Stop() - laneImpl, ok := lane.(*defaultLane) + laneImpl, ok := lane.(*blockingLane) require.True(s.T(), ok) - assert.Equal(s.T(), 0, cap(laneImpl.ch)) + assert.Equal(s.T(), 0, laneImpl.ch.Cap()) }) s.Run("with custom consumer", func() { - config := NewDefaultLane(pubsub.DefaultLane, WithDefaultLaneConsumer(newTestConsumer)) + config := NewBlockingLane(pubsub.DefaultLane, WithBlockingLaneConsumer(newTestConsumer)) assert.Equal(s.T(), pubsub.DefaultLane, config.LaneID()) lane := config.NewLane() assert.NotNil(s.T(), lane) defer lane.Stop() - laneImpl, ok := lane.(*defaultLane) + laneImpl, ok := lane.(*blockingLane) require.True(s.T(), ok) assert.NotNil(s.T(), laneImpl.newConsumerFn) assert.Len(s.T(), laneImpl.consumerOpts, 0) }) s.Run("with custom consumer and consumer options", func() { - config := NewDefaultLane(pubsub.DefaultLane, WithDefaultLaneConsumer(newTestConsumer, func(_ pubsub.Consumer) {})) + config := NewBlockingLane(pubsub.DefaultLane, WithBlockingLaneConsumer(newTestConsumer, func(_ pubsub.Consumer) {})) assert.Equal(s.T(), pubsub.DefaultLane, config.LaneID()) lane := config.NewLane() assert.NotNil(s.T(), lane) defer lane.Stop() - laneImpl, ok := lane.(*defaultLane) + laneImpl, ok := 
lane.(*blockingLane) require.True(s.T(), ok) assert.NotNil(s.T(), laneImpl.newConsumerFn) assert.Len(s.T(), laneImpl.consumerOpts, 1) @@ -107,50 +107,50 @@ func (lc *testLaneConfig) LaneID() pubsub.LaneID { return pubsub.DefaultLane } -func (s *defaultLaneSuite) TestOptionPanic() { +func (s *blockingLaneSuite) TestOptionPanic() { defer goleak.AssertNoGoroutineLeaks(s.T()) - s.Run("panic if WithDefaultLaneSize is used in a different lane", func() { + s.Run("panic if WithBlockingLaneSize is used in a different lane", func() { config := &testLaneConfig{ opts: []pubsub.LaneOption{ - WithDefaultLaneSize(10), + WithBlockingLaneSize(10), }, } s.Assert().Panics(func() { config.NewLane() }) }) - s.Run("panic if WithDefaultLaneConsumer is used in a different lane", func() { + s.Run("panic if WithBlockingLaneConsumer is used in a different lane", func() { config := &testLaneConfig{ opts: []pubsub.LaneOption{ - WithDefaultLaneConsumer(nil), + WithBlockingLaneConsumer(nil), }, } s.Assert().Panics(func() { config.NewLane() }) }) - s.Run("panic if a nil NewConsumer is passed to WithDefaultLaneConsumer", func() { - config := NewDefaultLane(pubsub.DefaultLane, WithDefaultLaneConsumer(nil)) + s.Run("panic if a nil NewConsumer is passed to WithBlockingLaneConsumer", func() { + config := NewBlockingLane(pubsub.DefaultLane, WithBlockingLaneConsumer(nil)) s.Assert().Panics(func() { config.NewLane() }) }) } -func (s *defaultLaneSuite) TestRegisterConsumer() { +func (s *blockingLaneSuite) TestRegisterConsumer() { defer goleak.AssertNoGoroutineLeaks(s.T()) s.Run("should error on nil callback", func() { - lane := NewDefaultLane(pubsub.DefaultLane).NewLane() + lane := NewBlockingLane(pubsub.DefaultLane).NewLane() assert.NotNil(s.T(), lane) assert.Error(s.T(), lane.RegisterConsumer(pubsub.DefaultConsumer, pubsub.DefaultTopic, nil)) lane.Stop() }) } -func (s *defaultLaneSuite) TestPublish() { +func (s *blockingLaneSuite) TestPublish() { defer goleak.AssertNoGoroutineLeaks(s.T()) s.Run("publish with blocking consumer should block", func() { - lane := NewDefaultLane(pubsub.DefaultLane).NewLane() + lane := NewBlockingLane(pubsub.DefaultLane).NewLane() assert.NotNil(s.T(), lane) unblockSig := concurrency.NewSignal() wg := sync.WaitGroup{} @@ -177,14 +177,14 @@ func (s *defaultLaneSuite) TestPublish() { wg.Wait() }) s.Run("publish with no consumer should not block", func() { - lane := NewDefaultLane(pubsub.DefaultLane).NewLane() + lane := NewBlockingLane(pubsub.DefaultLane).NewLane() assert.NotNil(s.T(), lane) assert.NoError(s.T(), lane.Publish(&testEvent{})) assert.NoError(s.T(), lane.Publish(&testEvent{})) lane.Stop() }) s.Run("publish and consume", func() { - lane := NewDefaultLane(pubsub.DefaultLane).NewLane() + lane := NewBlockingLane(pubsub.DefaultLane).NewLane() assert.NotNil(s.T(), lane) data := "some data" consumeSignal := concurrency.NewSignal() @@ -201,7 +201,7 @@ func (s *defaultLaneSuite) TestPublish() { lane.Stop() }) s.Run("stop should unblock publish", func() { - lane := NewDefaultLane(pubsub.DefaultLane).NewLane() + lane := NewBlockingLane(pubsub.DefaultLane).NewLane() assert.NotNil(s.T(), lane) unblockSig := concurrency.NewSignal() wg := sync.WaitGroup{} diff --git a/sensor/common/pubsub/lane/concurrent.go b/sensor/common/pubsub/lane/concurrent.go new file mode 100644 index 0000000000000..e126c35e470be --- /dev/null +++ b/sensor/common/pubsub/lane/concurrent.go @@ -0,0 +1,169 @@ +package lane + +import ( + "github.com/pkg/errors" + "github.com/stackrox/rox/pkg/concurrency" + 
"github.com/stackrox/rox/pkg/safe" + "github.com/stackrox/rox/sensor/common/pubsub" + "github.com/stackrox/rox/sensor/common/pubsub/consumer" + pubsubErrors "github.com/stackrox/rox/sensor/common/pubsub/errors" + "github.com/stackrox/rox/sensor/common/pubsub/metrics" +) + +type ConcurrentConfig struct { + Config +} + +func WithConcurrentLaneSize(size int) pubsub.LaneOption { + return func(lane pubsub.Lane) { + laneImpl, ok := lane.(*concurrentLane) + if !ok { + panic("attempted using concurrent lane option for a different lane type") + } + if size < 0 { + return + } + laneImpl.size = size + } +} + +func WithConcurrentLaneConsumer(consumer pubsub.NewConsumer, opts ...pubsub.ConsumerOption) pubsub.LaneOption { + return func(lane pubsub.Lane) { + laneImpl, ok := lane.(*concurrentLane) + if !ok { + panic("attempted using concurrent lane option for a different lane type") + } + if consumer == nil { + panic("cannot configure a 'nil' NewConsumer function") + } + laneImpl.newConsumerFn = consumer + laneImpl.consumerOpts = opts + } +} + +func NewConcurrentLane(id pubsub.LaneID, opts ...pubsub.LaneOption) *ConcurrentConfig { + return &ConcurrentConfig{ + Config: Config{ + id: id, + opts: opts, + newConsumer: consumer.NewDefaultConsumer, + }, + } +} + +func (c *ConcurrentConfig) NewLane() pubsub.Lane { + lane := &concurrentLane{ + Lane: Lane{ + id: c.LaneID(), + newConsumerFn: c.newConsumer, + consumers: make(map[pubsub.Topic][]pubsub.Consumer), + }, + stopper: concurrency.NewStopper(), + } + for _, opt := range c.opts { + opt(lane) + } + lane.ch = safe.NewChannel[pubsub.Event](lane.size, lane.stopper.LowLevel().GetStopRequestSignal()) + go lane.run() + return lane +} + +type concurrentLane struct { + Lane + size int + ch *safe.Channel[pubsub.Event] + stopper concurrency.Stopper +} + +func (l *concurrentLane) Publish(event pubsub.Event) error { + if err := l.ch.Write(event); err != nil { + metrics.RecordPublishOperation(l.id, event.Topic(), metrics.PublishError) + return errors.Wrap(pubsubErrors.NewPublishOnStoppedLaneErr(l.id), "unable to publish event") + } + metrics.RecordPublishOperation(l.id, event.Topic(), metrics.Published) + metrics.SetQueueSize(l.id, l.ch.Len()) + return nil +} + +func (l *concurrentLane) run() { + defer l.stopper.Flow().ReportStopped() + for { + // Priority 1: Check if stop requested + select { + case <-l.stopper.Flow().StopRequested(): + return + default: + } + // Priority 2: Read event, but respect stop during blocking read + select { + case <-l.stopper.Flow().StopRequested(): + return + case event, ok := <-l.ch.Chan(): + if !ok { + return + } + l.handleEvent(event) + } + } +} + +func (l *concurrentLane) getConsumersByTopic(topic pubsub.Topic) ([]pubsub.Consumer, error) { + l.consumerLock.RLock() + defer l.consumerLock.RUnlock() + consumers, ok := l.consumers[topic] + if !ok { + return nil, errors.Wrap(pubsubErrors.NewConsumersNotFoundForTopicErr(topic, l.id), "unable to handle event") + } + return consumers, nil +} + +func (l *concurrentLane) handleEvent(event pubsub.Event) { + defer metrics.SetQueueSize(l.id, l.ch.Len()) + consumers, err := l.getConsumersByTopic(event.Topic()) + if err != nil { + log.Errorf("unable to handle event: %v", err) + metrics.RecordConsumerOperation(l.id, event.Topic(), pubsub.NoConsumers, metrics.NoConsumers) + return + } + for _, c := range consumers { + errC := c.Consume(l.stopper.Client().Stopped(), event) + // Spawning go routine here to not block other consumers + go l.handleConsumerError(errC) + } +} + +func (l *concurrentLane) 
handleConsumerError(errC <-chan error) { + // This blocks until the consumer finishes the processing + // TODO: Consider adding a timeout here + select { + case err := <-errC: + if err != nil { + // TODO: consider adding a callback to inform of the error + log.Errorf("unable to handle event: %v", err) + } + case <-l.stopper.Flow().StopRequested(): + } +} + +func (l *concurrentLane) RegisterConsumer(consumerID pubsub.ConsumerID, topic pubsub.Topic, callback pubsub.EventCallback) error { + if callback == nil { + return errors.New("cannot register a 'nil' callback") + } + c, err := l.newConsumerFn(l.id, topic, consumerID, callback, l.consumerOpts...) + if err != nil { + return errors.Wrap(err, "creating the consumer") + } + l.consumerLock.Lock() + defer l.consumerLock.Unlock() + l.consumers[topic] = append(l.consumers[topic], c) + return nil +} + +func (l *concurrentLane) Stop() { + l.stopper.Client().Stop() + // Wait for the run() goroutine to fully exit before closing the channel. + // This ensures an orderly shutdown where event processing is complete. + <-l.stopper.Client().Stopped().Done() + l.ch.Close() + l.Lane.Stop() +} diff --git a/sensor/common/pubsub/lane/concurrent_test.go b/sensor/common/pubsub/lane/concurrent_test.go new file mode 100644 index 0000000000000..ff96addef6f8a --- /dev/null +++ b/sensor/common/pubsub/lane/concurrent_test.go @@ -0,0 +1,408 @@ +package lane + +import ( + "errors" + "fmt" + "testing" + "time" + + "github.com/stackrox/rox/pkg/concurrency" + "github.com/stackrox/rox/pkg/testutils/goleak" + "github.com/stackrox/rox/sensor/common/pubsub" + "github.com/stackrox/rox/sensor/common/pubsub/metrics" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/prometheus/client_golang/prometheus/testutil" +) + +func TestNewLaneOptions(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + t.Run("with default options", func(t *testing.T) { + config := NewConcurrentLane(pubsub.DefaultLane) + assert.Equal(t, pubsub.DefaultLane, config.LaneID()) + lane := config.NewLane() + assert.NotNil(t, lane) + defer lane.Stop() + laneImpl, ok := lane.(*concurrentLane) + require.True(t, ok) + assert.Equal(t, 0, laneImpl.ch.Cap()) + }) + t.Run("with custom lane size", func(t *testing.T) { + laneSize := 10 + config := NewConcurrentLane(pubsub.DefaultLane, WithConcurrentLaneSize(laneSize)) + assert.Equal(t, pubsub.DefaultLane, config.LaneID()) + lane := config.NewLane() + assert.NotNil(t, lane) + defer lane.Stop() + laneImpl, ok := lane.(*concurrentLane) + require.True(t, ok) + assert.Equal(t, laneSize, laneImpl.ch.Cap()) + }) + t.Run("with negative lane size", func(t *testing.T) { + laneSize := -1 + config := NewConcurrentLane(pubsub.DefaultLane, WithConcurrentLaneSize(laneSize)) + assert.Equal(t, pubsub.DefaultLane, config.LaneID()) + lane := config.NewLane() + assert.NotNil(t, lane) + defer lane.Stop() + laneImpl, ok := lane.(*concurrentLane) + require.True(t, ok) + assert.Equal(t, 0, laneImpl.ch.Cap()) + }) + t.Run("with custom consumer", func(t *testing.T) { + config := NewConcurrentLane(pubsub.DefaultLane, WithConcurrentLaneConsumer(newTestConsumer)) + assert.Equal(t, pubsub.DefaultLane, config.LaneID()) + lane := config.NewLane() + assert.NotNil(t, lane) + defer lane.Stop() + laneImpl, ok := lane.(*concurrentLane) + require.True(t, ok) + assert.NotNil(t, laneImpl.newConsumerFn) + assert.Len(t, laneImpl.consumerOpts, 0) + }) + t.Run("with custom consumer and consumer options", func(t *testing.T) { + config := NewConcurrentLane( + 
pubsub.DefaultLane, + WithConcurrentLaneConsumer(newTestConsumer, func(_ pubsub.Consumer) {}), + ) + assert.Equal(t, pubsub.DefaultLane, config.LaneID()) + lane := config.NewLane() + assert.NotNil(t, lane) + defer lane.Stop() + laneImpl, ok := lane.(*concurrentLane) + require.True(t, ok) + assert.NotNil(t, laneImpl.newConsumerFn) + assert.Len(t, laneImpl.consumerOpts, 1) + }) +} + +func TestOptionPanic(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + t.Run("panic if WithConcurrentLaneSize is used in a different lane", func(t *testing.T) { + config := &testLaneConfig{ + opts: []pubsub.LaneOption{ + WithConcurrentLaneSize(10), + }, + } + assert.Panics(t, func() { + config.NewLane() + }) + }) + t.Run("panic if WithConcurrentLaneConsumer is used in a different lane", func(t *testing.T) { + config := &testLaneConfig{ + opts: []pubsub.LaneOption{ + WithConcurrentLaneConsumer(nil), + }, + } + assert.Panics(t, func() { + config.NewLane() + }) + }) + t.Run("panic if a nil NewConsumer is passed to WithConcurrentLaneConsumer", func(t *testing.T) { + config := NewConcurrentLane(pubsub.DefaultLane, WithConcurrentLaneConsumer(nil)) + assert.Panics(t, func() { + config.NewLane() + }) + }) +} + +func TestRegisterConsumer(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + t.Run("should error on nil callback", func(t *testing.T) { + lane := NewConcurrentLane(pubsub.DefaultLane).NewLane() + assert.NotNil(t, lane) + assert.Error(t, lane.RegisterConsumer(pubsub.DefaultConsumer, pubsub.DefaultTopic, nil)) + lane.Stop() + }) + t.Run("should successfully register consumer", func(t *testing.T) { + lane := NewConcurrentLane(pubsub.DefaultLane).NewLane() + assert.NotNil(t, lane) + defer lane.Stop() + assert.NoError(t, lane.RegisterConsumer(pubsub.DefaultConsumer, pubsub.DefaultTopic, func(_ pubsub.Event) error { + return nil + })) + laneImpl, ok := lane.(*concurrentLane) + require.True(t, ok) + assert.Len(t, laneImpl.consumers[pubsub.DefaultTopic], 1) + }) +} + +func TestPublish(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + t.Run("publish with no consumer should not block", func(t *testing.T) { + lane := NewConcurrentLane(pubsub.DefaultLane, WithConcurrentLaneSize(5)).NewLane() + assert.NotNil(t, lane) + assert.NoError(t, lane.Publish(&concurrentTestEvent{})) + assert.NoError(t, lane.Publish(&concurrentTestEvent{})) + lane.Stop() + }) + t.Run("publish and consume concurrently", func(t *testing.T) { + lane := NewConcurrentLane(pubsub.DefaultLane).NewLane() + assert.NotNil(t, lane) + data := "some data" + consumeSignal := concurrency.NewSignal() + assert.NoError(t, lane.RegisterConsumer(pubsub.DefaultConsumer, pubsub.DefaultTopic, + assertInConcurrentCallback(t, func(t *testing.T, event pubsub.Event) error { + defer consumeSignal.Signal() + eventImpl, ok := event.(*concurrentTestEvent) + require.True(t, ok) + assert.Equal(t, data, eventImpl.data) + return nil + }))) + assert.NoError(t, lane.Publish(&concurrentTestEvent{data: data})) + select { + case <-time.After(500 * time.Millisecond): + t.Fatal("Event should be consumed within timeout") + case <-consumeSignal.Done(): + } + lane.Stop() + }) + t.Run("publish should not block with slow consumer (concurrent processing)", func(t *testing.T) { + lane := NewConcurrentLane(pubsub.DefaultLane, WithConcurrentLaneSize(5)).NewLane() + assert.NotNil(t, lane) + unblockSig := concurrency.NewSignal() + numEvents := 3 + doneCh := make(chan struct{}, numEvents) + assert.NoError(t, lane.RegisterConsumer(pubsub.DefaultConsumer, pubsub.DefaultTopic, 
func(_ pubsub.Event) error { + <-unblockSig.Done() + doneCh <- struct{}{} + return nil + })) + // Publish multiple events - they should not block even though consumer is slow + for i := 0; i < numEvents; i++ { + require.NoError(t, lane.Publish(&concurrentTestEvent{data: "event"})) + } + unblockSig.Signal() + // Wait for all events to be processed + for i := 0; i < numEvents; i++ { + select { + case <-doneCh: + case <-time.After(1 * time.Second): + t.Fatalf("Event %d was not processed within timeout", i) + } + } + lane.Stop() + }) + t.Run("stop should prevent new publishes", func(t *testing.T) { + lane := NewConcurrentLane(pubsub.DefaultLane).NewLane() + assert.NotNil(t, lane) + assert.NoError(t, lane.RegisterConsumer(pubsub.DefaultConsumer, pubsub.DefaultTopic, func(_ pubsub.Event) error { + return nil + })) + lane.Stop() + err := lane.Publish(&concurrentTestEvent{}) + assert.Error(t, err) + assert.Contains(t, err.Error(), "publishing on stopped lane") + }) + t.Run("multiple consumers receive same event", func(t *testing.T) { + lane := NewConcurrentLane(pubsub.DefaultLane).NewLane() + assert.NotNil(t, lane) + consumer1Signal := concurrency.NewSignal() + consumer2Signal := concurrency.NewSignal() + assert.NoError(t, lane.RegisterConsumer(pubsub.DefaultConsumer, pubsub.DefaultTopic, func(_ pubsub.Event) error { + consumer1Signal.Signal() + return nil + })) + assert.NoError(t, lane.RegisterConsumer(pubsub.DefaultConsumer, pubsub.DefaultTopic, func(_ pubsub.Event) error { + consumer2Signal.Signal() + return nil + })) + assert.NoError(t, lane.Publish(&concurrentTestEvent{data: "broadcast"})) + select { + case <-time.After(500 * time.Millisecond): + t.Fatal("First consumer should receive event within timeout") + case <-consumer1Signal.Done(): + } + select { + case <-time.After(500 * time.Millisecond): + t.Fatal("Second consumer should receive event within timeout") + case <-consumer2Signal.Done(): + } + lane.Stop() + }) + t.Run("parallel publish operations", func(t *testing.T) { + numPublishers := 10 + eventsPerPublisher := 10 + laneSize := numPublishers * eventsPerPublisher + lane := NewConcurrentLane(pubsub.DefaultLane, WithConcurrentLaneSize(laneSize)).NewLane() + assert.NotNil(t, lane) + defer lane.Stop() + + receivedEvents := make(chan string, numPublishers*eventsPerPublisher) + assert.NoError(t, lane.RegisterConsumer(pubsub.DefaultConsumer, pubsub.DefaultTopic, func(event pubsub.Event) error { + eventImpl, ok := event.(*concurrentTestEvent) + require.True(t, ok) + receivedEvents <- eventImpl.data + return nil + })) + + // Start signal to coordinate parallel publishers + startSignal := concurrency.NewSignal() + + // Spawn multiple publisher goroutines + for i := 0; i < numPublishers; i++ { + publisherID := i + go func() { + <-startSignal.Done() + for j := 0; j < eventsPerPublisher; j++ { + event := &concurrentTestEvent{ + data: fmt.Sprintf("publisher_%d_event_%d", publisherID, j), + } + require.NoError(t, lane.Publish(event)) + } + }() + } + + // Signal all publishers to start publishing simultaneously + startSignal.Signal() + + // Wait for all events to be consumed + totalEvents := numPublishers * eventsPerPublisher + receivedCount := 0 + for receivedCount < totalEvents { + select { + case <-receivedEvents: + receivedCount++ + case <-time.After(2 * time.Second): + t.Fatalf("Only received %d/%d events within timeout", receivedCount, totalEvents) + } + } + + // Verify all events were received + assert.Equal(t, totalEvents, receivedCount) + }) +} + +func TestErrorHandling(t *testing.T) { + 
defer goleak.AssertNoGoroutineLeaks(t) + t.Run("consumer error should be logged", func(t *testing.T) { + lane := NewConcurrentLane(pubsub.DefaultLane).NewLane() + assert.NotNil(t, lane) + expectedErr := errors.New("consumer error") + assert.NoError(t, lane.RegisterConsumer(pubsub.DefaultConsumer, pubsub.DefaultTopic, func(_ pubsub.Event) error { + return expectedErr + })) + initialCounter := testutil.ToFloat64(metrics.LaneConsumerOperations.WithLabelValues( + pubsub.DefaultLane.String(), + pubsub.DefaultTopic.String(), + pubsub.DefaultConsumer.String(), + metrics.ConsumerError.String())) + assert.NoError(t, lane.Publish(&concurrentTestEvent{data: "error event"})) + assert.Eventually(t, func() bool { + counter := metrics.LaneConsumerOperations.WithLabelValues( + pubsub.DefaultLane.String(), + pubsub.DefaultTopic.String(), + pubsub.DefaultConsumer.String(), + metrics.ConsumerError.String()) + return testutil.ToFloat64(counter) > initialCounter + }, 100*time.Millisecond, 10*time.Millisecond) + lane.Stop() + }) + t.Run("publish to topic with no consumers should log error", func(t *testing.T) { + lane := NewConcurrentLane(pubsub.DefaultLane).NewLane() + assert.NotNil(t, lane) + unknownTopic := pubsub.Topic(999) + + // Register a barrier consumer on a different topic to ensure event processing completes + barrierTopic := pubsub.Topic(998) + barrierDone := make(chan struct{}) + assert.NoError(t, lane.RegisterConsumer(pubsub.DefaultConsumer, barrierTopic, func(_ pubsub.Event) error { + close(barrierDone) + return nil + })) + + initialCounter := testutil.ToFloat64(metrics.LaneConsumerOperations.WithLabelValues( + pubsub.DefaultLane.String(), + unknownTopic.String(), + pubsub.NoConsumers.String(), + metrics.NoConsumers.String())) + + // Publish to topic with no consumers + assert.NoError(t, lane.Publish(&concurrentTestEvent{customTopic: &unknownTopic})) + + // Publish barrier event - when this completes, we know the previous event was processed + assert.NoError(t, lane.Publish(&concurrentTestEvent{customTopic: &barrierTopic})) + select { + case <-time.After(1 * time.Second): + t.Fatal("Barrier event not processed within timeout") + case <-barrierDone: + } + + // Verify metrics were updated + counter := metrics.LaneConsumerOperations.WithLabelValues( + pubsub.DefaultLane.String(), + unknownTopic.String(), + pubsub.NoConsumers.String(), + metrics.NoConsumers.String()) + assert.Greater(t, testutil.ToFloat64(counter), initialCounter) + lane.Stop() + }) +} + +func TestStop(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + t.Run("stop should clean up resources", func(t *testing.T) { + lane := NewConcurrentLane(pubsub.DefaultLane).NewLane() + assert.NotNil(t, lane) + lane.Stop() + laneImpl, ok := lane.(*concurrentLane) + require.True(t, ok) + + _, ok = <-laneImpl.ch.Chan() + assert.False(t, ok) + }) + t.Run("stop should stop all consumers", func(t *testing.T) { + lane := NewConcurrentLane(pubsub.DefaultLane).NewLane() + assert.NotNil(t, lane) + consumerStopped := false + mockConsumer := &mockConsumer{ + stopFn: func() { + consumerStopped = true + }, + } + laneImpl := lane.(*concurrentLane) + laneImpl.consumers[pubsub.DefaultTopic] = []pubsub.Consumer{mockConsumer} + lane.Stop() + assert.True(t, consumerStopped) + }) +} + +func assertInConcurrentCallback(t *testing.T, assertion func(*testing.T, pubsub.Event) error) pubsub.EventCallback { + return func(event pubsub.Event) error { + return assertion(t, event) + } +} + +// testEvent with topic field for flexibility in tests +type concurrentTestEvent 
struct { + data string + customTopic *pubsub.Topic +} + +func (t *concurrentTestEvent) Topic() pubsub.Topic { + if t.customTopic != nil { + return *t.customTopic + } + return pubsub.DefaultTopic +} + +func (t *concurrentTestEvent) Lane() pubsub.LaneID { + return pubsub.DefaultLane +} + +type mockConsumer struct { + stopFn func() +} + +func (m *mockConsumer) Consume(_ concurrency.Waitable, _ pubsub.Event) <-chan error { + errC := make(chan error) + close(errC) + return errC +} + +func (m *mockConsumer) Stop() { + if m.stopFn != nil { + m.stopFn() + } +} diff --git a/sensor/common/pubsub/metrics/metrics.go b/sensor/common/pubsub/metrics/metrics.go index 6e9e67913a3b1..8290665eb34f2 100644 --- a/sensor/common/pubsub/metrics/metrics.go +++ b/sensor/common/pubsub/metrics/metrics.go @@ -18,9 +18,9 @@ var ( Help: "Total number of pubsub lane publish operations by lane, topic, and operation type", }, []string{"lane_id", "topic", "operation"}) - // laneConsumerOperations tracks all publish operations across lanes. + // LaneConsumerOperations tracks all publish operations across lanes. // Operations: success, error, no_consumers - laneConsumerOperations = prometheus.NewCounterVec(prometheus.CounterOpts{ + LaneConsumerOperations = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: metrics.PrometheusNamespace, Subsystem: metrics.SensorSubsystem.String(), Name: "pubsub_lane_consumer_operations_total", @@ -58,7 +58,7 @@ func RecordPublishOperation(laneID pubsub.LaneID, topic pubsub.Topic, operation } func RecordConsumerOperation(laneID pubsub.LaneID, topic pubsub.Topic, consumerID pubsub.ConsumerID, operation Operation) { - laneConsumerOperations.WithLabelValues(laneID.String(), topic.String(), consumerID.String(), operation.String()).Inc() + LaneConsumerOperations.WithLabelValues(laneID.String(), topic.String(), consumerID.String(), operation.String()).Inc() } func SetQueueSize(laneID pubsub.LaneID, size int) { @@ -76,7 +76,7 @@ func RecordConsumerCount(laneID pubsub.LaneID, topic pubsub.Topic, count int) { func init() { prometheus.MustRegister( lanePublishOperations, - laneConsumerOperations, + LaneConsumerOperations, laneQueueSize, laneEventProcessingDuration, consumersCurrent, diff --git a/sensor/kubernetes/sensor/sensor.go b/sensor/kubernetes/sensor/sensor.go index bdda38ec3a500..0a67c4f7f2956 100644 --- a/sensor/kubernetes/sensor/sensor.go +++ b/sensor/kubernetes/sensor/sensor.go @@ -76,8 +76,8 @@ func CreateSensor(cfg *CreateOptions) (*sensor.Sensor, error) { var err error internalMessageDispatcher, err = pubsubDispatcher.NewDispatcher(pubsubDispatcher.WithLaneConfigs( []pubsub.LaneConfig{ - lane.NewDefaultLane(pubsub.KubernetesDispatcherEventLane), - lane.NewDefaultLane(pubsub.FromCentralResolverEventLane), + lane.NewBlockingLane(pubsub.KubernetesDispatcherEventLane), + lane.NewBlockingLane(pubsub.FromCentralResolverEventLane), }, )) if err != nil { From 6555d8e6d37bf3c3cd4deea57e8bc34ed56584fa Mon Sep 17 00:00:00 2001 From: Cong Du Date: Wed, 4 Feb 2026 08:09:33 -0800 Subject: [PATCH 115/232] chore(upgrade): Bump 4.8 previous version in upgrade test to 4.8.8 (#18829) --- tests/upgrade/postgres_run.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/upgrade/postgres_run.sh b/tests/upgrade/postgres_run.sh index a91cc2ead5af5..234731012c814 100755 --- a/tests/upgrade/postgres_run.sh +++ b/tests/upgrade/postgres_run.sh @@ -10,7 +10,7 @@ TEST_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")"/../.. 
&& pwd)" EARLIER_TAG="4.6.2" EARLIER_SHA="ecff2a443c8b9a2dc7bf606162da89da81dd8e9e" CURRENT_TAG="$(make --quiet --no-print-directory tag)" -PREVIOUS_RELEASES=("4.6.10" "4.7.9" "4.8.7" "4.9.2") +PREVIOUS_RELEASES=("4.6.10" "4.7.9" "4.8.8" "4.9.2") # shellcheck source=../../scripts/lib.sh source "$TEST_ROOT/scripts/lib.sh" From 3ed9868aa8d0c64858afb38ad883f3aa617477af Mon Sep 17 00:00:00 2001 From: Alex Vulaj Date: Wed, 4 Feb 2026 12:05:56 -0500 Subject: [PATCH 116/232] ROX-321249: Add cluster_label and namespace_label to Scope proto (#18631) --- central/graphql/resolvers/generated.go | 12 ++ central/policy/customresource/policy.go | 16 +- central/policy/service/validator.go | 21 +- central/policy/service/validator_test.go | 92 +++++++++ generated/api/v1/alert_service.swagger.json | 7 + .../api/v1/detection_service.swagger.json | 7 + generated/api/v1/policy_service.swagger.json | 7 + generated/storage/scope.pb.go | 46 +++-- generated/storage/scope_vtproto.pb.go | 180 ++++++++++++++++++ pkg/scopecomp/scope_test.go | 44 +++++ proto/storage/proto.lock | 10 + proto/storage/scope.proto | 4 +- 12 files changed, 424 insertions(+), 22 deletions(-) diff --git a/central/graphql/resolvers/generated.go b/central/graphql/resolvers/generated.go index 1c073d5911daa..5d44153f28a8a 100644 --- a/central/graphql/resolvers/generated.go +++ b/central/graphql/resolvers/generated.go @@ -1270,8 +1270,10 @@ func registerGeneratedTypes(builder generator.SchemaBuilder) { })) utils.Must(builder.AddType("Scope", []string{ "cluster: String!", + "clusterLabel: Scope_Label", "label: Scope_Label", "namespace: String!", + "namespaceLabel: Scope_Label", })) utils.Must(builder.AddType("ScopeObject", []string{ "id: ID!", @@ -13846,6 +13848,11 @@ func (resolver *scopeResolver) Cluster(ctx context.Context) string { return value } +func (resolver *scopeResolver) ClusterLabel(ctx context.Context) (*scope_LabelResolver, error) { + value := resolver.data.GetClusterLabel() + return resolver.root.wrapScope_Label(value, true, nil) +} + func (resolver *scopeResolver) Label(ctx context.Context) (*scope_LabelResolver, error) { value := resolver.data.GetLabel() return resolver.root.wrapScope_Label(value, true, nil) @@ -13856,6 +13863,11 @@ func (resolver *scopeResolver) Namespace(ctx context.Context) string { return value } +func (resolver *scopeResolver) NamespaceLabel(ctx context.Context) (*scope_LabelResolver, error) { + value := resolver.data.GetNamespaceLabel() + return resolver.root.wrapScope_Label(value, true, nil) +} + type scopeObjectResolver struct { ctx context.Context root *Resolver diff --git a/central/policy/customresource/policy.go b/central/policy/customresource/policy.go index e0f2211cf9dfd..dc44d8f83fb77 100644 --- a/central/policy/customresource/policy.go +++ b/central/policy/customresource/policy.go @@ -9,9 +9,11 @@ import ( // Scope represents storage.Scope in the Custom Resource. 
type Scope struct { - Cluster string `yaml:",omitempty"` - Namespace string `yaml:",omitempty"` - Label *storage.Scope_Label `yaml:",omitempty"` + Cluster string `yaml:",omitempty"` + Namespace string `yaml:",omitempty"` + Label *storage.Scope_Label `yaml:",omitempty"` + ClusterLabel *storage.Scope_Label `yaml:",omitempty"` + NamespaceLabel *storage.Scope_Label `yaml:",omitempty"` } // convertScope Converts storage.Scope to *Scope @@ -21,9 +23,11 @@ func convertScope(p *storage.Scope) *Scope { } return &Scope{ - Cluster: p.Cluster, - Namespace: p.Namespace, - Label: p.Label, + Cluster: p.Cluster, + Namespace: p.Namespace, + Label: p.Label, + ClusterLabel: p.ClusterLabel, + NamespaceLabel: p.NamespaceLabel, } } diff --git a/central/policy/service/validator.go b/central/policy/service/validator.go index de2af721b1128..8c8a9e091fe0d 100644 --- a/central/policy/service/validator.go +++ b/central/policy/service/validator.go @@ -196,7 +196,7 @@ func (s *policyValidator) validateEventSource(policy *storage.Policy) error { } func validateNoLabelsInScopeForAuditEvent(scope *storage.Scope, context string) error { - if scope.GetLabel() != nil { + if scope.GetLabel() != nil || (features.LabelBasedPolicyScoping.Enabled() && (scope.GetClusterLabel() != nil || scope.GetNamespaceLabel() != nil)) { return errors.Errorf("labels in `%s` section are not permitted for audit log events based policies", context) } return nil @@ -321,9 +321,26 @@ func (s *policyValidator) validateDeploymentExclusion(exclusion *storage.Exclusi } func (s *policyValidator) validateScope(scope *storage.Scope) error { - if scope.GetCluster() == "" && scope.GetNamespace() == "" && scope.GetLabel() == nil { + if scope.GetCluster() == "" && scope.GetNamespace() == "" && scope.GetLabel() == nil && scope.GetClusterLabel() == nil && scope.GetNamespaceLabel() == nil { return errors.New("scope must have at least one field populated") } + // Reject cluster_label and namespace_label if feature flag is disabled + if !features.LabelBasedPolicyScoping.Enabled() { + if scope.GetClusterLabel() != nil { + return errors.New("cluster_label field requires feature flag ROX_LABEL_BASED_POLICY_SCOPING to be enabled") + } + if scope.GetNamespaceLabel() != nil { + return errors.New("namespace_label field requires feature flag ROX_LABEL_BASED_POLICY_SCOPING to be enabled") + } + } + // Cluster and cluster_label are mutually exclusive + if scope.GetCluster() != "" && scope.GetClusterLabel() != nil { + return errors.New("scope cannot have both 'cluster' and 'cluster_label' fields populated") + } + // Namespace and namespace_label are mutually exclusive + if scope.GetNamespace() != "" && scope.GetNamespaceLabel() != nil { + return errors.New("scope cannot have both 'namespace' and 'namespace_label' fields populated") + } if _, err := scopecomp.CompileScope(scope); err != nil { return errors.Wrap(err, "could not compile scope") } diff --git a/central/policy/service/validator_test.go b/central/policy/service/validator_test.go index 0b3a0e81a3f46..1c515c7247f54 100644 --- a/central/policy/service/validator_test.go +++ b/central/policy/service/validator_test.go @@ -1227,3 +1227,95 @@ func (s *PolicyValidatorTestSuite) TestValidateNodeEventSource() { }) } } + +func (s *PolicyValidatorTestSuite) TestValidateScope() { + testutils.MustUpdateFeature(s.T(), features.LabelBasedPolicyScoping, true) + defer testutils.MustUpdateFeature(s.T(), features.LabelBasedPolicyScoping, false) + + testCases := []struct { + description string + scope *storage.Scope + errExpected bool + 
}{ + { + description: "cluster and cluster_label are mutually exclusive", + scope: &storage.Scope{ + Cluster: "cluster1", + ClusterLabel: &storage.Scope_Label{ + Key: "env", + Value: "prod", + }, + }, + errExpected: true, + }, + { + description: "namespace and namespace_label are mutually exclusive", + scope: &storage.Scope{ + Namespace: "default", + NamespaceLabel: &storage.Scope_Label{ + Key: "team", + Value: "backend", + }, + }, + errExpected: true, + }, + { + description: "cluster_label alone is valid", + scope: &storage.Scope{ + ClusterLabel: &storage.Scope_Label{ + Key: "env", + Value: "prod", + }, + }, + errExpected: false, + }, + { + description: "namespace_label alone is valid", + scope: &storage.Scope{ + NamespaceLabel: &storage.Scope_Label{ + Key: "team", + Value: "backend", + }, + }, + errExpected: false, + }, + { + description: "cluster and namespace_label together is valid", + scope: &storage.Scope{ + Cluster: "cluster1", + NamespaceLabel: &storage.Scope_Label{ + Key: "team", + Value: "backend", + }, + }, + errExpected: false, + }, + { + description: "cluster_label and namespace together is valid", + scope: &storage.Scope{ + ClusterLabel: &storage.Scope_Label{ + Key: "env", + Value: "prod", + }, + Namespace: "default", + }, + errExpected: false, + }, + { + description: "empty scope is invalid", + scope: &storage.Scope{}, + errExpected: true, + }, + } + + for _, tc := range testCases { + s.T().Run(tc.description, func(t *testing.T) { + err := s.validator.validateScope(tc.scope) + if tc.errExpected { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/generated/api/v1/alert_service.swagger.json b/generated/api/v1/alert_service.swagger.json index 9ae883792eb35..5cc89649aa177 100644 --- a/generated/api/v1/alert_service.swagger.json +++ b/generated/api/v1/alert_service.swagger.json @@ -1746,6 +1746,13 @@ "type": "string" }, "label": { + "$ref": "#/definitions/storageScopeLabel", + "title": "Deployment label." + }, + "clusterLabel": { + "$ref": "#/definitions/storageScopeLabel" + }, + "namespaceLabel": { "$ref": "#/definitions/storageScopeLabel" } } diff --git a/generated/api/v1/detection_service.swagger.json b/generated/api/v1/detection_service.swagger.json index ba16f7d267757..babea44237b88 100644 --- a/generated/api/v1/detection_service.swagger.json +++ b/generated/api/v1/detection_service.swagger.json @@ -1535,6 +1535,13 @@ "type": "string" }, "label": { + "$ref": "#/definitions/storageScopeLabel", + "title": "Deployment label." + }, + "clusterLabel": { + "$ref": "#/definitions/storageScopeLabel" + }, + "namespaceLabel": { "$ref": "#/definitions/storageScopeLabel" } } diff --git a/generated/api/v1/policy_service.swagger.json b/generated/api/v1/policy_service.swagger.json index 94c1cd548bfd7..628389e01e3ad 100644 --- a/generated/api/v1/policy_service.swagger.json +++ b/generated/api/v1/policy_service.swagger.json @@ -1177,6 +1177,13 @@ "type": "string" }, "label": { + "$ref": "#/definitions/storageScopeLabel", + "title": "Deployment label." 
+ }, + "clusterLabel": { + "$ref": "#/definitions/storageScopeLabel" + }, + "namespaceLabel": { "$ref": "#/definitions/storageScopeLabel" } } diff --git a/generated/storage/scope.pb.go b/generated/storage/scope.pb.go index b2edd5588411b..5810972bc89fe 100644 --- a/generated/storage/scope.pb.go +++ b/generated/storage/scope.pb.go @@ -22,12 +22,14 @@ const ( ) type Scope struct { - state protoimpl.MessageState `protogen:"open.v1"` - Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty" crYaml:",omitempty"` // @gotags: crYaml:",omitempty"` - Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty" crYaml:",omitempty"` // @gotags: crYaml:",omitempty"` - Label *Scope_Label `protobuf:"bytes,3,opt,name=label,proto3" json:"label,omitempty" crYaml:",omitempty"` // @gotags: crYaml:",omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty" crYaml:",omitempty"` // @gotags: crYaml:",omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty" crYaml:",omitempty"` // @gotags: crYaml:",omitempty"` + Label *Scope_Label `protobuf:"bytes,3,opt,name=label,proto3" json:"label,omitempty" crYaml:",omitempty"` // Deployment label. @gotags: crYaml:",omitempty"` + ClusterLabel *Scope_Label `protobuf:"bytes,4,opt,name=cluster_label,json=clusterLabel,proto3" json:"cluster_label,omitempty" crYaml:",omitempty"` // @gotags: crYaml:",omitempty"` + NamespaceLabel *Scope_Label `protobuf:"bytes,5,opt,name=namespace_label,json=namespaceLabel,proto3" json:"namespace_label,omitempty" crYaml:",omitempty"` // @gotags: crYaml:",omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Scope) Reset() { @@ -81,6 +83,20 @@ func (x *Scope) GetLabel() *Scope_Label { return nil } +func (x *Scope) GetClusterLabel() *Scope_Label { + if x != nil { + return x.ClusterLabel + } + return nil +} + +func (x *Scope) GetNamespaceLabel() *Scope_Label { + if x != nil { + return x.NamespaceLabel + } + return nil +} + type Scope_Label struct { state protoimpl.MessageState `protogen:"open.v1"` Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` @@ -137,11 +153,13 @@ var File_storage_scope_proto protoreflect.FileDescriptor const file_storage_scope_proto_rawDesc = "" + "\n" + - "\x13storage/scope.proto\x12\astorage\"\x9c\x01\n" + + "\x13storage/scope.proto\x12\astorage\"\x96\x02\n" + "\x05Scope\x12\x18\n" + "\acluster\x18\x01 \x01(\tR\acluster\x12\x1c\n" + "\tnamespace\x18\x02 \x01(\tR\tnamespace\x12*\n" + - "\x05label\x18\x03 \x01(\v2\x14.storage.Scope.LabelR\x05label\x1a/\n" + + "\x05label\x18\x03 \x01(\v2\x14.storage.Scope.LabelR\x05label\x129\n" + + "\rcluster_label\x18\x04 \x01(\v2\x14.storage.Scope.LabelR\fclusterLabel\x12=\n" + + "\x0fnamespace_label\x18\x05 \x01(\v2\x14.storage.Scope.LabelR\x0enamespaceLabel\x1a/\n" + "\x05Label\x12\x10\n" + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + "\x05value\x18\x02 \x01(\tR\x05valueB.\n" + @@ -166,11 +184,13 @@ var file_storage_scope_proto_goTypes = []any{ } var file_storage_scope_proto_depIdxs = []int32{ 1, // 0: storage.Scope.label:type_name -> storage.Scope.Label - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // 
[0:1] is the sub-list for field type_name + 1, // 1: storage.Scope.cluster_label:type_name -> storage.Scope.Label + 1, // 2: storage.Scope.namespace_label:type_name -> storage.Scope.Label + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name } func init() { file_storage_scope_proto_init() } diff --git a/generated/storage/scope_vtproto.pb.go b/generated/storage/scope_vtproto.pb.go index 2cbd8be4fb899..eaf8b609d99e4 100644 --- a/generated/storage/scope_vtproto.pb.go +++ b/generated/storage/scope_vtproto.pb.go @@ -46,6 +46,8 @@ func (m *Scope) CloneVT() *Scope { r.Cluster = m.Cluster r.Namespace = m.Namespace r.Label = m.Label.CloneVT() + r.ClusterLabel = m.ClusterLabel.CloneVT() + r.NamespaceLabel = m.NamespaceLabel.CloneVT() if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -94,6 +96,12 @@ func (this *Scope) EqualVT(that *Scope) bool { if !this.Label.EqualVT(that.Label) { return false } + if !this.ClusterLabel.EqualVT(that.ClusterLabel) { + return false + } + if !this.NamespaceLabel.EqualVT(that.NamespaceLabel) { + return false + } return string(this.unknownFields) == string(that.unknownFields) } @@ -181,6 +189,26 @@ func (m *Scope) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.NamespaceLabel != nil { + size, err := m.NamespaceLabel.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.ClusterLabel != nil { + size, err := m.ClusterLabel.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } if m.Label != nil { size, err := m.Label.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -244,6 +272,14 @@ func (m *Scope) SizeVT() (n int) { l = m.Label.SizeVT() n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } + if m.ClusterLabel != nil { + l = m.ClusterLabel.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.NamespaceLabel != nil { + l = m.NamespaceLabel.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } n += len(m.unknownFields) return n } @@ -492,6 +528,78 @@ func (m *Scope) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterLabel", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClusterLabel == nil { + m.ClusterLabel = &Scope_Label{} + } + if err := m.ClusterLabel.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceLabel", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceLabel == nil { + m.NamespaceLabel = &Scope_Label{} + } + if err := m.NamespaceLabel.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -774,6 +882,78 @@ func (m *Scope) UnmarshalVTUnsafe(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterLabel", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClusterLabel == nil { + m.ClusterLabel = &Scope_Label{} + } + if err := m.ClusterLabel.UnmarshalVTUnsafe(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceLabel", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceLabel == nil { + m.NamespaceLabel = &Scope_Label{} + } + if err := m.NamespaceLabel.UnmarshalVTUnsafe(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) diff --git a/pkg/scopecomp/scope_test.go b/pkg/scopecomp/scope_test.go index 6b905925befc2..1e44f7904d694 100644 --- a/pkg/scopecomp/scope_test.go +++ b/pkg/scopecomp/scope_test.go @@ -140,6 +140,50 @@ func TestWithinScope(t *testing.T) { }, result: true, }, + { + name: "scope with cluster_label", + scope: &storage.Scope{ + ClusterLabel: &storage.Scope_Label{ + Key: "env", + Value: "prod", + }, + }, + deployment: &storage.Deployment{ + ClusterId: "cluster", + }, + result: true, + }, + { + name: "scope with namespace_label", + scope: &storage.Scope{ + NamespaceLabel: &storage.Scope_Label{ + Key: "team", + Value: "backend", + }, + }, + deployment: &storage.Deployment{ + Namespace: "default", + }, + result: true, + }, + { + name: "scope with cluster_label and namespace_label", + scope: &storage.Scope{ + ClusterLabel: &storage.Scope_Label{ + Key: "env", + Value: "prod", + }, + NamespaceLabel: &storage.Scope_Label{ + Key: "team", + Value: "backend", + }, + }, + deployment: &storage.Deployment{ + ClusterId: "cluster", + Namespace: "default", + }, + result: true, + }, } for _, test := range subtests { cs, err := CompileScope(test.scope) diff --git 
a/proto/storage/proto.lock b/proto/storage/proto.lock index d777a4ae0e09c..7148a2ae6383e 100644 --- a/proto/storage/proto.lock +++ b/proto/storage/proto.lock @@ -16514,6 +16514,16 @@ "id": 3, "name": "label", "type": "Label" + }, + { + "id": 4, + "name": "cluster_label", + "type": "Label" + }, + { + "id": 5, + "name": "namespace_label", + "type": "Label" } ], "messages": [ diff --git a/proto/storage/scope.proto b/proto/storage/scope.proto index 224a363429953..f857c0c72ede4 100644 --- a/proto/storage/scope.proto +++ b/proto/storage/scope.proto @@ -12,5 +12,7 @@ message Scope { string key = 1; string value = 2; } - Label label = 3; // @gotags: crYaml:",omitempty"` + Label label = 3; // Deployment label. @gotags: crYaml:",omitempty"` + Label cluster_label = 4; // @gotags: crYaml:",omitempty"` + Label namespace_label = 5; // @gotags: crYaml:",omitempty"` } From a57b0a5bc380df45f72b891d79796ec0d67f0f65 Mon Sep 17 00:00:00 2001 From: Yann Brillouet <91869377+rhybrillou@users.noreply.github.com> Date: Wed, 4 Feb 2026 20:24:45 +0100 Subject: [PATCH 117/232] test: Bump opensearch version for performance tests (#18839) --- tests/performance/scripts/opensearch-upload/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/performance/scripts/opensearch-upload/requirements.txt b/tests/performance/scripts/opensearch-upload/requirements.txt index ca238a9681ac5..928e43d592923 100644 --- a/tests/performance/scripts/opensearch-upload/requirements.txt +++ b/tests/performance/scripts/opensearch-upload/requirements.txt @@ -1 +1 @@ -opensearch-py==2.3.1 +opensearch-py==3.1.0 From 9aeddf59177778336c50d493db7338b71c31caee Mon Sep 17 00:00:00 2001 From: Vlad Bologa Date: Wed, 4 Feb 2026 22:03:18 +0100 Subject: [PATCH 118/232] docs(ocp-plugin): Add Tech Preview notice in Operator CSV (#18848) --- .../rhacs-operator.clusterserviceversion.yaml | 14 +++++++------- .../rhacs-operator.clusterserviceversion.yaml | 2 +- .../rhacs-operator.clusterserviceversion.yaml | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml b/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml index 41743609ee1f5..8f2125ae73d86 100644 --- a/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml +++ b/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml @@ -1858,13 +1858,13 @@ spec: \ back to Central. These services allow users to enforce policies and monitor\ \ your OpenShift and Kubernetes clusters. Secured Cluster Services come as two\ \ Deployments (Sensor and Admission Controller) and one DaemonSet (Collector).\n\ - \n**Console Plugin:** RHACS provides a dynamic plugin that displays vulnerability\ - \ management information in the OpenShift web console. Install a SecuredCluster\ - \ to deploy the plugin. Enable the plugin by selecting *Operators* > *Installed\ - \ Operators* or by modifying the console Operator configuration.\n**Important:**\ - \ The Console Plugin requires OpenShift 4.19 or later.\n\n### Central Services\ - \ Explained\n\n| Service | Deployment Type | Description\ - \ |\n| :------------------------------- | :-------------- | :--------------\ + \n**Console Plugin (Tech Preview):** RHACS provides a dynamic plugin that displays\ + \ vulnerability management information in the OpenShift web console. Install a\ + \ SecuredCluster to deploy the plugin. 
Enable the plugin by selecting *Operators*\ + \ > *Installed Operators* or by modifying the console Operator configuration.\n\ + **Important:** The Console Plugin requires OpenShift 4.19 or later.\n\n### Central\ + \ Services Explained\n\n| Service | Deployment Type |\ + \ Description |\n| :------------------------------- | :-------------- | :--------------\ \ |\n| Central | Deployment | Users interact with\ \ Red Hat Advanced Cluster Security through the user interface or APIs on Central.\ \ Central also sends notifications for violations and interacts with integrations.\ diff --git a/operator/config/manifests/bases/rhacs-operator.clusterserviceversion.yaml b/operator/config/manifests/bases/rhacs-operator.clusterserviceversion.yaml index 967ab805dcf3f..b9cb95381c27e 100644 --- a/operator/config/manifests/bases/rhacs-operator.clusterserviceversion.yaml +++ b/operator/config/manifests/bases/rhacs-operator.clusterserviceversion.yaml @@ -1587,7 +1587,7 @@ spec: 2. **Secured Cluster Services** - Secured cluster services are placed on each cluster you manage and report back to Central. These services allow users to enforce policies and monitor your OpenShift and Kubernetes clusters. Secured Cluster Services come as two Deployments (Sensor and Admission Controller) and one DaemonSet (Collector). - **Console Plugin:** RHACS provides a dynamic plugin that displays vulnerability management information in the OpenShift web console. Install a SecuredCluster to deploy the plugin. Enable the plugin by selecting *Operators* > *Installed Operators* or by modifying the console Operator configuration. + **Console Plugin (Tech Preview):** RHACS provides a dynamic plugin that displays vulnerability management information in the OpenShift web console. Install a SecuredCluster to deploy the plugin. Enable the plugin by selecting *Operators* > *Installed Operators* or by modifying the console Operator configuration. **Important:** The Console Plugin requires OpenShift 4.19 or later. ### Central Services Explained diff --git a/operator/config/ui-metadata/bases/rhacs-operator.clusterserviceversion.yaml b/operator/config/ui-metadata/bases/rhacs-operator.clusterserviceversion.yaml index 996390bf38243..e14968528e1ee 100644 --- a/operator/config/ui-metadata/bases/rhacs-operator.clusterserviceversion.yaml +++ b/operator/config/ui-metadata/bases/rhacs-operator.clusterserviceversion.yaml @@ -65,7 +65,7 @@ spec: 2. **Secured Cluster Services** - Secured cluster services are placed on each cluster you manage and report back to Central. These services allow users to enforce policies and monitor your OpenShift and Kubernetes clusters. Secured Cluster Services come as two Deployments (Sensor and Admission Controller) and one DaemonSet (Collector). - **Console Plugin:** RHACS provides a dynamic plugin that displays vulnerability management information in the OpenShift web console. Install a SecuredCluster to deploy the plugin. Enable the plugin by selecting *Operators* > *Installed Operators* or by modifying the console Operator configuration. + **Console Plugin (Tech Preview):** RHACS provides a dynamic plugin that displays vulnerability management information in the OpenShift web console. Install a SecuredCluster to deploy the plugin. Enable the plugin by selecting *Operators* > *Installed Operators* or by modifying the console Operator configuration. **Important:** The Console Plugin requires OpenShift 4.19 or later. 
### Central Services Explained From 3e91cee8eb3e6caebd5b1506b0057a04f64389ae Mon Sep 17 00:00:00 2001 From: Mauro Ezequiel Moltrasio Date: Thu, 5 Feb 2026 15:31:42 +0100 Subject: [PATCH 119/232] fix: add fact component mapping (#18840) Co-authored-by: Misha Sugakov <537715+msugakov@users.noreply.github.com> --- .tekton/create-custom-snapshot.yaml | 5 +++++ .tekton/images-mirror-set.yaml | 3 +++ .tekton/operator-bundle-pipeline.yaml | 2 +- operator/konflux.bundle.Dockerfile | 4 ++++ 4 files changed, 13 insertions(+), 1 deletion(-) diff --git a/.tekton/create-custom-snapshot.yaml b/.tekton/create-custom-snapshot.yaml index 4f1bab67c91f9..c90b1dacfaa5a 100644 --- a/.tekton/create-custom-snapshot.yaml +++ b/.tekton/create-custom-snapshot.yaml @@ -203,6 +203,11 @@ spec: "internalRepo": "quay.io/rhacs-eng/release-collector", "component": "collector" }, + { + "externalRepo": "registry.redhat.io/advanced-cluster-security/rhacs-fact-rhel9", + "internalRepo": "quay.io/rhacs-eng/release-fact", + "component": "fact" + }, { "externalRepo": "registry.redhat.io/advanced-cluster-security/rhacs-main-rhel8", "internalRepo": "quay.io/rhacs-eng/release-main", diff --git a/.tekton/images-mirror-set.yaml b/.tekton/images-mirror-set.yaml index 3cc93ea45b2df..12bc247ba313d 100644 --- a/.tekton/images-mirror-set.yaml +++ b/.tekton/images-mirror-set.yaml @@ -17,6 +17,9 @@ spec: mirrors: - quay.io/rhacs-eng/release-collector - quay.io/rhacs-eng/release-collector-slim + - source: registry.redhat.io/advanced-cluster-security/rhacs-fact-rhel9 + mirrors: + - quay.io/rhacs-eng/release-fact - source: registry.redhat.io/advanced-cluster-security/rhacs-main-rhel8 mirrors: - quay.io/rhacs-eng/release-main diff --git a/.tekton/operator-bundle-pipeline.yaml b/.tekton/operator-bundle-pipeline.yaml index 0d17c968af159..e7a1b0515a69d 100644 --- a/.tekton/operator-bundle-pipeline.yaml +++ b/.tekton/operator-bundle-pipeline.yaml @@ -208,7 +208,7 @@ spec: - name: fact-image-catalog-repo description: Repository within the Red Hat Container Catalog where the fact image is pushed to during the release. type: string - default: "registry.redhat.io/advanced-cluster-security/rhacs-fact-rhel8" + default: "registry.redhat.io/advanced-cluster-security/rhacs-fact-rhel9" - name: roxctl-image-build-repo description: Repository where the (unreleased) roxctl image is pushed by its build pipeline. 
diff --git a/operator/konflux.bundle.Dockerfile b/operator/konflux.bundle.Dockerfile index 44da03fe694ac..419eb00ac862f 100644 --- a/operator/konflux.bundle.Dockerfile +++ b/operator/konflux.bundle.Dockerfile @@ -44,6 +44,10 @@ ARG RELATED_IMAGE_COLLECTOR ENV RELATED_IMAGE_COLLECTOR=$RELATED_IMAGE_COLLECTOR RUN echo "Checking required RELATED_IMAGE_COLLECTOR"; [[ "${RELATED_IMAGE_COLLECTOR}" != "" ]] +ARG RELATED_IMAGE_FACT +ENV RELATED_IMAGE_FACT=$RELATED_IMAGE_FACT +RUN echo "Checking required RELATED_IMAGE_FACT"; [[ "${RELATED_IMAGE_FACT}" != "" ]] + ARG RELATED_IMAGE_ROXCTL ENV RELATED_IMAGE_ROXCTL=$RELATED_IMAGE_ROXCTL RUN echo "Checking required RELATED_IMAGE_ROXCTL"; [[ "${RELATED_IMAGE_ROXCTL}" != "" ]] From 94f3a6e69c462717fbb002c97e30a30b65fbccb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C3=ABl?= Date: Thu, 5 Feb 2026 16:46:53 +0100 Subject: [PATCH 120/232] ROX-31172: fix misleading log messages (#18669) --- .../sensor/central_communication_impl.go | 55 ++++++++++--------- 1 file changed, 30 insertions(+), 25 deletions(-) diff --git a/sensor/common/sensor/central_communication_impl.go b/sensor/common/sensor/central_communication_impl.go index c49af2110af25..103dd15b99776 100644 --- a/sensor/common/sensor/central_communication_impl.go +++ b/sensor/common/sensor/central_communication_impl.go @@ -209,39 +209,35 @@ func (s *centralCommunicationImpl) sendEvents(client central.SensorServiceClient log.Info("Communication with central ended.") } -func (s *centralCommunicationImpl) initialSync(ctx context.Context, stream central.SensorService_CommunicateClient, - hello *central.SensorHello, configHandler config.Handler, detector detector.Detector, -) error { +func (s *centralCommunicationImpl) hello(stream central.SensorService_CommunicateClient, hello *central.SensorHello) error { rawHdr, err := stream.Header() if err != nil { return errors.Wrap(err, "receiving headers from central") } - var centralHello *central.CentralHello + if metautils.MD(rawHdr).Get(centralsensor.SensorHelloMetadataKey) != "true" { + return errors.New("central did not acknowledge SensorHello," + + " likely due to a networking or TLS configuration issue" + + " (e.g., re-encrypt routes or TLS termination)" + + " preventing central from receiving sensor's TLS certificate") + } - hdr := metautils.MD(rawHdr) - if hdr.Get(centralsensor.SensorHelloMetadataKey) == "true" { - // Yay, central supports the "sensor hello" protocol! 
- err := stream.Send(¢ral.MsgFromSensor{Msg: ¢ral.MsgFromSensor_Hello{Hello: hello}}) - if err != nil { - return errors.Wrap(err, "sending SensorHello message to central") - } + var centralHello *central.CentralHello + err = stream.Send(¢ral.MsgFromSensor{Msg: ¢ral.MsgFromSensor_Hello{Hello: hello}}) + if err != nil { + return errors.Wrap(err, "sending SensorHello message to central") + } - firstMsg, err := stream.Recv() - if err != nil { - return errors.Wrap(err, "receiving first message from central") - } - centralHello = firstMsg.GetHello() - if centralHello == nil { - return errors.Errorf("first message received from central was not CentralHello but of type %T", firstMsg.GetMsg()) - } - } else { - // No sensor hello :( - log.Warn("Central is running a legacy version that might not support all current features") + firstMsg, err := stream.Recv() + if err != nil { + return errors.Wrap(err, "receiving first message from central") + } + centralHello = firstMsg.GetHello() + if centralHello == nil { + return errors.Errorf("first message received from central was not CentralHello but of type %T", firstMsg.GetMsg()) } - clusterID := centralHello.GetClusterId() - s.clusterID.Set(clusterID) + s.clusterID.Set(centralHello.GetClusterId()) if centralHello.GetManagedCentral() { log.Info("Central is managed") @@ -263,10 +259,19 @@ func (s *centralCommunicationImpl) initialSync(ctx context.Context, stream centr strconv.FormatBool(centralHello.GetSendDeduperState())) if hello.GetHelmManagedConfigInit() != nil { - if err := helmconfig.StoreCachedClusterID(clusterID); err != nil { + if err := helmconfig.StoreCachedClusterID(s.clusterID.GetNoWait()); err != nil { log.Warnf("Could not cache cluster ID: %v", err) } } + return nil +} + +func (s *centralCommunicationImpl) initialSync(ctx context.Context, stream central.SensorService_CommunicateClient, + hello *central.SensorHello, configHandler config.Handler, detector detector.Detector, +) error { + if err := s.hello(stream, hello); err != nil { + return errors.Wrap(err, "error while executing the sensor hello protocol") + } // DO NOT CHANGE THE ORDER. 
Please refer to `Run()` at `central/sensor/service/connection/connection_impl.go` if err := s.initialConfigSync(ctx, stream, configHandler); err != nil { From bc01df70dd5c7ef2b3713c9ce66154c1f398988c Mon Sep 17 00:00:00 2001 From: Khushboo Sancheti <42253461+clickboo@users.noreply.github.com> Date: Thu, 5 Feb 2026 23:43:16 +0530 Subject: [PATCH 121/232] ROX-24311: Detection and enforcement for pods/attach event (#18597) --- CHANGELOG.md | 1 + generated/storage/kube_event.pb.go | 154 +++++-- generated/storage/kube_event_vtproto.pb.go | 417 ++++++++++++++++++ .../templates/admission-controller.yaml | 1 + pkg/booleanpolicy/default_policies_test.go | 79 ++++ pkg/booleanpolicy/value_regex.go | 2 +- .../violationmessages/printer/kube_event.go | 72 ++- .../printer/kube_event_test.go | 265 +++++++++++ pkg/defaults/policies/files/pod_attach.json | 47 ++ pkg/kubernetes/event.go | 35 ++ proto/storage/kube_event.proto | 7 + proto/storage/proto.lock | 20 + .../src/main/groovy/objects/Deployment.groovy | 12 + .../orchestratormanager/Kubernetes.groovy | 59 +++ .../test/groovy/K8sEventDetectionTest.groovy | 171 +++++-- .../Step3/policyCriteriaDescriptors.tsx | 6 +- 16 files changed, 1263 insertions(+), 85 deletions(-) create mode 100644 pkg/defaults/policies/files/pod_attach.json diff --git a/CHANGELOG.md b/CHANGELOG.md index b7c7bd689d63f..0221ddb19121b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ Changes should still be described appropriately in JIRA/doc input pages, for inc ## [NEXT RELEASE] ### Added Features +- ROX-24311: Detection and enforcement for pods/attach Kubernetes event ### Removed Features diff --git a/generated/storage/kube_event.pb.go b/generated/storage/kube_event.pb.go index aaaf1b0b9366c..f16d6361dd25a 100644 --- a/generated/storage/kube_event.pb.go +++ b/generated/storage/kube_event.pb.go @@ -103,21 +103,23 @@ const ( KubernetesEvent_Object_NETWORK_POLICIES KubernetesEvent_Object_Resource = 7 KubernetesEvent_Object_SECURITY_CONTEXT_CONSTRAINTS KubernetesEvent_Object_Resource = 8 KubernetesEvent_Object_EGRESS_FIREWALLS KubernetesEvent_Object_Resource = 9 + KubernetesEvent_Object_PODS_ATTACH KubernetesEvent_Object_Resource = 10 ) // Enum value maps for KubernetesEvent_Object_Resource. 
var ( KubernetesEvent_Object_Resource_name = map[int32]string{ - 0: "UNKNOWN", - 1: "PODS_EXEC", - 2: "PODS_PORTFORWARD", - 3: "SECRETS", - 4: "CONFIGMAPS", - 5: "CLUSTER_ROLES", - 6: "CLUSTER_ROLE_BINDINGS", - 7: "NETWORK_POLICIES", - 8: "SECURITY_CONTEXT_CONSTRAINTS", - 9: "EGRESS_FIREWALLS", + 0: "UNKNOWN", + 1: "PODS_EXEC", + 2: "PODS_PORTFORWARD", + 3: "SECRETS", + 4: "CONFIGMAPS", + 5: "CLUSTER_ROLES", + 6: "CLUSTER_ROLE_BINDINGS", + 7: "NETWORK_POLICIES", + 8: "SECURITY_CONTEXT_CONSTRAINTS", + 9: "EGRESS_FIREWALLS", + 10: "PODS_ATTACH", } KubernetesEvent_Object_Resource_value = map[string]int32{ "UNKNOWN": 0, @@ -130,6 +132,7 @@ var ( "NETWORK_POLICIES": 7, "SECURITY_CONTEXT_CONSTRAINTS": 8, "EGRESS_FIREWALLS": 9, + "PODS_ATTACH": 10, } ) @@ -160,6 +163,7 @@ func (KubernetesEvent_Object_Resource) EnumDescriptor() ([]byte, []int) { return file_storage_kube_event_proto_rawDescGZIP(), []int{0, 0, 0} } +// Next Tag: 22 type KubernetesEvent struct { state protoimpl.MessageState `protogen:"open.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` @@ -173,6 +177,7 @@ type KubernetesEvent struct { // // *KubernetesEvent_PodExecArgs_ // *KubernetesEvent_PodPortForwardArgs_ + // *KubernetesEvent_PodAttachArgs_ ObjectArgs isKubernetesEvent_ObjectArgs `protobuf_oneof:"ObjectArgs"` // Extended arguments. May not be available for pod exec and port forward events. // These start at 15 because they were added after ObjectArgs and the previous tags are reserved in case it needs to be extended in the future. @@ -269,6 +274,15 @@ func (x *KubernetesEvent) GetPodPortForwardArgs() *KubernetesEvent_PodPortForwar return nil } +func (x *KubernetesEvent) GetPodAttachArgs() *KubernetesEvent_PodAttachArgs { + if x != nil { + if x, ok := x.ObjectArgs.(*KubernetesEvent_PodAttachArgs_); ok { + return x.PodAttachArgs + } + } + return nil +} + func (x *KubernetesEvent) GetUser() *KubernetesEvent_User { if x != nil { return x.User @@ -323,10 +337,16 @@ type KubernetesEvent_PodPortForwardArgs_ struct { PodPortForwardArgs *KubernetesEvent_PodPortForwardArgs `protobuf:"bytes,6,opt,name=pod_port_forward_args,json=podPortForwardArgs,proto3,oneof"` } +type KubernetesEvent_PodAttachArgs_ struct { + PodAttachArgs *KubernetesEvent_PodAttachArgs `protobuf:"bytes,21,opt,name=pod_attach_args,json=podAttachArgs,proto3,oneof"` +} + func (*KubernetesEvent_PodExecArgs_) isKubernetesEvent_ObjectArgs() {} func (*KubernetesEvent_PodPortForwardArgs_) isKubernetesEvent_ObjectArgs() {} +func (*KubernetesEvent_PodAttachArgs_) isKubernetesEvent_ObjectArgs() {} + type KubernetesEvent_Object struct { state protoimpl.MessageState `protogen:"open.v1"` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty" policy:"Kubernetes Resource Name"` // @gotags: policy:"Kubernetes Resource Name" @@ -491,6 +511,50 @@ func (x *KubernetesEvent_PodPortForwardArgs) GetPorts() []int32 { return nil } +type KubernetesEvent_PodAttachArgs struct { + state protoimpl.MessageState `protogen:"open.v1"` + Container string `protobuf:"bytes,1,opt,name=container,proto3" json:"container,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *KubernetesEvent_PodAttachArgs) Reset() { + *x = KubernetesEvent_PodAttachArgs{} + mi := &file_storage_kube_event_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *KubernetesEvent_PodAttachArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func 
(*KubernetesEvent_PodAttachArgs) ProtoMessage() {} + +func (x *KubernetesEvent_PodAttachArgs) ProtoReflect() protoreflect.Message { + mi := &file_storage_kube_event_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KubernetesEvent_PodAttachArgs.ProtoReflect.Descriptor instead. +func (*KubernetesEvent_PodAttachArgs) Descriptor() ([]byte, []int) { + return file_storage_kube_event_proto_rawDescGZIP(), []int{0, 3} +} + +func (x *KubernetesEvent_PodAttachArgs) GetContainer() string { + if x != nil { + return x.Container + } + return "" +} + type KubernetesEvent_ResponseStatus struct { state protoimpl.MessageState `protogen:"open.v1"` StatusCode int32 `protobuf:"varint,1,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` @@ -501,7 +565,7 @@ type KubernetesEvent_ResponseStatus struct { func (x *KubernetesEvent_ResponseStatus) Reset() { *x = KubernetesEvent_ResponseStatus{} - mi := &file_storage_kube_event_proto_msgTypes[4] + mi := &file_storage_kube_event_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -513,7 +577,7 @@ func (x *KubernetesEvent_ResponseStatus) String() string { func (*KubernetesEvent_ResponseStatus) ProtoMessage() {} func (x *KubernetesEvent_ResponseStatus) ProtoReflect() protoreflect.Message { - mi := &file_storage_kube_event_proto_msgTypes[4] + mi := &file_storage_kube_event_proto_msgTypes[5] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -526,7 +590,7 @@ func (x *KubernetesEvent_ResponseStatus) ProtoReflect() protoreflect.Message { // Deprecated: Use KubernetesEvent_ResponseStatus.ProtoReflect.Descriptor instead. func (*KubernetesEvent_ResponseStatus) Descriptor() ([]byte, []int) { - return file_storage_kube_event_proto_rawDescGZIP(), []int{0, 3} + return file_storage_kube_event_proto_rawDescGZIP(), []int{0, 4} } func (x *KubernetesEvent_ResponseStatus) GetStatusCode() int32 { @@ -553,7 +617,7 @@ type KubernetesEvent_User struct { func (x *KubernetesEvent_User) Reset() { *x = KubernetesEvent_User{} - mi := &file_storage_kube_event_proto_msgTypes[5] + mi := &file_storage_kube_event_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -565,7 +629,7 @@ func (x *KubernetesEvent_User) String() string { func (*KubernetesEvent_User) ProtoMessage() {} func (x *KubernetesEvent_User) ProtoReflect() protoreflect.Message { - mi := &file_storage_kube_event_proto_msgTypes[5] + mi := &file_storage_kube_event_proto_msgTypes[6] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -578,7 +642,7 @@ func (x *KubernetesEvent_User) ProtoReflect() protoreflect.Message { // Deprecated: Use KubernetesEvent_User.ProtoReflect.Descriptor instead. 
func (*KubernetesEvent_User) Descriptor() ([]byte, []int) { - return file_storage_kube_event_proto_rawDescGZIP(), []int{0, 4} + return file_storage_kube_event_proto_rawDescGZIP(), []int{0, 5} } func (x *KubernetesEvent_User) GetUsername() string { @@ -599,14 +663,15 @@ var File_storage_kube_event_proto protoreflect.FileDescriptor const file_storage_kube_event_proto_rawDesc = "" + "\n" + - "\x18storage/kube_event.proto\x12\astorage\x1a\x1fgoogle/protobuf/timestamp.proto\"\xa3\v\n" + + "\x18storage/kube_event.proto\x12\astorage\x1a\x1fgoogle/protobuf/timestamp.proto\"\xb5\f\n" + "\x0fKubernetesEvent\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x127\n" + "\x06object\x18\x02 \x01(\v2\x1f.storage.KubernetesEvent.ObjectR\x06object\x128\n" + "\ttimestamp\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\ttimestamp\x12;\n" + "\bapi_verb\x18\x04 \x01(\x0e2 .storage.KubernetesEvent.APIVerbR\aapiVerb\x12J\n" + "\rpod_exec_args\x18\x05 \x01(\v2$.storage.KubernetesEvent.PodExecArgsH\x00R\vpodExecArgs\x12`\n" + - "\x15pod_port_forward_args\x18\x06 \x01(\v2+.storage.KubernetesEvent.PodPortForwardArgsH\x00R\x12podPortForwardArgs\x121\n" + + "\x15pod_port_forward_args\x18\x06 \x01(\v2+.storage.KubernetesEvent.PodPortForwardArgsH\x00R\x12podPortForwardArgs\x12P\n" + + "\x0fpod_attach_args\x18\x15 \x01(\v2&.storage.KubernetesEvent.PodAttachArgsH\x00R\rpodAttachArgs\x121\n" + "\x04user\x18\x0f \x01(\v2\x1d.storage.KubernetesEvent.UserR\x04user\x12J\n" + "\x11impersonated_user\x18\x10 \x01(\v2\x1d.storage.KubernetesEvent.UserR\x10impersonatedUser\x12\x1d\n" + "\n" + @@ -615,13 +680,13 @@ const file_storage_kube_event_proto_rawDesc = "" + "user_agent\x18\x12 \x01(\tR\tuserAgent\x12P\n" + "\x0fresponse_status\x18\x13 \x01(\v2'.storage.KubernetesEvent.ResponseStatusR\x0eresponseStatus\x12\x1f\n" + "\vrequest_uri\x18\x14 \x01(\tR\n" + - "requestUri\x1a\xf7\x02\n" + + "requestUri\x1a\x88\x03\n" + "\x06Object\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x12D\n" + "\bresource\x18\x02 \x01(\x0e2(.storage.KubernetesEvent.Object.ResourceR\bresource\x12\x1d\n" + "\n" + "cluster_id\x18\x03 \x01(\tR\tclusterId\x12\x1c\n" + - "\tnamespace\x18\x04 \x01(\tR\tnamespace\"\xd5\x01\n" + + "\tnamespace\x18\x04 \x01(\tR\tnamespace\"\xe6\x01\n" + "\bResource\x12\v\n" + "\aUNKNOWN\x10\x00\x12\r\n" + "\tPODS_EXEC\x10\x01\x12\x14\n" + @@ -633,12 +698,16 @@ const file_storage_kube_event_proto_rawDesc = "" + "\x15CLUSTER_ROLE_BINDINGS\x10\x06\x12\x14\n" + "\x10NETWORK_POLICIES\x10\a\x12 \n" + "\x1cSECURITY_CONTEXT_CONSTRAINTS\x10\b\x12\x14\n" + - "\x10EGRESS_FIREWALLS\x10\t\x1aG\n" + + "\x10EGRESS_FIREWALLS\x10\t\x12\x0f\n" + + "\vPODS_ATTACH\x10\n" + + "\x1aG\n" + "\vPodExecArgs\x12\x1c\n" + "\tcontainer\x18\x01 \x01(\tR\tcontainer\x12\x1a\n" + "\bcommands\x18\x02 \x03(\tR\bcommands\x1a*\n" + "\x12PodPortForwardArgs\x12\x14\n" + - "\x05ports\x18\x01 \x03(\x05R\x05ports\x1aI\n" + + "\x05ports\x18\x01 \x03(\x05R\x05ports\x1a-\n" + + "\rPodAttachArgs\x12\x1c\n" + + "\tcontainer\x18\x01 \x01(\tR\tcontainer\x1aI\n" + "\x0eResponseStatus\x12\x1f\n" + "\vstatus_code\x18\x01 \x01(\x05R\n" + "statusCode\x12\x16\n" + @@ -676,7 +745,7 @@ func file_storage_kube_event_proto_rawDescGZIP() []byte { } var file_storage_kube_event_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_storage_kube_event_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_storage_kube_event_proto_msgTypes = make([]protoimpl.MessageInfo, 7) var file_storage_kube_event_proto_goTypes = []any{ (KubernetesEvent_APIVerb)(0), // 0: storage.KubernetesEvent.APIVerb 
(KubernetesEvent_Object_Resource)(0), // 1: storage.KubernetesEvent.Object.Resource @@ -684,25 +753,27 @@ var file_storage_kube_event_proto_goTypes = []any{ (*KubernetesEvent_Object)(nil), // 3: storage.KubernetesEvent.Object (*KubernetesEvent_PodExecArgs)(nil), // 4: storage.KubernetesEvent.PodExecArgs (*KubernetesEvent_PodPortForwardArgs)(nil), // 5: storage.KubernetesEvent.PodPortForwardArgs - (*KubernetesEvent_ResponseStatus)(nil), // 6: storage.KubernetesEvent.ResponseStatus - (*KubernetesEvent_User)(nil), // 7: storage.KubernetesEvent.User - (*timestamppb.Timestamp)(nil), // 8: google.protobuf.Timestamp + (*KubernetesEvent_PodAttachArgs)(nil), // 6: storage.KubernetesEvent.PodAttachArgs + (*KubernetesEvent_ResponseStatus)(nil), // 7: storage.KubernetesEvent.ResponseStatus + (*KubernetesEvent_User)(nil), // 8: storage.KubernetesEvent.User + (*timestamppb.Timestamp)(nil), // 9: google.protobuf.Timestamp } var file_storage_kube_event_proto_depIdxs = []int32{ - 3, // 0: storage.KubernetesEvent.object:type_name -> storage.KubernetesEvent.Object - 8, // 1: storage.KubernetesEvent.timestamp:type_name -> google.protobuf.Timestamp - 0, // 2: storage.KubernetesEvent.api_verb:type_name -> storage.KubernetesEvent.APIVerb - 4, // 3: storage.KubernetesEvent.pod_exec_args:type_name -> storage.KubernetesEvent.PodExecArgs - 5, // 4: storage.KubernetesEvent.pod_port_forward_args:type_name -> storage.KubernetesEvent.PodPortForwardArgs - 7, // 5: storage.KubernetesEvent.user:type_name -> storage.KubernetesEvent.User - 7, // 6: storage.KubernetesEvent.impersonated_user:type_name -> storage.KubernetesEvent.User - 6, // 7: storage.KubernetesEvent.response_status:type_name -> storage.KubernetesEvent.ResponseStatus - 1, // 8: storage.KubernetesEvent.Object.resource:type_name -> storage.KubernetesEvent.Object.Resource - 9, // [9:9] is the sub-list for method output_type - 9, // [9:9] is the sub-list for method input_type - 9, // [9:9] is the sub-list for extension type_name - 9, // [9:9] is the sub-list for extension extendee - 0, // [0:9] is the sub-list for field type_name + 3, // 0: storage.KubernetesEvent.object:type_name -> storage.KubernetesEvent.Object + 9, // 1: storage.KubernetesEvent.timestamp:type_name -> google.protobuf.Timestamp + 0, // 2: storage.KubernetesEvent.api_verb:type_name -> storage.KubernetesEvent.APIVerb + 4, // 3: storage.KubernetesEvent.pod_exec_args:type_name -> storage.KubernetesEvent.PodExecArgs + 5, // 4: storage.KubernetesEvent.pod_port_forward_args:type_name -> storage.KubernetesEvent.PodPortForwardArgs + 6, // 5: storage.KubernetesEvent.pod_attach_args:type_name -> storage.KubernetesEvent.PodAttachArgs + 8, // 6: storage.KubernetesEvent.user:type_name -> storage.KubernetesEvent.User + 8, // 7: storage.KubernetesEvent.impersonated_user:type_name -> storage.KubernetesEvent.User + 7, // 8: storage.KubernetesEvent.response_status:type_name -> storage.KubernetesEvent.ResponseStatus + 1, // 9: storage.KubernetesEvent.Object.resource:type_name -> storage.KubernetesEvent.Object.Resource + 10, // [10:10] is the sub-list for method output_type + 10, // [10:10] is the sub-list for method input_type + 10, // [10:10] is the sub-list for extension type_name + 10, // [10:10] is the sub-list for extension extendee + 0, // [0:10] is the sub-list for field type_name } func init() { file_storage_kube_event_proto_init() } @@ -713,6 +784,7 @@ func file_storage_kube_event_proto_init() { file_storage_kube_event_proto_msgTypes[0].OneofWrappers = []any{ (*KubernetesEvent_PodExecArgs_)(nil), 
(*KubernetesEvent_PodPortForwardArgs_)(nil), + (*KubernetesEvent_PodAttachArgs_)(nil), } type x struct{} out := protoimpl.TypeBuilder{ @@ -720,7 +792,7 @@ func file_storage_kube_event_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_storage_kube_event_proto_rawDesc), len(file_storage_kube_event_proto_rawDesc)), NumEnums: 2, - NumMessages: 6, + NumMessages: 7, NumExtensions: 0, NumServices: 0, }, diff --git a/generated/storage/kube_event_vtproto.pb.go b/generated/storage/kube_event_vtproto.pb.go index 778cd4e46b075..d3b717e47378d 100644 --- a/generated/storage/kube_event_vtproto.pb.go +++ b/generated/storage/kube_event_vtproto.pb.go @@ -85,6 +85,23 @@ func (m *KubernetesEvent_PodPortForwardArgs) CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *KubernetesEvent_PodAttachArgs) CloneVT() *KubernetesEvent_PodAttachArgs { + if m == nil { + return (*KubernetesEvent_PodAttachArgs)(nil) + } + r := new(KubernetesEvent_PodAttachArgs) + r.Container = m.Container + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *KubernetesEvent_PodAttachArgs) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *KubernetesEvent_ResponseStatus) CloneVT() *KubernetesEvent_ResponseStatus { if m == nil { return (*KubernetesEvent_ResponseStatus)(nil) @@ -178,6 +195,15 @@ func (m *KubernetesEvent_PodPortForwardArgs_) CloneVT() isKubernetesEvent_Object return r } +func (m *KubernetesEvent_PodAttachArgs_) CloneVT() isKubernetesEvent_ObjectArgs { + if m == nil { + return (*KubernetesEvent_PodAttachArgs_)(nil) + } + r := new(KubernetesEvent_PodAttachArgs_) + r.PodAttachArgs = m.PodAttachArgs.CloneVT() + return r +} + func (this *KubernetesEvent_Object) EqualVT(that *KubernetesEvent_Object) bool { if this == that { return true @@ -259,6 +285,25 @@ func (this *KubernetesEvent_PodPortForwardArgs) EqualMessageVT(thatMsg proto.Mes } return this.EqualVT(that) } +func (this *KubernetesEvent_PodAttachArgs) EqualVT(that *KubernetesEvent_PodAttachArgs) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Container != that.Container { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *KubernetesEvent_PodAttachArgs) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*KubernetesEvent_PodAttachArgs) + if !ok { + return false + } + return this.EqualVT(that) +} func (this *KubernetesEvent_ResponseStatus) EqualVT(that *KubernetesEvent_ResponseStatus) bool { if this == that { return true @@ -423,6 +468,31 @@ func (this *KubernetesEvent_PodPortForwardArgs_) EqualVT(thatIface isKubernetesE return true } +func (this *KubernetesEvent_PodAttachArgs_) EqualVT(thatIface isKubernetesEvent_ObjectArgs) bool { + that, ok := thatIface.(*KubernetesEvent_PodAttachArgs_) + if !ok { + return false + } + if this == that { + return true + } + if this == nil && that != nil || this != nil && that == nil { + return false + } + if p, q := this.PodAttachArgs, that.PodAttachArgs; p != q { + if p == nil { + p = &KubernetesEvent_PodAttachArgs{} + } + if q == nil { + q = &KubernetesEvent_PodAttachArgs{} + } + if !p.EqualVT(q) { + return false + } + } + return true +} + func (m *KubernetesEvent_Object) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -585,6 +655,46 @@ func (m *KubernetesEvent_PodPortForwardArgs) 
MarshalToSizedBufferVT(dAtA []byte) return len(dAtA) - i, nil } +func (m *KubernetesEvent_PodAttachArgs) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KubernetesEvent_PodAttachArgs) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *KubernetesEvent_PodAttachArgs) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Container) > 0 { + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *KubernetesEvent_ResponseStatus) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -862,6 +972,33 @@ func (m *KubernetesEvent_PodPortForwardArgs_) MarshalToSizedBufferVT(dAtA []byte } return len(dAtA) - i, nil } +func (m *KubernetesEvent_PodAttachArgs_) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *KubernetesEvent_PodAttachArgs_) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PodAttachArgs != nil { + size, err := m.PodAttachArgs.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } else { + i = protohelpers.EncodeVarint(dAtA, i, 0) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + return len(dAtA) - i, nil +} func (m *KubernetesEvent_Object) SizeVT() (n int) { if m == nil { return 0 @@ -924,6 +1061,20 @@ func (m *KubernetesEvent_PodPortForwardArgs) SizeVT() (n int) { return n } +func (m *KubernetesEvent_PodAttachArgs) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Container) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + func (m *KubernetesEvent_ResponseStatus) SizeVT() (n int) { if m == nil { return 0 @@ -1043,6 +1194,20 @@ func (m *KubernetesEvent_PodPortForwardArgs_) SizeVT() (n int) { } return n } +func (m *KubernetesEvent_PodAttachArgs_) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PodAttachArgs != nil { + l = m.PodAttachArgs.SizeVT() + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } else { + n += 3 + } + return n +} func (m *KubernetesEvent_Object) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1451,6 +1616,89 @@ func (m *KubernetesEvent_PodPortForwardArgs) UnmarshalVT(dAtA []byte) error { } return nil } +func (m *KubernetesEvent_PodAttachArgs) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KubernetesEvent_PodAttachArgs: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: KubernetesEvent_PodAttachArgs: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *KubernetesEvent_ResponseStatus) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -2106,6 +2354,47 @@ func (m *KubernetesEvent) UnmarshalVT(dAtA []byte) error { } m.RequestUri = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAttachArgs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if oneof, ok := m.ObjectArgs.(*KubernetesEvent_PodAttachArgs_); ok { + if err := oneof.PodAttachArgs.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + v := &KubernetesEvent_PodAttachArgs{} + if err := v.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.ObjectArgs = &KubernetesEvent_PodAttachArgs_{PodAttachArgs: v} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -2556,6 +2845,93 @@ func (m *KubernetesEvent_PodPortForwardArgs) UnmarshalVTUnsafe(dAtA []byte) erro } return nil } +func (m *KubernetesEvent_PodAttachArgs) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KubernetesEvent_PodAttachArgs: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KubernetesEvent_PodAttachArgs: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var stringValue string + if intStringLen > 0 { + stringValue = unsafe.String(&dAtA[iNdEx], intStringLen) + } + m.Container = stringValue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *KubernetesEvent_ResponseStatus) UnmarshalVTUnsafe(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -3239,6 +3615,47 @@ func (m *KubernetesEvent) UnmarshalVTUnsafe(dAtA []byte) error { } m.RequestUri = stringValue iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAttachArgs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if oneof, ok := m.ObjectArgs.(*KubernetesEvent_PodAttachArgs_); ok { + if err := oneof.PodAttachArgs.UnmarshalVTUnsafe(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + v := &KubernetesEvent_PodAttachArgs{} + if err := v.UnmarshalVTUnsafe(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.ObjectArgs = &KubernetesEvent_PodAttachArgs_{PodAttachArgs: v} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) diff --git a/image/templates/helm/stackrox-secured-cluster/templates/admission-controller.yaml b/image/templates/helm/stackrox-secured-cluster/templates/admission-controller.yaml index a27066fbc6df2..d6786bc789d6e 100644 --- a/image/templates/helm/stackrox-secured-cluster/templates/admission-controller.yaml +++ b/image/templates/helm/stackrox-secured-cluster/templates/admission-controller.yaml @@ -264,6 +264,7 @@ webhooks: - pods - pods/exec - pods/portforward + - pods/attach failurePolicy: {{ ._rox.admissionControl.failurePolicy | quote }} clientConfig: caBundle: {{ required "The 'ca.cert' config option MUST be set to StackRox's Service CA certificate in order for the admission controller to be usable" ._rox.ca._cert | b64enc }} diff --git a/pkg/booleanpolicy/default_policies_test.go b/pkg/booleanpolicy/default_policies_test.go index ad8356f49150b..30d3a641f3856 100644 --- a/pkg/booleanpolicy/default_policies_test.go +++ b/pkg/booleanpolicy/default_policies_test.go @@ -3283,6 +3283,7 @@ func (suite *DefaultPoliciesTestSuite) TestProcessBaseline() { func (suite *DefaultPoliciesTestSuite) TestKubeEventConstraints() { podExecGroup := 
policyGroupWithSingleKeyValue(fieldnames.KubeResource, "PODS_EXEC", false) + podAttachGroup := policyGroupWithSingleKeyValue(fieldnames.KubeResource, "PODS_ATTACH", false) aptGetGroup := policyGroupWithSingleKeyValue(fieldnames.ProcessName, "apt-get", false) @@ -3293,6 +3294,7 @@ func (suite *DefaultPoliciesTestSuite) TestKubeEventConstraints() { builderErr bool withProcessSection bool }{ + // PODS_EXEC test cases { event: podExecEvent("p1", "c1", "cmd"), groups: []*storage.PolicyGroup{podExecGroup}, @@ -3322,6 +3324,42 @@ func (suite *DefaultPoliciesTestSuite) TestKubeEventConstraints() { expectedViolations: []*storage.Alert_Violation{podExecViolationMsg("p1", "c1", "")}, withProcessSection: true, }, + // PODS_ATTACH test cases + { + event: podAttachEvent("p1", "c1"), + groups: []*storage.PolicyGroup{podAttachGroup}, + expectedViolations: []*storage.Alert_Violation{podAttachViolationMsg("p1", "c1")}, + }, + { + event: podAttachEvent("p1", ""), + groups: []*storage.PolicyGroup{podAttachGroup}, + expectedViolations: []*storage.Alert_Violation{podAttachViolationMsg("p1", "")}, + }, + { + // No event provided, should not match + groups: []*storage.PolicyGroup{podAttachGroup}, + }, + { + // Port forward event should not match attach policy + event: podPortForwardEvent("p1", 8000), + groups: []*storage.PolicyGroup{podAttachGroup}, + }, + { + // Exec event should not match attach policy + event: podExecEvent("p1", "c1", "cmd"), + groups: []*storage.PolicyGroup{podAttachGroup}, + }, + { + // Attach event should not match exec policy + event: podAttachEvent("p1", "c1"), + groups: []*storage.PolicyGroup{podExecGroup}, + }, + { + // Attach policy with process group should fail builder + event: podAttachEvent("p1", "c1"), + groups: []*storage.PolicyGroup{podAttachGroup, aptGetGroup}, + builderErr: true, + }, } { suite.T().Run(fmt.Sprintf("%+v", c.groups), func(t *testing.T) { policy := policyWithGroups(storage.EventSource_DEPLOYMENT_EVENT, c.groups...) 
@@ -4076,6 +4114,47 @@ func podPortForwardEvent(pod string, port int32) *storage.KubernetesEvent { } } +func podAttachEvent(pod, container string) *storage.KubernetesEvent { + return &storage.KubernetesEvent{ + Object: &storage.KubernetesEvent_Object{ + Name: pod, + Resource: storage.KubernetesEvent_Object_PODS_ATTACH, + }, + ObjectArgs: &storage.KubernetesEvent_PodAttachArgs_{ + PodAttachArgs: &storage.KubernetesEvent_PodAttachArgs{ + Container: container, + }, + }, + } +} + +func podAttachViolationMsg(pod, container string) *storage.Alert_Violation { + attrs := []*storage.Alert_Violation_KeyValueAttrs_KeyValueAttr{ + {Key: "pod", Value: pod}, + } + if container != "" { + attrs = append(attrs, &storage.Alert_Violation_KeyValueAttrs_KeyValueAttr{Key: "container", Value: container}) + } + + message := "Kubernetes API received attach request" + if pod != "" { + message = fmt.Sprintf("Kubernetes API received attach request to pod '%s'", pod) + if container != "" { + message = fmt.Sprintf("Kubernetes API received attach request to pod '%s' container '%s'", pod, container) + } + } + + return &storage.Alert_Violation{ + Message: message, + Type: storage.Alert_Violation_K8S_EVENT, + MessageAttributes: &storage.Alert_Violation_KeyValueAttrs_{ + KeyValueAttrs: &storage.Alert_Violation_KeyValueAttrs{ + Attrs: attrs, + }, + }, + } +} + func assertViolations(t testing.TB, expected, actual Violations) { t.Helper() protoassert.Equal(t, expected.ProcessViolation, actual.ProcessViolation) diff --git a/pkg/booleanpolicy/value_regex.go b/pkg/booleanpolicy/value_regex.go index ee4422b2ebeae..705b6f2619748 100644 --- a/pkg/booleanpolicy/value_regex.go +++ b/pkg/booleanpolicy/value_regex.go @@ -38,7 +38,7 @@ var ( addCapabilitiesValueRegex = createRegex("(?i:(AUDIT_CONTROL|AUDIT_READ|AUDIT_WRITE|BLOCK_SUSPEND|CHOWN|DAC_OVERRIDE|DAC_READ_SEARCH|FOWNER|FSETID|IPC_LOCK|IPC_OWNER|KILL|LEASE|LINUX_IMMUTABLE|MAC_ADMIN|MAC_OVERRIDE|MKNOD|NET_ADMIN|NET_BIND_SERVICE|NET_BROADCAST|NET_RAW|SETGID|SETFCAP|SETPCAP|SETUID|SYS_ADMIN|SYS_BOOT|SYS_CHROOT|SYS_MODULE|SYS_NICE|SYS_PACCT|SYS_PTRACE|SYS_RAWIO|SYS_RESOURCE|SYS_TIME|SYS_TTY_CONFIG|SYSLOG|WAKE_ALARM))") rbacPermissionValueRegex = createRegex("(?i:DEFAULT|ELEVATED_IN_NAMESPACE|ELEVATED_CLUSTER_WIDE|CLUSTER_ADMIN)") portExposureValueRegex = createRegex("(?i:UNSET|EXTERNAL|NODE|HOST|INTERNAL|ROUTE)") - kubernetesResourceValueRegex = createRegex(`(?i:PODS_EXEC|PODS_PORTFORWARD)`) + kubernetesResourceValueRegex = createRegex(`(?i:PODS_EXEC|PODS_PORTFORWARD|PODS_ATTACH)`) mountPropagationValueRegex = createRegex("(?i:NONE|HOSTTOCONTAINER|BIDIRECTIONAL)") seccompProfileTypeValueRegex = createRegex(`(?i:UNCONFINED|RUNTIME_DEFAULT|LOCALHOST)`) severityValueRegex = createRegex(`(<|>|<=|>=)?[[:space:]]*(?i:UNKNOWN|LOW|MODERATE|IMPORTANT|CRITICAL)`) diff --git a/pkg/booleanpolicy/violationmessages/printer/kube_event.go b/pkg/booleanpolicy/violationmessages/printer/kube_event.go index 5f705074db665..b62b74152d314 100644 --- a/pkg/booleanpolicy/violationmessages/printer/kube_event.go +++ b/pkg/booleanpolicy/violationmessages/printer/kube_event.go @@ -50,6 +50,8 @@ func GenerateKubeEventViolationMsg(event *storage.KubernetesEvent) *storage.Aler message, attrs = podExecViolationMsg(event) case storage.KubernetesEvent_Object_PODS_PORTFORWARD: message, attrs = podPortForwardViolationMsg(event) + case storage.KubernetesEvent_Object_PODS_ATTACH: + message, attrs = podAttachViolationMsg(event) default: message, attrs = defaultViolationMsg(event) } @@ -78,6 +80,10 @@ func 
podPortForwardViolationMsg(event *storage.KubernetesEvent) (string, []*stor return getPFMsgHeader(event), getPFMsgViolationAttr(event) } +func podAttachViolationMsg(event *storage.KubernetesEvent) (string, []*storage.Alert_Violation_KeyValueAttrs_KeyValueAttr) { + return getAttachMsgHeader(event), getAttachMsgViolationAttr(event) +} + func getDefaultViolationMsgHeader(event *storage.KubernetesEvent) string { object := event.GetObject() readableResourceName := strings.ToLower(object.GetResource().String()) @@ -172,25 +178,47 @@ func getExecMsgHeader(event *storage.KubernetesEvent) string { return stringutils.JoinNonEmpty(" ", prefix, cmds, "request", pod, container) } -func getExecMsgViolationAttr(event *storage.KubernetesEvent) []*storage.Alert_Violation_KeyValueAttrs_KeyValueAttr { - attrs := make([]*storage.Alert_Violation_KeyValueAttrs_KeyValueAttr, 0, 3) +// podEventAttributes holds optional event-specific attributes for pod-related events. +type podEventAttributes struct { + container string + commands string + ports string +} + +// getPodEventViolationAttr builds violation attributes for pod events (exec, attach, port-forward). +// It handles the common pod attribute and appends event-specific and default attributes. +func getPodEventViolationAttr(event *storage.KubernetesEvent, eventSpecificAttr podEventAttributes) []*storage.Alert_Violation_KeyValueAttrs_KeyValueAttr { + attrs := make([]*storage.Alert_Violation_KeyValueAttrs_KeyValueAttr, 0, 4) + + // Common: pod name if pod := event.GetObject().GetName(); pod != "" { attrs = append(attrs, &storage.Alert_Violation_KeyValueAttrs_KeyValueAttr{Key: PodKey, Value: pod}) } - args := event.GetPodExecArgs() - if container := args.GetContainer(); container != "" { - attrs = append(attrs, &storage.Alert_Violation_KeyValueAttrs_KeyValueAttr{Key: ContainerKey, Value: container}) + // Event-specific attributes + if eventSpecificAttr.container != "" { + attrs = append(attrs, &storage.Alert_Violation_KeyValueAttrs_KeyValueAttr{Key: ContainerKey, Value: eventSpecificAttr.container}) } - - if cmds := stringutils.JoinNonEmpty(" ", args.GetCommands()...); cmds != "" { - attrs = append(attrs, &storage.Alert_Violation_KeyValueAttrs_KeyValueAttr{Key: CommandsKey, Value: cmds}) + if eventSpecificAttr.commands != "" { + attrs = append(attrs, &storage.Alert_Violation_KeyValueAttrs_KeyValueAttr{Key: CommandsKey, Value: eventSpecificAttr.commands}) + } + if eventSpecificAttr.ports != "" { + attrs = append(attrs, &storage.Alert_Violation_KeyValueAttrs_KeyValueAttr{Key: PortsKey, Value: eventSpecificAttr.ports}) } + // Common: default attrs attrs = append(attrs, getDefaultViolationMsgViolationAttr(event, &attributeOptions{skipVerb: true, skipResourceURI: true})...) return attrs } +func getExecMsgViolationAttr(event *storage.KubernetesEvent) []*storage.Alert_Violation_KeyValueAttrs_KeyValueAttr { + args := event.GetPodExecArgs() + return getPodEventViolationAttr(event, podEventAttributes{ + container: args.GetContainer(), + commands: stringutils.JoinNonEmpty(" ", args.GetCommands()...), + }) +} + func getPFMsgHeader(event *storage.KubernetesEvent) string { pod := event.GetObject().GetName() ports := stringutils.JoinInt32(", ", event.GetPodPortForwardArgs().GetPorts()...) 
@@ -208,15 +236,29 @@ func getPFMsgHeader(event *storage.KubernetesEvent) string { } func getPFMsgViolationAttr(event *storage.KubernetesEvent) []*storage.Alert_Violation_KeyValueAttrs_KeyValueAttr { - attrs := make([]*storage.Alert_Violation_KeyValueAttrs_KeyValueAttr, 0, 2) - if pod := event.GetObject().GetName(); pod != "" { - attrs = append(attrs, &storage.Alert_Violation_KeyValueAttrs_KeyValueAttr{Key: PodKey, Value: pod}) + return getPodEventViolationAttr(event, podEventAttributes{ + ports: stringutils.JoinInt32(", ", event.GetPodPortForwardArgs().GetPorts()...), + }) +} + +func getAttachMsgHeader(event *storage.KubernetesEvent) string { + pod := event.GetObject().GetName() + container := event.GetPodAttachArgs().GetContainer() + + prefix := "Kubernetes API received attach" + if pod != "" { + pod = fmt.Sprintf("to pod '%s'", pod) } - if ports := stringutils.JoinInt32(", ", event.GetPodPortForwardArgs().GetPorts()...); ports != "" { - attrs = append(attrs, &storage.Alert_Violation_KeyValueAttrs_KeyValueAttr{Key: PortsKey, Value: ports}) + if container != "" { + container = fmt.Sprintf("container '%s'", container) } - attrs = append(attrs, getDefaultViolationMsgViolationAttr(event, &attributeOptions{skipVerb: true, skipResourceURI: true})...) - return attrs + return stringutils.JoinNonEmpty(" ", prefix, "request", pod, container) +} + +func getAttachMsgViolationAttr(event *storage.KubernetesEvent) []*storage.Alert_Violation_KeyValueAttrs_KeyValueAttr { + return getPodEventViolationAttr(event, podEventAttributes{ + container: event.GetPodAttachArgs().GetContainer(), + }) } diff --git a/pkg/booleanpolicy/violationmessages/printer/kube_event_test.go b/pkg/booleanpolicy/violationmessages/printer/kube_event_test.go index 5c214c3f837f0..de4fd678ce4ee 100644 --- a/pkg/booleanpolicy/violationmessages/printer/kube_event_test.go +++ b/pkg/booleanpolicy/violationmessages/printer/kube_event_test.go @@ -90,6 +90,202 @@ func validateViolationAttrs(t *testing.T, kubeEvent *storage.KubernetesEvent, ex } } +func TestViolationMessageForPodExecEvents(t *testing.T) { + cases := []struct { + testName string + podName string + container string + commands []string + expectedMsg string + }{ + { + "Exec with pod, container, and commands", + "my-pod", + "my-container", + []string{"ls", "-l"}, + "Kubernetes API received exec 'ls -l' request into pod 'my-pod' container 'my-container'", + }, + { + "Exec with pod and commands, no container", + "my-pod", + "", + []string{"cat", "/etc/passwd"}, + "Kubernetes API received exec 'cat /etc/passwd' request into pod 'my-pod'", + }, + { + "Exec with pod and container, no commands", + "my-pod", + "my-container", + nil, + "Kubernetes API received exec request into pod 'my-pod' container 'my-container'", + }, + { + "Exec with pod only", + "my-pod", + "", + nil, + "Kubernetes API received exec request into pod 'my-pod'", + }, + { + "Exec with no details", + "", + "", + nil, + "Kubernetes API received exec request", + }, + } + + for _, c := range cases { + t.Run(c.testName, func(t *testing.T) { + kubeEvent := getPodExecEvent(c.podName, c.container, c.commands) + violation := GenerateKubeEventViolationMsg(kubeEvent) + assert.Equal(t, c.expectedMsg, violation.GetMessage()) + assert.Equal(t, storage.Alert_Violation_K8S_EVENT, violation.GetType()) + }) + } +} + +func TestViolationAttrsForPodExecEvents(t *testing.T) { + kubeEvent := getPodExecEvent("my-pod", "my-container", []string{"ls", "-l"}) + violation := GenerateKubeEventViolationMsg(kubeEvent) + attrs := 
violation.GetKeyValueAttrs().GetAttrs() + + expectedAttrs := map[string]string{ + PodKey: "my-pod", + ContainerKey: "my-container", + CommandsKey: "ls -l", + UsernameKey: "username", + UserGroupsKey: "groupA, groupB", + UserAgentKey: "curl", + IPAddressKey: "192.168.1.1, 127.0.0.1", + } + + assert.Len(t, attrs, len(expectedAttrs)) + for _, a := range attrs { + assert.Equal(t, expectedAttrs[a.GetKey()], a.GetValue()) + } +} + +func TestViolationMessageForPodPortForwardEvents(t *testing.T) { + cases := []struct { + testName string + podName string + ports []int32 + expectedMsg string + }{ + { + "Port forward with pod and ports", + "my-pod", + []int32{8080, 9090}, + "Kubernetes API received port forward request to pod 'my-pod' ports '8080, 9090'", + }, + { + "Port forward with pod and single port", + "my-pod", + []int32{8080}, + "Kubernetes API received port forward request to pod 'my-pod' ports '8080'", + }, + { + "Port forward with pod only", + "my-pod", + nil, + "Kubernetes API received port forward request to pod 'my-pod'", + }, + { + "Port forward with no details", + "", + nil, + "Kubernetes API received port forward request", + }, + } + + for _, c := range cases { + t.Run(c.testName, func(t *testing.T) { + kubeEvent := getPodPortForwardEvent(c.podName, c.ports) + violation := GenerateKubeEventViolationMsg(kubeEvent) + assert.Equal(t, c.expectedMsg, violation.GetMessage()) + assert.Equal(t, storage.Alert_Violation_K8S_EVENT, violation.GetType()) + }) + } +} + +func TestViolationAttrsForPodPortForwardEvents(t *testing.T) { + kubeEvent := getPodPortForwardEvent("my-pod", []int32{8080, 9090}) + violation := GenerateKubeEventViolationMsg(kubeEvent) + attrs := violation.GetKeyValueAttrs().GetAttrs() + + expectedAttrs := map[string]string{ + PodKey: "my-pod", + PortsKey: "8080, 9090", + UsernameKey: "username", + UserGroupsKey: "groupA, groupB", + UserAgentKey: "curl", + IPAddressKey: "192.168.1.1, 127.0.0.1", + } + + assert.Len(t, attrs, len(expectedAttrs)) + for _, a := range attrs { + assert.Equal(t, expectedAttrs[a.GetKey()], a.GetValue()) + } +} + +func TestViolationMessageForPodAttachEvents(t *testing.T) { + cases := []struct { + testName string + podName string + container string + expectedMsg string + }{ + { + "Attach with pod and container", + "my-pod", + "my-container", + "Kubernetes API received attach request to pod 'my-pod' container 'my-container'", + }, + { + "Attach with pod only", + "my-pod", + "", + "Kubernetes API received attach request to pod 'my-pod'", + }, + { + "Attach with no details", + "", + "", + "Kubernetes API received attach request", + }, + } + + for _, c := range cases { + t.Run(c.testName, func(t *testing.T) { + kubeEvent := getPodAttachEvent(c.podName, c.container) + violation := GenerateKubeEventViolationMsg(kubeEvent) + assert.Equal(t, c.expectedMsg, violation.GetMessage()) + assert.Equal(t, storage.Alert_Violation_K8S_EVENT, violation.GetType()) + }) + } +} + +func TestViolationAttrsForPodAttachEvents(t *testing.T) { + kubeEvent := getPodAttachEvent("my-pod", "my-container") + violation := GenerateKubeEventViolationMsg(kubeEvent) + attrs := violation.GetKeyValueAttrs().GetAttrs() + + expectedAttrs := map[string]string{ + PodKey: "my-pod", + ContainerKey: "my-container", + UsernameKey: "username", + UserGroupsKey: "groupA, groupB", + UserAgentKey: "curl", + IPAddressKey: "192.168.1.1, 127.0.0.1", + } + + assert.Len(t, attrs, len(expectedAttrs)) + for _, a := range attrs { + assert.Equal(t, expectedAttrs[a.GetKey()], a.GetValue()) + } +} + func 
getKubeEvent(resource storage.KubernetesEvent_Object_Resource, verb storage.KubernetesEvent_APIVerb, clusterID, namespace, name string) *storage.KubernetesEvent { requestURI := fmt.Sprintf("/api/v1/namespaces/%s/%s/%s", namespace, strings.ToLower(resource.String()), name) if verb == storage.KubernetesEvent_LIST { @@ -118,3 +314,72 @@ func getKubeEvent(resource storage.KubernetesEvent_Object_Resource, verb storage RequestUri: requestURI, } } + +func getBaseKubeEvent() *storage.KubernetesEvent { + return &storage.KubernetesEvent{ + Id: uuid.NewV4().String(), + Timestamp: protocompat.TimestampNow(), + User: &storage.KubernetesEvent_User{ + Username: "username", + Groups: []string{"groupA", "groupB"}, + }, + SourceIps: []string{"192.168.1.1", "127.0.0.1"}, + UserAgent: "curl", + ResponseStatus: &storage.KubernetesEvent_ResponseStatus{ + StatusCode: 200, + Reason: "OK", + }, + } +} + +func getPodExecEvent(podName, container string, commands []string) *storage.KubernetesEvent { + event := getBaseKubeEvent() + event.Object = &storage.KubernetesEvent_Object{ + Name: podName, + Resource: storage.KubernetesEvent_Object_PODS_EXEC, + ClusterId: "cluster-id", + Namespace: "ns", + } + event.ApiVerb = storage.KubernetesEvent_CREATE + event.ObjectArgs = &storage.KubernetesEvent_PodExecArgs_{ + PodExecArgs: &storage.KubernetesEvent_PodExecArgs{ + Container: container, + Commands: commands, + }, + } + return event +} + +func getPodPortForwardEvent(podName string, ports []int32) *storage.KubernetesEvent { + event := getBaseKubeEvent() + event.Object = &storage.KubernetesEvent_Object{ + Name: podName, + Resource: storage.KubernetesEvent_Object_PODS_PORTFORWARD, + ClusterId: "cluster-id", + Namespace: "ns", + } + event.ApiVerb = storage.KubernetesEvent_CREATE + event.ObjectArgs = &storage.KubernetesEvent_PodPortForwardArgs_{ + PodPortForwardArgs: &storage.KubernetesEvent_PodPortForwardArgs{ + Ports: ports, + }, + } + return event +} + +func getPodAttachEvent(podName, container string) *storage.KubernetesEvent { + event := getBaseKubeEvent() + event.Object = &storage.KubernetesEvent_Object{ + Name: podName, + Resource: storage.KubernetesEvent_Object_PODS_ATTACH, + ClusterId: "cluster-id", + Namespace: "ns", + } + event.ApiVerb = storage.KubernetesEvent_CREATE + event.ObjectArgs = &storage.KubernetesEvent_PodAttachArgs_{ + PodAttachArgs: &storage.KubernetesEvent_PodAttachArgs{ + Container: container, + }, + } + return event +} diff --git a/pkg/defaults/policies/files/pod_attach.json b/pkg/defaults/policies/files/pod_attach.json new file mode 100644 index 0000000000000..b192e09832d02 --- /dev/null +++ b/pkg/defaults/policies/files/pod_attach.json @@ -0,0 +1,47 @@ +{ + "id": "fe59d982-a155-4aac-bf36-a74aa29ea565", + "name": "Kubernetes Actions: Attach to Pod", + "description": "Alerts when Kubernetes API receives request to attach to a container", + "rationale": "'pods/attach' is non-standard approach for interacting with containers. Attackers with permissions could execute malicious code and compromise resources within a cluster", + "remediation": "Restrict RBAC access to the 'pods/attach' resource according to the Principle of Least Privilege. 
Limit such usage only to development, testing or debugging (non-production) activities", + "categories": [ + "Kubernetes Events" + ], + "lifecycleStages": [ + "RUNTIME" + ], + "eventSource": "DEPLOYMENT_EVENT", + "severity": "HIGH_SEVERITY", + "policyVersion": "1.1", + "policySections": [ + { + "policyGroups": [ + { + "fieldName": "Kubernetes Resource", + "values": [ + { + "value": "PODS_ATTACH" + } + ] + } + ] + } + ], + "mitreAttackVectors": [ + { + "tactic": "TA0002", + "techniques": [ + "T1609" + ] + }, + { + "tactic": "TA0002", + "techniques": [ + "T1059.004" + ] + } + ], + "criteriaLocked": true, + "mitreVectorsLocked": true, + "isDefault": true +} diff --git a/pkg/kubernetes/event.go b/pkg/kubernetes/event.go index de731ed609229..0e6cf1a17bad2 100644 --- a/pkg/kubernetes/event.go +++ b/pkg/kubernetes/event.go @@ -15,11 +15,13 @@ import ( const ( podExecOptionsKind = "PodExecOptions" podPortForwardOptionsKind = "PodPortForwardOptions" + podAttachOptionsKind = "PodAttachOptions" ) var ( supportedAPIVerbs = map[admission.Operation]storage.KubernetesEvent_APIVerb{ admission.Connect: storage.KubernetesEvent_CREATE, + admission.Create: storage.KubernetesEvent_CREATE, } universalDeserializer = serializer.NewCodecFactory(runtime.NewScheme()).UniversalDeserializer() @@ -53,6 +55,8 @@ func AdmissionRequestToKubeEventObj(req *admission.AdmissionRequest) (*storage.K return podExecEvent(req) case podPortForwardOptionsKind: return podPortForwardEvent(req) + case podAttachOptionsKind: + return podAttachEvent(req) default: return nil, ErrUnsupportedRequestKind.CausedByf("%q", req.Kind) } @@ -90,6 +94,37 @@ func podExecEvent(req *admission.AdmissionRequest) (*storage.KubernetesEvent, er }, nil } +func podAttachEvent(req *admission.AdmissionRequest) (*storage.KubernetesEvent, error) { + apiVerb, supported := supportedAPIVerbs[req.Operation] + if !supported { + return nil, ErrUnsupportedAPIVerb.CausedByf("%q", req.Operation) + } + + var obj core.PodAttachOptions + if _, _, err := universalDeserializer.Decode(req.Object.Raw, nil, &obj); err != nil { + return nil, err + } + + return &storage.KubernetesEvent{ + Id: string(req.UID), + ApiVerb: apiVerb, + Object: &storage.KubernetesEvent_Object{ + Name: req.Name, + Resource: storage.KubernetesEvent_Object_PODS_ATTACH, + Namespace: req.Namespace, + }, + ObjectArgs: &storage.KubernetesEvent_PodAttachArgs_{ + PodAttachArgs: &storage.KubernetesEvent_PodAttachArgs{ + Container: obj.Container, + }, + }, + User: &storage.KubernetesEvent_User{ + Username: req.UserInfo.Username, + Groups: req.UserInfo.Groups, + }, + }, nil +} + func podPortForwardEvent(req *admission.AdmissionRequest) (*storage.KubernetesEvent, error) { apiVerb, supported := supportedAPIVerbs[req.Operation] if !supported { diff --git a/proto/storage/kube_event.proto b/proto/storage/kube_event.proto index 69374a9e0177b..9585ef7055415 100644 --- a/proto/storage/kube_event.proto +++ b/proto/storage/kube_event.proto @@ -7,6 +7,7 @@ import "google/protobuf/timestamp.proto"; option go_package = "./storage;storage"; option java_package = "io.stackrox.proto.storage"; +//Next Tag: 22 message KubernetesEvent { enum APIVerb { UNKNOWN = 0; @@ -33,6 +34,7 @@ message KubernetesEvent { NETWORK_POLICIES = 7; SECURITY_CONTEXT_CONSTRAINTS = 8; EGRESS_FIREWALLS = 9; + PODS_ATTACH = 10; } string name = 1; // @gotags: policy:"Kubernetes Resource Name" @@ -51,6 +53,7 @@ message KubernetesEvent { oneof ObjectArgs { PodExecArgs pod_exec_args = 5; PodPortForwardArgs pod_port_forward_args = 6; + PodAttachArgs 
pod_attach_args = 21; } // Extended arguments. May not be available for pod exec and port forward events. @@ -71,6 +74,10 @@ message KubernetesEvent { repeated int32 ports = 1; } + message PodAttachArgs { + string container = 1; + } + message ResponseStatus { int32 status_code = 1; string reason = 2; diff --git a/proto/storage/proto.lock b/proto/storage/proto.lock index 7148a2ae6383e..972c06db4ea17 100644 --- a/proto/storage/proto.lock +++ b/proto/storage/proto.lock @@ -10675,6 +10675,10 @@ { "name": "EGRESS_FIREWALLS", "integer": 9 + }, + { + "name": "PODS_ATTACH", + "integer": 10 } ] } @@ -10715,6 +10719,12 @@ "type": "PodPortForwardArgs", "oneof_parent": "ObjectArgs" }, + { + "id": 21, + "name": "pod_attach_args", + "type": "PodAttachArgs", + "oneof_parent": "ObjectArgs" + }, { "id": 15, "name": "user", @@ -10800,6 +10810,16 @@ } ] }, + { + "name": "PodAttachArgs", + "fields": [ + { + "id": 1, + "name": "container", + "type": "string" + } + ] + }, { "name": "ResponseStatus", "fields": [ diff --git a/qa-tests-backend/src/main/groovy/objects/Deployment.groovy b/qa-tests-backend/src/main/groovy/objects/Deployment.groovy index 3450e18b366e1..6ce3f38b1bda9 100644 --- a/qa-tests-backend/src/main/groovy/objects/Deployment.groovy +++ b/qa-tests-backend/src/main/groovy/objects/Deployment.groovy @@ -34,6 +34,8 @@ class Deployment { Map envValueFromResourceFieldRef = [:] Boolean isPrivileged = false Boolean readOnlyRootFilesystem = false + Boolean stdin = false + Boolean tty = false Map limits = [:] Map request = [:] Boolean hostNetwork = false @@ -236,6 +238,16 @@ class Deployment { return this } + Deployment setStdin(boolean val) { + this.stdin = val + return this + } + + Deployment setTty(boolean val) { + this.tty = val + return this + } + Deployment addLimits(String key, String val) { this.limits.put(key, val) return this diff --git a/qa-tests-backend/src/main/groovy/orchestratormanager/Kubernetes.groovy b/qa-tests-backend/src/main/groovy/orchestratormanager/Kubernetes.groovy index b3e69159f50ad..589119e7707b6 100644 --- a/qa-tests-backend/src/main/groovy/orchestratormanager/Kubernetes.groovy +++ b/qa-tests-backend/src/main/groovy/orchestratormanager/Kubernetes.groovy @@ -1933,6 +1933,59 @@ class Kubernetes { return execInContainerByPodName(name, namespace, splitCmd, retries) } + boolean attachToContainerByPodName(String name, String namespace, int retries = 1) { + // Wait for container 0 to be running first. + def timer = new Timer(retries, 1) + while (timer.IsValid()) { + def p = client.pods().inNamespace(namespace).withName(name).get() + if (p == null || p.status.containerStatuses.size() == 0) { + log.debug "First container in pod ${name} not yet running ..." + continue + } + def status = p.status.containerStatuses.get(0) + if (status.state.running != null) { + log.debug "First container in pod ${name} is running" + break + } + log.debug "First container in pod ${name} not yet running ..." + } + + final outputStream = new ByteArrayOutputStream() + final errorStream = new ByteArrayOutputStream() + log.debug("Attaching to the pod {}", name) + + // Disable retries for attach - we only need the request to reach the API server once + // to generate the pods/attach event. Retries would cause multiple events. 
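+        // The original retry limit is restored in the finally block below once the attach request has been issued.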
+ def originalRetryLimit = client.configuration.requestRetryBackoffLimit + client.configuration.setRequestRetryBackoffLimit(0) + + ExecWatch attachCmd = null + try { + // We fire and forget, we do not wait for completion like in the exec case + // because the invocation is what leads to the pods/attach event, and is hence sufficient + attachCmd = client.pods() + .inNamespace(namespace) + .withName(name) + .redirectingInput() + .writingOutput(outputStream) + .writingError(errorStream) + .attach() + + sleep(500) + return true + } catch (Exception e) { + log.debug("Error attaching to pod: {}", e.getMessage()) + // If the API server is reachable and the pod exists, any exception is likely a timeout + // or server response error - the pods/attach event was generated + // If the event truly wasn't generated (network issue), the test will fail later when it checks + // for violations + return true + } finally { + attachCmd?.close() + client.configuration.setRequestRetryBackoffLimit(originalRetryLimit) + } + } + private enum ExecStatus { UNKNOWN, SUCCESS, @@ -1945,6 +1998,10 @@ class Kubernetes { return execInContainerByPodName(deployment.pods.get(0).name, deployment.namespace, cmd, 30) } + boolean attachToContainer(Deployment deployment) { + return attachToContainerByPodName(deployment.pods.get(0).name, deployment.namespace, 30) + } + String generateYaml(Object orchestratorObject) { if (orchestratorObject instanceof NetworkPolicy) { return YamlGenerator.toYaml(createNetworkPolicyObject(orchestratorObject)) @@ -2317,6 +2374,8 @@ class Kubernetes { readOnlyRootFilesystem: deployment.readOnlyRootFilesystem, capabilities: new Capabilities(add: deployment.addCapabilities, drop: deployment.dropCapabilities)), + stdin: deployment.stdin, + tty: deployment.tty, ) // Allow override of imagePullPolicy for quay.io images. Typically used // to set to Never to help keep the list of quay.io prebuilt images up diff --git a/qa-tests-backend/src/test/groovy/K8sEventDetectionTest.groovy b/qa-tests-backend/src/test/groovy/K8sEventDetectionTest.groovy index 0c14ce2015de9..a01f20aa1dd64 100644 --- a/qa-tests-backend/src/test/groovy/K8sEventDetectionTest.groovy +++ b/qa-tests-backend/src/test/groovy/K8sEventDetectionTest.groovy @@ -1,16 +1,12 @@ import static util.Helpers.withRetry -import orchestratormanager.OrchestratorTypes - import io.stackrox.proto.storage.AlertOuterClass import io.stackrox.proto.storage.PolicyOuterClass import objects.Deployment import services.AlertService import services.PolicyService -import util.Env -import spock.lang.IgnoreIf import spock.lang.Tag import spock.lang.Unroll @@ -20,8 +16,10 @@ class K8sEventDetectionTest extends BaseSpecification { static private registerDeployment(String name, boolean privileged) { DEPLOYMENTS.add( new Deployment().setName(name) - .setImage(TEST_IMAGE).addLabel("app", name). 
- setPrivilegedFlag(privileged) + .setImage(TEST_IMAGE).addLabel("app", name) + .setPrivilegedFlag(privileged) + .setStdin(true) + .setTty(true) ) return name } @@ -35,45 +33,57 @@ class K8sEventDetectionTest extends BaseSpecification { static final private String KUBECTL_EXEC_POLICY_NAME = "Kubernetes Actions: Exec into Pod" static final private String CLONED_KUBECTL_EXEC_POLICY_NAME = "CLONED: Kubernetes Actions: Exec into Pod" + static final private String KUBECTL_ATTACH_POLICY_NAME = "Kubernetes Actions: Attach to Pod" + static final private String CLONED_KUBECTL_ATTACH_POLICY_NAME = "CLONED: Kubernetes Actions: Attach to Pod" + def setupSpec() { - if (Env.mustGetOrchestratorType() == OrchestratorTypes.OPENSHIFT) { - // K8s event detection is not supported on OpenShift. - return - } orchestrator.batchCreateDeployments(DEPLOYMENTS) for (Deployment deployment : DEPLOYMENTS) { assert Services.waitForDeployment(deployment) } - // If MITRE feature is enabled, work on the cloned policy instead of default policy. - def policy = Services.getPolicyByName(KUBECTL_EXEC_POLICY_NAME) - policy = PolicyService.createNewPolicy( - PolicyOuterClass.Policy.newBuilder(policy) + def execPolicy = Services.getPolicyByName(KUBECTL_EXEC_POLICY_NAME) + execPolicy = PolicyService.createNewPolicy( + PolicyOuterClass.Policy.newBuilder(execPolicy) .setId("") .setName(CLONED_KUBECTL_EXEC_POLICY_NAME) .setMitreVectorsLocked(false) .setCriteriaLocked(false) .build() ) - assert policy + assert execPolicy + + def attachPolicy = Services.getPolicyByName(KUBECTL_ATTACH_POLICY_NAME) + attachPolicy = PolicyService.createNewPolicy( + PolicyOuterClass.Policy.newBuilder(attachPolicy) + .setId("") + .setName(CLONED_KUBECTL_ATTACH_POLICY_NAME) + .setMitreVectorsLocked(false) + .setCriteriaLocked(false) + .build() + ) + assert attachPolicy Services.setPolicyDisabled(KUBECTL_EXEC_POLICY_NAME, true) + Services.setPolicyDisabled(KUBECTL_ATTACH_POLICY_NAME, true) } def cleanupSpec() { - if (Env.mustGetOrchestratorType() == OrchestratorTypes.OPENSHIFT) { - // K8s event detection is not supported on OpenShift. 
- return - } for (def deployment: DEPLOYMENTS) { orchestrator.deleteDeployment(deployment) } - def policy = Services.getPolicyByName(CLONED_KUBECTL_EXEC_POLICY_NAME) - if (policy) { - PolicyService.deletePolicy(policy.getId()) + def execPolicy = Services.getPolicyByName(CLONED_KUBECTL_EXEC_POLICY_NAME) + if (execPolicy) { + PolicyService.deletePolicy(execPolicy.getId()) } Services.setPolicyDisabled(KUBECTL_EXEC_POLICY_NAME, false) + + def attachPolicy = Services.getPolicyByName(CLONED_KUBECTL_ATTACH_POLICY_NAME) + if (attachPolicy) { + PolicyService.deletePolicy(attachPolicy.getId()) + } + Services.setPolicyDisabled(KUBECTL_ATTACH_POLICY_NAME, false) } def runExec(List deployments) { @@ -83,6 +93,13 @@ class K8sEventDetectionTest extends BaseSpecification { return true } + def runAttach(List deployments) { + for (def deployment: deployments) { + assert orchestrator.attachToContainer(deployment) + } + return true + } + def checkViolationsAreAsExpected(String policyName, List execedIntoDeploymentNames, List violatingDeploymentNames, Map podNames, int expectedK8sViolationsCount) { @@ -102,13 +119,21 @@ class K8sEventDetectionTest extends BaseSpecification { def podName = podNames.get(violatingDeploymentName) assert k8sSubViolations.size() == expectedK8sViolationsCount for (def subViolation: k8sSubViolations) { - assert subViolation.message == "Kubernetes API received exec 'ls -l' request into pod '${podName}'" + + if (policyName == CLONED_KUBECTL_EXEC_POLICY_NAME) { + assert subViolation.message == "Kubernetes API received exec 'ls -l' request into pod " + + "'${podName}' container '${violatingDeploymentName}'" + } + if (policyName == CLONED_KUBECTL_ATTACH_POLICY_NAME) { + assert subViolation.message == "Kubernetes API received attach request to pod '${podName}'" + " container '${violatingDeploymentName}'" + } def kvAttrs = subViolation.getKeyValueAttrs().getAttrsList() def podAttr = kvAttrs.find { it.key == "pod" } assert podAttr != null && podAttr.value == podName - def commandsAttr = kvAttrs.find { it.key == "commands" } - assert commandsAttr != null && commandsAttr.value == "ls -l" + if (policyName == CLONED_KUBECTL_EXEC_POLICY_NAME) { + def commandsAttr = kvAttrs.find { it.key == "commands" } + assert commandsAttr != null && commandsAttr.value == "ls -l" + } } // Ensure the deployment enrichment works. @@ -133,8 +158,6 @@ class K8sEventDetectionTest extends BaseSpecification { @Tag("BAT") @Tag("RUNTIME") @Tag("K8sEvents") - // K8s event detection is currently not supported on OpenShift. - @IgnoreIf({ Env.mustGetOrchestratorType() == OrchestratorTypes.OPENSHIFT }) def "Verify k8s exec detection into #execIntoDeploymentNames with addl groups #additionalPolicyGroups"() { when: "Create the deployments, modify the policy, exec into them" @@ -226,4 +249,98 @@ class K8sEventDetectionTest extends BaseSpecification { addValues(PolicyOuterClass.PolicyValue.newBuilder().setValue("true").build()). 
build(),] | [NGINX_2_DEP_NAME, PRIV_NGINX_2_DEPNAME] | [PRIV_NGINX_2_DEPNAME] } + + @Unroll + @Tag("BAT") + @Tag("RUNTIME") + @Tag("K8sEvents") + def "Verify k8s attach detection into #attachToDeploymentNames with addl groups #additionalPolicyGroups"() { + when: + "Create the deployments, modify the policy, attach to pods in them" + def originalPolicy = Services.getPolicyByName(CLONED_KUBECTL_ATTACH_POLICY_NAME) + assert originalPolicy != null && originalPolicy.getName() == CLONED_KUBECTL_ATTACH_POLICY_NAME + + def currentPolicy = originalPolicy + if (additionalPolicyGroups != null && additionalPolicyGroups.size() > 0) { + assert originalPolicy.getPolicySectionsCount() == 1 + def policySection = originalPolicy.getPolicySections(0) + def newPolicySection = PolicyOuterClass.PolicySection.newBuilder(policySection). + addAllPolicyGroups(additionalPolicyGroups). + build() + currentPolicy = PolicyOuterClass.Policy.newBuilder(originalPolicy). + clearPolicySections(). + addPolicySections(newPolicySection). + build() + Services.updatePolicy(currentPolicy) + // Sleep to allow policy update to propagate + sleep(3000) + } + + def podNames = new HashMap() + def attachToPodsInDeployments = [] + for (def deploymentName: attachToDeploymentNames) { + def deployment = DEPLOYMENTS.find { it.name == deploymentName } + assert deployment + attachToPodsInDeployments.add(deployment) + + def podsForDeployment = orchestrator.getPods(deployment.namespace, deployment.getLabels()["app"]) + assert podsForDeployment != null && podsForDeployment.size() == 1 + podNames.put(deployment.name, podsForDeployment.get(0).metadata.name) + } + + assert runAttach(attachToPodsInDeployments) + + then: + "Fetch violations and assert on properties" + assert checkViolationsAreAsExpected( + CLONED_KUBECTL_ATTACH_POLICY_NAME, attachToDeploymentNames, violatingDeploymentNames, podNames, 1, + ) + + when: + "Run another attach" + assert runAttach(attachToPodsInDeployments) + + then: + "Violations should have the new attach appended to them" + withRetry(2, 3) { + assert checkViolationsAreAsExpected( + CLONED_KUBECTL_ATTACH_POLICY_NAME, attachToDeploymentNames, violatingDeploymentNames, podNames, 2, + ) + } + + when: + "Update the policy to have enforcement" + currentPolicy = PolicyOuterClass.Policy.newBuilder(currentPolicy) + .clearEnforcementActions() + .addEnforcementActions(PolicyOuterClass.EnforcementAction.FAIL_KUBE_REQUEST_ENFORCEMENT) + .build() + Services.updatePolicy(currentPolicy) + // Allow to propagate + sleep(3000) + + then: + "Attach should fail for all violating deployments, but not for the others, and violations should not be updated" + for (def deploymentName: attachToDeploymentNames) { + def deployment = DEPLOYMENTS.find { it.name == deploymentName } + assert deployment + // these pods/attach events should be blocked by the enforced policy + orchestrator.attachToContainer(deployment) + } + + // Still the same number of k8s violations as previously since enforcement has caused + // the to should not have been updated + assert checkViolationsAreAsExpected( + CLONED_KUBECTL_ATTACH_POLICY_NAME, attachToDeploymentNames, violatingDeploymentNames, podNames, + 2, + ) + + cleanup: + Services.updatePolicy(originalPolicy) + + where: + "Data inputs are" + additionalPolicyGroups | attachToDeploymentNames | violatingDeploymentNames + + [] | [NGINX_1_DEP_NAME, NGINX_2_DEP_NAME] | [NGINX_1_DEP_NAME, NGINX_2_DEP_NAME] + } } diff --git a/ui/apps/platform/src/Containers/Policies/Wizard/Step3/policyCriteriaDescriptors.tsx 
b/ui/apps/platform/src/Containers/Policies/Wizard/Step3/policyCriteriaDescriptors.tsx index 4d49a3e49c593..dfd6c397f8219 100644 --- a/ui/apps/platform/src/Containers/Policies/Wizard/Step3/policyCriteriaDescriptors.tsx +++ b/ui/apps/platform/src/Containers/Policies/Wizard/Step3/policyCriteriaDescriptors.tsx @@ -1409,9 +1409,13 @@ export const policyCriteriaDescriptors: Descriptor[] = [ value: 'PODS_EXEC', }, { - label: 'Pods port forward', + label: 'Pod port forward', value: 'PODS_PORTFORWARD', }, + { + label: 'Pod attach', + value: 'PODS_ATTACH', + }, ], canBooleanLogic: false, lifecycleStages: ['RUNTIME'], From bcd315bd96e5da0a45c54042b1f5e3d1c7c23d97 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Feb 2026 13:27:50 -0500 Subject: [PATCH 122/232] chore(deps): bump google.golang.org/api from 0.264.0 to 0.265.0 (#18863) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 7209a30c5f8c7..75624c9cc221c 100644 --- a/go.mod +++ b/go.mod @@ -150,7 +150,7 @@ require ( golang.org/x/time v0.14.0 golang.org/x/tools v0.41.0 golang.stackrox.io/grpc-http1 v0.5.1 - google.golang.org/api v0.264.0 + google.golang.org/api v0.265.0 google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b google.golang.org/grpc v1.78.0 @@ -504,7 +504,7 @@ require ( go.yaml.in/yaml/v2 v2.4.3 // indirect golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 // indirect gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.1 // indirect diff --git a/go.sum b/go.sum index 90c1294992949..14e1e80ce44c5 100644 --- a/go.sum +++ b/go.sum @@ -2177,8 +2177,8 @@ google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdr google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= -google.golang.org/api v0.264.0 h1:+Fo3DQXBK8gLdf8rFZ3uLu39JpOnhvzJrLMQSoSYZJM= -google.golang.org/api v0.264.0/go.mod h1:fAU1xtNNisHgOF5JooAs8rRaTkl2rT3uaoNGo9NS3R8= +google.golang.org/api v0.265.0 h1:FZvfUdI8nfmuNrE34aOWFPmLC+qRBEiNm3JdivTvAAU= +google.golang.org/api v0.265.0/go.mod h1:uAvfEl3SLUj/7n6k+lJutcswVojHPp2Sp08jWCu8hLY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2254,8 +2254,8 @@ google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 h1:GvESR9BIyHUahIb google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:yJ2HH4EHEDTd3JiLmhds6NkJ17ITVYOdV3m3VKOnws0= google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b h1:uA40e2M6fYRBf0+8uN5mLlqUtV192iiksiICIBkYJ1E= google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b/go.mod 
h1:Xa7le7qx2vmqB/SzWUBa7KdMjpdpAHlh5QCSnjessQk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d h1:xXzuihhT3gL/ntduUZwHECzAn57E8dA6l8SOtYWdD8Q= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 h1:H86B94AW+VfJWDqFeEbBPhEtHzJwJfTbgE2lZa54ZAQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= From 9e6967244e67a66d0cbf3ede3a03c9066e25a96d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Feb 2026 14:13:49 -0500 Subject: [PATCH 123/232] chore(deps): bump github.com/mikefarah/yq/v4 from 4.50.1 to 4.52.2 in /operator/tools/yq (#18834) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- operator/tools/yq/go.mod | 14 +++++++------- operator/tools/yq/go.sum | 28 ++++++++++++++-------------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/operator/tools/yq/go.mod b/operator/tools/yq/go.mod index 9186bc4142a7a..7efb0bae0eb3f 100644 --- a/operator/tools/yq/go.mod +++ b/operator/tools/yq/go.mod @@ -2,7 +2,7 @@ module github.com/stackrox/rox/operator/tools/yq go 1.25 -require github.com/mikefarah/yq/v4 v4.50.1 +require github.com/mikefarah/yq/v4 v4.52.2 require ( github.com/a8m/envsubst v1.4.3 // indirect @@ -14,7 +14,7 @@ require ( github.com/fatih/color v1.18.0 // indirect github.com/go-ini/ini v1.67.0 // indirect github.com/goccy/go-json v0.10.5 // indirect - github.com/goccy/go-yaml v1.19.0 // indirect + github.com/goccy/go-yaml v1.19.2 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/hashicorp/hcl/v2 v2.24.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -29,11 +29,11 @@ require ( github.com/yuin/gopher-lua v1.1.1 // indirect github.com/zclconf/go-cty v1.17.0 // indirect go.yaml.in/yaml/v4 v4.0.0-rc.3 // indirect - golang.org/x/mod v0.30.0 // indirect - golang.org/x/net v0.48.0 // indirect + golang.org/x/mod v0.31.0 // indirect + golang.org/x/net v0.49.0 // indirect golang.org/x/sync v0.19.0 // indirect - golang.org/x/sys v0.39.0 // indirect - golang.org/x/text v0.32.0 // indirect - golang.org/x/tools v0.39.0 // indirect + golang.org/x/sys v0.40.0 // indirect + golang.org/x/text v0.33.0 // indirect + golang.org/x/tools v0.40.0 // indirect gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 // indirect ) diff --git a/operator/tools/yq/go.sum b/operator/tools/yq/go.sum index 5410339ca3a34..39724fd59ef75 100644 --- a/operator/tools/yq/go.sum +++ b/operator/tools/yq/go.sum @@ -26,8 +26,8 @@ github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= -github.com/goccy/go-yaml v1.19.0 h1:EmkZ9RIsX+Uq4DYFowegAuJo8+xdX3T/2dwNPXbxEYE= -github.com/goccy/go-yaml v1.19.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= 
+github.com/goccy/go-yaml v1.19.2 h1:PmFC1S6h8ljIz6gMRBopkjP1TVT7xuwrButHID66PoM= +github.com/goccy/go-yaml v1.19.2/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/hashicorp/hcl/v2 v2.24.0 h1:2QJdZ454DSsYGoaE6QheQZjtKZSUs9Nh2izTWiwQxvE= @@ -44,8 +44,8 @@ github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHP github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mikefarah/yq/v4 v4.50.1 h1:u7pnei4FIv4HGL5ZBuNVDhDBe9et1YRFnoTmKZw6zOY= -github.com/mikefarah/yq/v4 v4.50.1/go.mod h1:L4Z8NywrquZ+PVMz6IFFeGIp64eBC2mGC0nMryygCnI= +github.com/mikefarah/yq/v4 v4.52.2 h1:g38MGUsWO4y6Te1tzZy3fk6hZ9xknRHLzBaXVoqfAyI= +github.com/mikefarah/yq/v4 v4.52.2/go.mod h1:05ytoLM9RqcBCI73V3lqDL1gbQ29mEe573IslI9ibU8= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= @@ -72,19 +72,19 @@ github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmB go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= go.yaml.in/yaml/v4 v4.0.0-rc.3 h1:3h1fjsh1CTAPjW7q/EMe+C8shx5d8ctzZTrLcs/j8Go= go.yaml.in/yaml/v4 v4.0.0-rc.3/go.mod h1:aZqd9kCMsGL7AuUv/m/PvWLdg5sjJsZ4oHDEnfPPfY0= -golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= -golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= -golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= -golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= +golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= +golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o= +golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= -golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= -golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= -golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= -golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= +golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= +golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= +golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= 
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 h1:6D+BvnJ/j6e222UW8s2qTSe3wGBtvo0MbVQG/c5k8RE= gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog= From 28c2e55d60ca0666eefc47aeac0d38156222d3c6 Mon Sep 17 00:00:00 2001 From: David Shrewsberry <99685630+dashrews78@users.noreply.github.com> Date: Thu, 5 Feb 2026 15:52:49 -0500 Subject: [PATCH 124/232] chore(fix): fix delete group by (#18879) --- pkg/search/postgres/sac.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/pkg/search/postgres/sac.go b/pkg/search/postgres/sac.go index afcf21bed7002..1dcb66624f0d4 100644 --- a/pkg/search/postgres/sac.go +++ b/pkg/search/postgres/sac.go @@ -28,10 +28,8 @@ func enrichQueryWithSACFilter(ctx context.Context, q *v1.Query, schema *walker.S return q, nil } pagination := q.GetPagination() - groupBy := q.GetGroupBy() query := searchPkg.ConjunctionQuery(sacFilter, q) query.Pagination = pagination - query.GroupBy = groupBy return query, nil default: sacFilter, err := GetReadSACQuery(ctx, schema.ScopingResource) From e94e2e07e5363f0954a6439f5eaf28852667e38b Mon Sep 17 00:00:00 2001 From: David Vail Date: Thu, 5 Feb 2026 16:26:37 -0500 Subject: [PATCH 125/232] chore(ui): Update JSPDF to 4.1.0 (#18868) --- ui/apps/platform/package-lock.json | 17 +++++++++-------- ui/apps/platform/package.json | 2 +- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/ui/apps/platform/package-lock.json b/ui/apps/platform/package-lock.json index 577436a9fe529..2ead5ea6a3958 100644 --- a/ui/apps/platform/package-lock.json +++ b/ui/apps/platform/package-lock.json @@ -42,7 +42,7 @@ "html2canvas": "1.0.0-rc.7", "initials": "^3.1.2", "js-base64": "^3.7.2", - "jspdf": "^4.0.0", + "jspdf": "^4.1.0", "jspdf-autotable": "^5.0.7", "lodash": "^4.17.23", "mobx": "^6.13.7", @@ -8063,9 +8063,10 @@ } }, "node_modules/dompurify": { - "version": "3.2.6", - "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.2.6.tgz", - "integrity": "sha512-/2GogDQlohXPZe6D6NOgQvXLPSYBqIWMnZ8zzOhn09REE4eyAzb+Hed3jhoM9OkuaJ8P6ZGTTVWQKAi8ieIzfQ==", + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.3.1.tgz", + "integrity": "sha512-qkdCKzLNtrgPFP1Vo+98FRzJnBRGe4ffyCea9IwHB1fyxPOeNTHpLKYGd4Uk9xvNoH0ZoOjwZxNptyMwqrId1Q==", + "license": "(MPL-2.0 OR Apache-2.0)", "optionalDependencies": { "@types/trusted-types": "^2.0.7" } @@ -11799,9 +11800,9 @@ } }, "node_modules/jspdf": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jspdf/-/jspdf-4.0.0.tgz", - "integrity": "sha512-w12U97Z6edKd2tXDn3LzTLg7C7QLJlx0BPfM3ecjK2BckUl9/81vZ+r5gK4/3KQdhAcEZhENUxRhtgYBj75MqQ==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/jspdf/-/jspdf-4.1.0.tgz", + "integrity": "sha512-xd1d/XRkwqnsq6FP3zH1Q+Ejqn2ULIJeDZ+FTKpaabVpZREjsJKRJwuokTNgdqOU+fl55KgbvgZ1pRTSWCP2kQ==", "license": "MIT", "dependencies": { "@babel/runtime": "^7.28.4", @@ -11811,7 +11812,7 @@ "optionalDependencies": { "canvg": "^3.0.11", "core-js": "^3.6.0", - "dompurify": "^3.2.4", + "dompurify": "^3.3.1", "html2canvas": "^1.0.0-rc.5" } }, diff --git a/ui/apps/platform/package.json b/ui/apps/platform/package.json index d430234ac968b..f801c57a1051d 100644 --- a/ui/apps/platform/package.json +++ b/ui/apps/platform/package.json @@ -45,7 +45,7 @@ "html2canvas": "1.0.0-rc.7", "initials": "^3.1.2", "js-base64": "^3.7.2", - "jspdf": "^4.0.0", + "jspdf": "^4.1.0", 
"jspdf-autotable": "^5.0.7", "lodash": "^4.17.23", "mobx": "^6.13.7", From 08220433392164cf1025196fa1bb8dda4c23ee72 Mon Sep 17 00:00:00 2001 From: David Vail Date: Thu, 5 Feb 2026 16:27:21 -0500 Subject: [PATCH 126/232] ROX-32982: Fix default autocomplete behavior in search UI (#18852) --- .../components/AutocompleteSelect.tsx | 34 +++++---- .../components/CompoundSearchFilter.cy.jsx | 3 +- .../platform/src/utils/searchUtils.test.ts | 70 +++++++++++++++++++ ui/apps/platform/src/utils/searchUtils.ts | 18 ++++- 4 files changed, 111 insertions(+), 14 deletions(-) diff --git a/ui/apps/platform/src/Components/CompoundSearchFilter/components/AutocompleteSelect.tsx b/ui/apps/platform/src/Components/CompoundSearchFilter/components/AutocompleteSelect.tsx index 94dd1c51b9a94..dc23ae709b788 100644 --- a/ui/apps/platform/src/Components/CompoundSearchFilter/components/AutocompleteSelect.tsx +++ b/ui/apps/platform/src/Components/CompoundSearchFilter/components/AutocompleteSelect.tsx @@ -18,7 +18,7 @@ import { SearchIcon, TimesIcon } from '@patternfly/react-icons'; import { useQuery } from '@apollo/client'; import SEARCH_AUTOCOMPLETE_QUERY from 'queries/searchAutocomplete'; import type { SearchAutocompleteQueryResponse } from 'queries/searchAutocomplete'; -import { getRequestQueryStringForSearchFilter } from 'utils/searchUtils'; +import { getRequestQueryStringForSearchFilter, wrapInQuotes } from 'utils/searchUtils'; import type { SearchFilter } from 'types/search'; import { ensureString } from 'utils/ensure'; @@ -160,13 +160,24 @@ function AutocompleteSelect({ setIsOpen((prev) => !prev); }; + // Wraps the value in quotes if it's an actual autocomplete suggestion from the backend, + // otherwise returns it unchanged. This provides exact-match search for suggestions + // and regex search for manual/fallback entries. + const applySelectedText = (rawValue: string | number) => { + const value = ensureString(rawValue); + const isAutocompleteSuggestion = data.searchAutocomplete.includes(value); + const valueToApply = isAutocompleteSuggestion ? wrapInQuotes(value) : value; + onChange(valueToApply); + onSearch(valueToApply); + setFilterValue(''); + }; + const onSelect = ( _event: ReactMouseEvent | undefined, value: string | number | undefined ) => { if (value) { - onChange(ensureString(value)); - setFilterValue(''); + applySelectedText(value); } setIsOpen(false); setFocusedItemIndex(null); @@ -214,16 +225,16 @@ function AutocompleteSelect({ const onInputKeyDown = (event: KeyboardEvent) => { const enabledMenuItems = selectOptions.filter((option) => !option.isDisabled); - const [firstMenuItem] = enabledMenuItems; - const focusedItem = focusedItemIndex ? enabledMenuItems[focusedItemIndex] : firstMenuItem; + const focusedItem = focusedItemIndex !== null ? 
enabledMenuItems[focusedItemIndex] : null; switch (event.key) { - // Select the first available option case 'Enter': - if (isOpen) { - const newValue = ensureString(focusedItem.value); - onChange(newValue); - onSearch(newValue); + if (isOpen && focusedItem) { + applySelectedText(focusedItem.value); + } else if (value) { + // Manual text entry - apply without quotes for regex search + onSearch(value); + onChange(''); setFilterValue(''); } @@ -310,8 +321,7 @@ function AutocompleteSelect({ isSelected={false} className={option.className} onClick={() => { - onChange(option.value); - onSearch(option.value); + applySelectedText(option.value); }} id={`select-typeahead-${option?.value?.replace(' ', '-space-')}`} {...option} diff --git a/ui/apps/platform/src/Components/CompoundSearchFilter/components/CompoundSearchFilter.cy.jsx b/ui/apps/platform/src/Components/CompoundSearchFilter/components/CompoundSearchFilter.cy.jsx index 580f006bac239..4fc2f7fb1ded6 100644 --- a/ui/apps/platform/src/Components/CompoundSearchFilter/components/CompoundSearchFilter.cy.jsx +++ b/ui/apps/platform/src/Components/CompoundSearchFilter/components/CompoundSearchFilter.cy.jsx @@ -474,11 +474,12 @@ describe(Cypress.spec.relative, () => { cy.get(autocompleteMenuItems).eq(0).click(); + // Autocomplete selections are wrapped in quotes for exact-match search cy.get('@onSearch').should('have.been.calledWithExactly', [ { action: 'APPEND', category: 'Image', - value: 'docker.io/library/centos:7', + value: '"docker.io/library/centos:7"', }, ]); diff --git a/ui/apps/platform/src/utils/searchUtils.test.ts b/ui/apps/platform/src/utils/searchUtils.test.ts index ee0b7d9dd4d73..b59b8be9f611e 100644 --- a/ui/apps/platform/src/utils/searchUtils.test.ts +++ b/ui/apps/platform/src/utils/searchUtils.test.ts @@ -1,5 +1,6 @@ import type { GraphQLSortOption } from 'types/search'; import { + applyRegexSearchModifiers, convertSortToGraphQLFormat, convertSortToRestFormat, convertToExactMatch, @@ -11,6 +12,7 @@ import { getViewStateFromSearch, hasSearchKeyValue, searchValueAsArray, + wrapInQuotes, } from './searchUtils'; import type { NonEmptyArray } from './type.utils'; @@ -430,4 +432,72 @@ describe('searchUtils', () => { expect(result).toEqual({ Namespace: 'test', Cluster: 'test' }); }); }); + + describe('wrapInQuotes', () => { + it('wraps a string in double quotes', () => { + expect(wrapInQuotes('hello')).toBe('"hello"'); + expect(wrapInQuotes('test value')).toBe('"test value"'); + }); + + it('escapes internal double quotes', () => { + expect(wrapInQuotes('hello"world')).toBe('"hello\\"world"'); + expect(wrapInQuotes('say "hello"')).toBe('"say \\"hello\\""'); + }); + + it('handles empty string', () => { + expect(wrapInQuotes('')).toBe('""'); + }); + }); + + describe('applyRegexSearchModifiers', () => { + it('wraps text search values with regex modifier', () => { + const searchFilter = { Cluster: 'production' }; + const result = applyRegexSearchModifiers(searchFilter); + expect(result).toEqual({ Cluster: ['r/production'] }); + }); + + it('does not wrap quoted strings with regex modifier', () => { + const searchFilter = { Cluster: '"production"' }; + const result = applyRegexSearchModifiers(searchFilter); + expect(result).toEqual({ Cluster: ['"production"'] }); + }); + + it('handles mixed quoted and unquoted values', () => { + const searchFilter = { + Cluster: [ + 'production', + '"exact-match"', + 'staging', + '"exact-w-\\"quote"', + 'regex-w-"quote', + ], + }; + const result = applyRegexSearchModifiers(searchFilter); + expect(result).toEqual({ + 
Cluster: [ + 'r/production', + '"exact-match"', + 'r/staging', + '"exact-w-\\"quote"', + 'r/regex-w-"quote', + ], + }); + }); + + it('handles quoted strings with escaped quotes', () => { + const searchFilter = { Cluster: '"cluster\\"name"' }; + const result = applyRegexSearchModifiers(searchFilter); + expect(result).toEqual({ Cluster: ['"cluster\\"name"'] }); + }); + + it('only applies to text and autocomplete input types', () => { + const searchFilter = { + Cluster: 'production', // autocomplete field + 'Random Field': 'value', // not in regexSearchOptions + }; + const result = applyRegexSearchModifiers(searchFilter); + expect(result.Cluster).toEqual(['r/production']); + expect(result['Random Field']).toEqual('value'); // should not be modified + }); + }); }); diff --git a/ui/apps/platform/src/utils/searchUtils.ts b/ui/apps/platform/src/utils/searchUtils.ts index 5c462ad01235f..de8b0e49c03fe 100644 --- a/ui/apps/platform/src/utils/searchUtils.ts +++ b/ui/apps/platform/src/utils/searchUtils.ts @@ -428,15 +428,31 @@ const regexSearchOptions = [ .filter(({ inputType }) => inputType === 'text' || inputType === 'autocomplete') .map(({ searchTerm }) => searchTerm); +function isQuotedString(value: string): boolean { + return value.startsWith('"') && value.endsWith('"') && value.length >= 2; +} + +/** + * Wraps a string in double quotes, escaping any internal double quotes with backslashes. + * Used to indicate exact-match search values from autocomplete selections. + */ +export function wrapInQuotes(value: string): string { + const escapedValue = value.replace(/"/g, '\\"'); + return `"${escapedValue}"`; +} + /** * Adds the regex search modifier to the search filter for any search options that support it. + * Skips regex wrapping for values that are already quoted (exact-match strings). */ export function applyRegexSearchModifiers(searchFilter: SearchFilter): SearchFilter { const regexSearchFilter = cloneDeep(searchFilter); Object.entries(regexSearchFilter).forEach(([key, value]) => { if (regexSearchOptions.some((option) => option.toLowerCase() === key.toLowerCase())) { - regexSearchFilter[key] = searchValueAsArray(value).map((val) => `r/${val}`); + regexSearchFilter[key] = searchValueAsArray(value).map((val) => + isQuotedString(val) ? 
val : `r/${val}` + ); } }); From e55526dfda7d9fea2e697f4a1a6bc1132a377aa6 Mon Sep 17 00:00:00 2001 From: AJ Heflin <77823405+ajheflin@users.noreply.github.com> Date: Thu, 5 Feb 2026 17:07:38 -0500 Subject: [PATCH 127/232] feat(enricher): move CVE info enrichment to enricher path (#18825) Co-authored-by: Claude Opus 4.5 --- central/cve/image/info/enricher/enricher.go | 139 +++++++++++ central/cve/image/info/enricher/singleton.go | 23 ++ central/enrichment/singleton.go | 5 +- central/graphdb/testutils/datastore.go | 3 - central/graphql/resolvers/test_setup_utils.go | 7 +- central/image/datastore/datastore.go | 5 +- .../datastore_bench_postgres_test.go | 7 +- central/image/datastore/datastore_impl.go | 104 +-------- .../datastore_impl_flat_postgres_test.go | 218 +----------------- .../datastore/datastore_test_constructors.go | 4 +- central/image/datastore/singleton.go | 3 +- central/imagev2/datastore/datastore.go | 5 +- .../datastore_bench_postgres_test.go | 7 +- central/imagev2/datastore/datastore_impl.go | 104 +-------- .../datastore/datastore_test_constructors.go | 4 +- central/imagev2/datastore/singleton.go | 3 +- .../datastoretest/datastore_impl_test.go | 4 +- central/pruning/pruning_test.go | 4 - central/reprocessor/reprocessor_test.go | 7 +- central/views/deployments/view_test.go | 3 - central/views/imagecveflat/view_test.go | 3 - .../querymgr/query_manager_impl_test.go | 4 - .../manager_impl_flat_cve_data_test.go | 4 - pkg/images/enricher/enricher.go | 12 +- pkg/images/enricher/enricher_impl.go | 23 ++ pkg/images/enricher/enricher_impl_test.go | 2 +- pkg/images/enricher/enricher_v2.go | 7 +- pkg/images/enricher/enricher_v2_impl.go | 27 ++- pkg/images/enricher/enricher_v2_impl_test.go | 2 +- pkg/images/enricher/mocks/enricher.go | 52 +++++ 30 files changed, 309 insertions(+), 486 deletions(-) create mode 100644 central/cve/image/info/enricher/enricher.go create mode 100644 central/cve/image/info/enricher/singleton.go diff --git a/central/cve/image/info/enricher/enricher.go b/central/cve/image/info/enricher/enricher.go new file mode 100644 index 0000000000000..d4d9220220585 --- /dev/null +++ b/central/cve/image/info/enricher/enricher.go @@ -0,0 +1,139 @@ +package enricher + +import ( + "context" + + imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" + "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/cve" + "github.com/stackrox/rox/pkg/features" + imageEnricher "github.com/stackrox/rox/pkg/images/enricher" + "github.com/stackrox/rox/pkg/protocompat" + "github.com/stackrox/rox/pkg/sac" +) + +type enricherImpl struct { + imageCVEInfoDS imageCVEInfoDS.DataStore +} + +// New creates a new CVEInfoEnricher. +func New(imageCVEInfoDS imageCVEInfoDS.DataStore) imageEnricher.CVEInfoEnricher { + return &enricherImpl{ + imageCVEInfoDS: imageCVEInfoDS, + } +} + +// EnrichImageWithCVEInfo enriches a V1 image's CVEs with timing metadata. +func (e *enricherImpl) EnrichImageWithCVEInfo(ctx context.Context, image *storage.Image) error { + if !features.CVEFixTimestampCriteria.Enabled() { + return nil + } + + scan := image.GetScan() + if scan == nil { + return nil + } + + // Populate the ImageCVEInfo table with CVE timing metadata + if err := e.upsertImageCVEInfos(ctx, scan); err != nil { + return err + } + + // Enrich the CVEs with accurate timestamps from lookup table + return e.enrichCVEsFromImageCVEInfo(ctx, scan) +} + +// EnrichImageV2WithCVEInfo enriches a V2 image's CVEs with timing metadata. 
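+// It mirrors EnrichImageWithCVEInfo above: both paths delegate to upsertImageCVEInfos and
+// enrichCVEsFromImageCVEInfo on the image's scan; only the image type differs.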
+func (e *enricherImpl) EnrichImageV2WithCVEInfo(ctx context.Context, image *storage.ImageV2) error { + if !features.CVEFixTimestampCriteria.Enabled() { + return nil + } + + scan := image.GetScan() + if scan == nil { + return nil + } + + // Populate the ImageCVEInfo table with CVE timing metadata + if err := e.upsertImageCVEInfos(ctx, scan); err != nil { + return err + } + + // Enrich the CVEs with accurate timestamps from lookup table + return e.enrichCVEsFromImageCVEInfo(ctx, scan) +} + +// upsertImageCVEInfos populates the ImageCVEInfo lookup table with CVE timing metadata. +func (e *enricherImpl) upsertImageCVEInfos(ctx context.Context, scan *storage.ImageScan) error { + infos := make([]*storage.ImageCVEInfo, 0) + now := protocompat.TimestampNow() + + for _, component := range scan.GetComponents() { + for _, vuln := range component.GetVulns() { + // Determine fix available timestamp: use scanner-provided value if available, + // otherwise fabricate from scan time if the CVE is fixable (has a fix version). + // This handles non-Red Hat data sources that don't provide fix timestamps. + fixAvailableTimestamp := vuln.GetFixAvailableTimestamp() + if fixAvailableTimestamp == nil && vuln.GetFixedBy() != "" { + fixAvailableTimestamp = now + } + + info := &storage.ImageCVEInfo{ + Id: cve.ImageCVEInfoID(vuln.GetCve(), component.GetName(), vuln.GetDatasource()), + Cve: vuln.GetCve(), + FixAvailableTimestamp: fixAvailableTimestamp, + FirstSystemOccurrence: now, // Smart upsert in ImageCVEInfo datastore preserves existing + } + infos = append(infos, info) + } + } + + if len(infos) == 0 { + return nil + } + + return e.imageCVEInfoDS.UpsertMany(sac.WithAllAccess(ctx), infos) +} + +// enrichCVEsFromImageCVEInfo enriches the image's CVEs with accurate timestamps from the lookup table. 
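+// Lookup keys are the same cve.ImageCVEInfoID(cve, component, datasource) IDs written by
+// upsertImageCVEInfos, so timestamps are resolved per CVE/component/datasource combination.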
+func (e *enricherImpl) enrichCVEsFromImageCVEInfo(ctx context.Context, scan *storage.ImageScan) error { + // Collect all IDs + ids := make([]string, 0) + for _, component := range scan.GetComponents() { + for _, vuln := range component.GetVulns() { + ids = append(ids, cve.ImageCVEInfoID(vuln.GetCve(), component.GetName(), vuln.GetDatasource())) + } + } + + if len(ids) == 0 { + return nil + } + + // Batch fetch + infos, err := e.imageCVEInfoDS.GetBatch(sac.WithAllAccess(ctx), ids) + if err != nil { + return err + } + + // Build lookup map + infoMap := make(map[string]*storage.ImageCVEInfo) + for _, info := range infos { + infoMap[info.GetId()] = info + } + + // Enrich CVEs + for _, component := range scan.GetComponents() { + for _, vuln := range component.GetVulns() { + id := cve.ImageCVEInfoID(vuln.GetCve(), component.GetName(), vuln.GetDatasource()) + if info, ok := infoMap[id]; ok { + if vuln.GetFixAvailableTimestamp() == nil && vuln.GetFixedBy() != "" { + // Set the fix timestamp if it was not provided by the scanner + vuln.FixAvailableTimestamp = info.GetFixAvailableTimestamp() + } + vuln.FirstSystemOccurrence = info.GetFirstSystemOccurrence() + } + } + } + + return nil +} diff --git a/central/cve/image/info/enricher/singleton.go b/central/cve/image/info/enricher/singleton.go new file mode 100644 index 0000000000000..ba5b452e54968 --- /dev/null +++ b/central/cve/image/info/enricher/singleton.go @@ -0,0 +1,23 @@ +package enricher + +import ( + imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" + imageEnricher "github.com/stackrox/rox/pkg/images/enricher" + "github.com/stackrox/rox/pkg/sync" +) + +var ( + once sync.Once + + instance imageEnricher.CVEInfoEnricher +) + +func initialize() { + instance = New(imageCVEInfoDS.Singleton()) +} + +// Singleton returns a singleton instance of CVEInfoEnricher. 
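+// The production wiring (see the central/enrichment/singleton.go change in this patch) passes it
+// into the image enricher, roughly (import aliases as in that file):
+//
+//	imageEnricher.New(suppressor.Singleton(), cveInfoEnricher.Singleton(), imageintegration.Set(), ...)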
+func Singleton() imageEnricher.CVEInfoEnricher { + once.Do(initialize) + return instance +} diff --git a/central/enrichment/singleton.go b/central/enrichment/singleton.go index cb28eb5b95aca..ba236d20af19c 100644 --- a/central/enrichment/singleton.go +++ b/central/enrichment/singleton.go @@ -7,6 +7,7 @@ import ( baseImageMatcher "github.com/stackrox/rox/central/baseimage/matcher" clusterDataStore "github.com/stackrox/rox/central/cluster/datastore" "github.com/stackrox/rox/central/cve/fetcher" + cveInfoEnricher "github.com/stackrox/rox/central/cve/image/info/enricher" nodeCVEDataStore "github.com/stackrox/rox/central/cve/node/datastore" delegatedRegistryConfigDS "github.com/stackrox/rox/central/delegatedregistryconfig/datastore" "github.com/stackrox/rox/central/delegatedregistryconfig/delegator" @@ -59,11 +60,11 @@ func initialize() { ) if !features.FlattenImageData.Enabled() { - imgEnricher = imageEnricher.New(suppressor.Singleton(), imageintegration.Set(), + imgEnricher = imageEnricher.New(suppressor.Singleton(), cveInfoEnricher.Singleton(), imageintegration.Set(), metrics.CentralSubsystem, cache.ImageMetadataCacheSingleton(), baseImageMatcher.Singleton().MatchWithBaseImages, datastore.Singleton().GetImage, reporter.Singleton(), signatureIntegrationDataStore.Singleton().GetAllSignatureIntegrations, scanDelegator) } else { - imgEnricherV2 = imageEnricher.NewV2(suppressor.Singleton(), imageintegration.Set(), + imgEnricherV2 = imageEnricher.NewV2(suppressor.Singleton(), cveInfoEnricher.Singleton(), imageintegration.Set(), metrics.CentralSubsystem, cache.ImageMetadataCacheSingleton(), baseImageMatcher.Singleton().MatchWithBaseImages, imageV2DataStore.Singleton().GetImage, reporter.Singleton(), signatureIntegrationDataStore.Singleton().GetAllSignatureIntegrations, scanDelegator) } diff --git a/central/graphdb/testutils/datastore.go b/central/graphdb/testutils/datastore.go index 64a757be049b7..fda32e355feb0 100644 --- a/central/graphdb/testutils/datastore.go +++ b/central/graphdb/testutils/datastore.go @@ -7,7 +7,6 @@ import ( clusterDataStore "github.com/stackrox/rox/central/cluster/datastore" clusterCVEDataStore "github.com/stackrox/rox/central/cve/cluster/datastore" cveConverterV2 "github.com/stackrox/rox/central/cve/converter/v2" - imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" deploymentDataStore "github.com/stackrox/rox/central/deployment/datastore" imageDataStore "github.com/stackrox/rox/central/image/datastore" imagePostgresV2 "github.com/stackrox/rox/central/image/datastore/store/v2/postgres" @@ -308,13 +307,11 @@ func NewTestGraphDataStore(t *testing.T) (TestGraphDataStore, error) { s.pgtestbase = pgtest.ForT(t) s.nodeStore = nodeDataStore.GetTestPostgresDataStore(t, s.GetPostgresPool()) - imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(t, s.GetPostgresPool()) s.imageStore = imageDataStore.NewWithPostgres( imagePostgresV2.New(s.GetPostgresPool(), false, concurrency.NewKeyFence()), riskDS.GetTestPostgresDataStore(t, s.GetPostgresPool()), ranking.NewRanker(), ranking.NewRanker(), - imageCVEInfo, ) s.deploymentStore, err = deploymentDataStore.GetTestPostgresDataStore(t, s.GetPostgresPool()) if err != nil { diff --git a/central/graphql/resolvers/test_setup_utils.go b/central/graphql/resolvers/test_setup_utils.go index 33c9b980f1acc..56b485481dd8c 100644 --- a/central/graphql/resolvers/test_setup_utils.go +++ b/central/graphql/resolvers/test_setup_utils.go @@ -12,7 +12,6 @@ import ( clusterCVEEdgePostgres 
"github.com/stackrox/rox/central/clustercveedge/datastore/store/postgres" clusterCVEDataStore "github.com/stackrox/rox/central/cve/cluster/datastore" clusterCVEPostgres "github.com/stackrox/rox/central/cve/cluster/datastore/store/postgres" - imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" imageCVEV2DS "github.com/stackrox/rox/central/cve/image/v2/datastore" imageCVEV2Postgres "github.com/stackrox/rox/central/cve/image/v2/datastore/store/postgres" nodeCVEDataStore "github.com/stackrox/rox/central/cve/node/datastore" @@ -142,7 +141,7 @@ func SetupTestResolver(t testing.TB, datastores ...interface{}) (*Resolver, *gra } // CreateTestImageV2Datastore creates image datastore for testing -func CreateTestImageV2Datastore(t testing.TB, testDB *pgtest.TestPostgres, ctrl *gomock.Controller) imageDS.DataStore { +func CreateTestImageV2Datastore(_ testing.TB, testDB *pgtest.TestPostgres, ctrl *gomock.Controller) imageDS.DataStore { risks := mockRisks.NewMockDataStore(ctrl) risks.EXPECT().RemoveRisk(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() return imageDS.NewWithPostgres( @@ -150,12 +149,11 @@ func CreateTestImageV2Datastore(t testing.TB, testDB *pgtest.TestPostgres, ctrl risks, ranking.NewRanker(), ranking.NewRanker(), - imageCVEInfoDS.GetTestPostgresDataStore(t, testDB.DB), ) } // CreateTestImageV2V2Datastore creates image datastore for testing -func CreateTestImageV2V2Datastore(t testing.TB, testDB *pgtest.TestPostgres, ctrl *gomock.Controller) imageV2DS.DataStore { +func CreateTestImageV2V2Datastore(_ testing.TB, testDB *pgtest.TestPostgres, ctrl *gomock.Controller) imageV2DS.DataStore { risks := mockRisks.NewMockDataStore(ctrl) risks.EXPECT().RemoveRisk(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() return imageV2DS.NewWithPostgres( @@ -163,7 +161,6 @@ func CreateTestImageV2V2Datastore(t testing.TB, testDB *pgtest.TestPostgres, ctr risks, ranking.NewRanker(), ranking.NewRanker(), - imageCVEInfoDS.GetTestPostgresDataStore(t, testDB.DB), ) } diff --git a/central/image/datastore/datastore.go b/central/image/datastore/datastore.go index f16ac3d335117..5d4fee9f0987e 100644 --- a/central/image/datastore/datastore.go +++ b/central/image/datastore/datastore.go @@ -3,7 +3,6 @@ package datastore import ( "context" - imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" "github.com/stackrox/rox/central/image/datastore/store" "github.com/stackrox/rox/central/ranking" riskDS "github.com/stackrox/rox/central/risk/datastore" @@ -41,8 +40,8 @@ type DataStore interface { // NewWithPostgres returns a new instance of DataStore using the input store, and searcher. // noUpdateTimestamps controls whether timestamps are automatically updated when upserting images. // This should be set to `false` except for some tests. 
-func NewWithPostgres(storage store.Store, risks riskDS.DataStore, imageRanker *ranking.Ranker, imageComponentRanker *ranking.Ranker, imageCVEInfo imageCVEInfoDS.DataStore) DataStore { - ds := newDatastoreImpl(storage, risks, imageRanker, imageComponentRanker, imageCVEInfo) +func NewWithPostgres(storage store.Store, risks riskDS.DataStore, imageRanker *ranking.Ranker, imageComponentRanker *ranking.Ranker) DataStore { + ds := newDatastoreImpl(storage, risks, imageRanker, imageComponentRanker) go ds.initializeRankers() return ds } diff --git a/central/image/datastore/datastore_bench_postgres_test.go b/central/image/datastore/datastore_bench_postgres_test.go index e6019176f342f..123dfb8c5337c 100644 --- a/central/image/datastore/datastore_bench_postgres_test.go +++ b/central/image/datastore/datastore_bench_postgres_test.go @@ -9,7 +9,6 @@ import ( "fmt" "testing" - imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" "github.com/stackrox/rox/central/image/datastore/keyfence" pgStoreV2 "github.com/stackrox/rox/central/image/datastore/store/v2/postgres" "github.com/stackrox/rox/central/ranking" @@ -30,8 +29,7 @@ func BenchmarkImageGetMany(b *testing.B) { db := testDB.DB mockRisk := mockRisks.NewMockDataStore(gomock.NewController(b)) - imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(b, db) - datastore := NewWithPostgres(pgStoreV2.New(db, false, keyfence.ImageKeyFenceSingleton()), mockRisk, ranking.NewRanker(), ranking.NewRanker(), imageCVEInfo) + datastore := NewWithPostgres(pgStoreV2.New(db, false, keyfence.ImageKeyFenceSingleton()), mockRisk, ranking.NewRanker(), ranking.NewRanker()) ids := make([]string, 0, 100) images := make([]*storage.Image, 0, 100) @@ -70,8 +68,7 @@ func BenchmarkImageUpsert(b *testing.B) { db := testDB.DB mockRisk := mockRisks.NewMockDataStore(gomock.NewController(b)) - imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(b, db) - datastore := NewWithPostgres(pgStoreV2.New(db, false, keyfence.ImageKeyFenceSingleton()), mockRisk, ranking.NewRanker(), ranking.NewRanker(), imageCVEInfo) + datastore := NewWithPostgres(pgStoreV2.New(db, false, keyfence.ImageKeyFenceSingleton()), mockRisk, ranking.NewRanker(), ranking.NewRanker()) images := make([]*storage.Image, 0, 100) for i := 0; i < 100; i++ { diff --git a/central/image/datastore/datastore_impl.go b/central/image/datastore/datastore_impl.go index 3d38d3f8d02c2..7bf178ff7f2c7 100644 --- a/central/image/datastore/datastore_impl.go +++ b/central/image/datastore/datastore_impl.go @@ -6,7 +6,6 @@ import ( "time" "github.com/pkg/errors" - imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" "github.com/stackrox/rox/central/globaldb" "github.com/stackrox/rox/central/image/datastore/store" "github.com/stackrox/rox/central/image/views" @@ -16,13 +15,10 @@ import ( v1 "github.com/stackrox/rox/generated/api/v1" "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/concurrency" - "github.com/stackrox/rox/pkg/cve" "github.com/stackrox/rox/pkg/errorhelpers" - "github.com/stackrox/rox/pkg/features" "github.com/stackrox/rox/pkg/images/enricher" imageTypes "github.com/stackrox/rox/pkg/images/types" "github.com/stackrox/rox/pkg/logging" - "github.com/stackrox/rox/pkg/protocompat" "github.com/stackrox/rox/pkg/sac" "github.com/stackrox/rox/pkg/sac/resources" "github.com/stackrox/rox/pkg/scancomponent" @@ -44,13 +40,10 @@ type datastoreImpl struct { imageRanker *ranking.Ranker imageComponentRanker *ranking.Ranker - - imageCVEInfoDS imageCVEInfoDS.DataStore } func 
newDatastoreImpl(storage store.Store, risks riskDS.DataStore, - imageRanker *ranking.Ranker, imageComponentRanker *ranking.Ranker, - imageCVEInfo imageCVEInfoDS.DataStore) *datastoreImpl { + imageRanker *ranking.Ranker, imageComponentRanker *ranking.Ranker) *datastoreImpl { ds := &datastoreImpl{ storage: storage, @@ -59,8 +52,6 @@ func newDatastoreImpl(storage store.Store, risks riskDS.DataStore, imageRanker: imageRanker, imageComponentRanker: imageComponentRanker, - imageCVEInfoDS: imageCVEInfo, - keyedMutex: concurrency.NewKeyedMutex(globaldb.DefaultDataStorePoolSize), } return ds @@ -274,20 +265,6 @@ func (ds *datastoreImpl) UpsertImage(ctx context.Context, image *storage.Image) ds.keyedMutex.Lock(image.GetId()) defer ds.keyedMutex.Unlock(image.GetId()) - if features.CVEFixTimestampCriteria.Enabled() { - // Populate the ImageCVEInfo table with CVE timing metadata - if err := ds.upsertImageCVEInfos(ctx, image); err != nil { - log.Warnf("Failed to upsert ImageCVEInfo: %v", err) - // Non-fatal, continue with image upsert - } - - // Enrich the CVEs with accurate timestamps from lookup table - if err := ds.enrichCVEsFromImageCVEInfo(ctx, image); err != nil { - log.Warnf("Failed to enrich CVEs from ImageCVEInfo: %v", err) - // Non-fatal, continue with image upsert - } - } - ds.updateComponentRisk(image) enricher.FillScanStats(image) @@ -393,85 +370,6 @@ func (ds *datastoreImpl) updateComponentRisk(image *storage.Image) { } } -// upsertImageCVEInfos populates the ImageCVEInfo lookup table with CVE timing metadata. -func (ds *datastoreImpl) upsertImageCVEInfos(ctx context.Context, image *storage.Image) error { - if !features.CVEFixTimestampCriteria.Enabled() { - return nil - } - - infos := make([]*storage.ImageCVEInfo, 0) - now := protocompat.TimestampNow() - - for _, component := range image.GetScan().GetComponents() { - for _, vuln := range component.GetVulns() { - // Determine fix available timestamp: use scanner-provided value if available, - // otherwise fabricate from scan time if the CVE is fixable (has a fix version). - // This handles non-Red Hat data sources that don't provide fix timestamps. - fixAvailableTimestamp := vuln.GetFixAvailableTimestamp() - if fixAvailableTimestamp == nil && vuln.GetFixedBy() != "" { - fixAvailableTimestamp = now - } - - info := &storage.ImageCVEInfo{ - Id: cve.ImageCVEInfoID(vuln.GetCve(), component.GetName(), vuln.GetDatasource()), - Cve: vuln.GetCve(), - FixAvailableTimestamp: fixAvailableTimestamp, - FirstSystemOccurrence: now, // Smart upsert in ImageCVEInfo datastore preserves existing - } - infos = append(infos, info) - } - } - - return ds.imageCVEInfoDS.UpsertMany(ctx, infos) -} - -// enrichCVEsFromImageCVEInfo enriches the image's CVEs with accurate timestamps from the lookup table. 
-func (ds *datastoreImpl) enrichCVEsFromImageCVEInfo(ctx context.Context, image *storage.Image) error { - if !features.CVEFixTimestampCriteria.Enabled() { - return nil - } - - // Collect all IDs - ids := make([]string, 0) - for _, component := range image.GetScan().GetComponents() { - for _, vuln := range component.GetVulns() { - ids = append(ids, cve.ImageCVEInfoID(vuln.GetCve(), component.GetName(), vuln.GetDatasource())) - } - } - - if len(ids) == 0 { - return nil - } - - // Batch fetch - infos, err := ds.imageCVEInfoDS.GetBatch(ctx, ids) - if err != nil { - return err - } - - // Build lookup map - infoMap := make(map[string]*storage.ImageCVEInfo) - for _, info := range infos { - infoMap[info.GetId()] = info - } - - // Enrich CVEs and blank out datasource after using it - for _, component := range image.GetScan().GetComponents() { - for _, vuln := range component.GetVulns() { - id := cve.ImageCVEInfoID(vuln.GetCve(), component.GetName(), vuln.GetDatasource()) - if info, ok := infoMap[id]; ok { - if vuln.GetFixAvailableTimestamp() == nil && vuln.GetFixedBy() != "" { - // Set the fix timestamp if it was not provided by the scanner - vuln.FixAvailableTimestamp = info.GetFixAvailableTimestamp() - } - vuln.FirstSystemOccurrence = info.GetFirstSystemOccurrence() - } - } - } - - return nil -} - // ImageSearchResultConverter converts image search results to proto search results type ImageSearchResultConverter struct{} diff --git a/central/image/datastore/datastore_impl_flat_postgres_test.go b/central/image/datastore/datastore_impl_flat_postgres_test.go index 80cb5e9c1ade3..adb8d6d52eaf4 100644 --- a/central/image/datastore/datastore_impl_flat_postgres_test.go +++ b/central/image/datastore/datastore_impl_flat_postgres_test.go @@ -8,7 +8,6 @@ import ( "sort" "testing" - imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" imageCVEDS "github.com/stackrox/rox/central/cve/image/v2/datastore" imageCVEPostgres "github.com/stackrox/rox/central/cve/image/v2/datastore/store/postgres" "github.com/stackrox/rox/central/image/datastore/keyfence" @@ -45,14 +44,13 @@ func TestImageFlatDataStoreWithPostgres(t *testing.T) { type ImageFlatPostgresDataStoreTestSuite struct { suite.Suite - ctx context.Context - testDB *pgtest.TestPostgres - db postgres.DB - datastore DataStore - mockRisk *mockRisks.MockDataStore - componentDataStore imageComponentDS.DataStore - cveDataStore imageCVEDS.DataStore - imageCVEInfoDatastore imageCVEInfoDS.DataStore + ctx context.Context + testDB *pgtest.TestPostgres + db postgres.DB + datastore DataStore + mockRisk *mockRisks.MockDataStore + componentDataStore imageComponentDS.DataStore + cveDataStore imageCVEDS.DataStore } func (s *ImageFlatPostgresDataStoreTestSuite) SetupSuite() { @@ -63,9 +61,8 @@ func (s *ImageFlatPostgresDataStoreTestSuite) SetupSuite() { func (s *ImageFlatPostgresDataStoreTestSuite) SetupTest() { s.mockRisk = mockRisks.NewMockDataStore(gomock.NewController(s.T())) - s.imageCVEInfoDatastore = imageCVEInfoDS.GetTestPostgresDataStore(s.T(), s.db) dbStore := pgStoreV2.New(s.db, false, keyfence.ImageKeyFenceSingleton()) - s.datastore = NewWithPostgres(dbStore, s.mockRisk, ranking.ImageRanker(), ranking.ComponentRanker(), s.imageCVEInfoDatastore) + s.datastore = NewWithPostgres(dbStore, s.mockRisk, ranking.ImageRanker(), ranking.ComponentRanker()) componentStorage := imageComponentPostgres.New(s.db) s.componentDataStore = imageComponentDS.New(componentStorage, s.mockRisk, ranking.NewRanker()) @@ -639,202 +636,3 @@ func 
cloneAndUpdateRiskPriority(image *storage.Image) *storage.Image { cloned.LastUpdated = image.GetLastUpdated() return cloned } - -// TestImageCVEInfoIntegration tests the ImageCVEInfo lookup table integration -func (s *ImageFlatPostgresDataStoreTestSuite) TestImageCVEInfoIntegration_PopulatesTable() { - // Enable the feature flag - s.T().Setenv("ROX_CVE_FIX_TIMESTAMP", "true") - - ctx := sac.WithGlobalAccessScopeChecker(context.Background(), sac.AllowFixedScopes( - sac.AccessModeScopeKeys(storage.Access_READ_ACCESS, storage.Access_READ_WRITE_ACCESS), - sac.ResourceScopeKeys(resources.Image), - )) - - // Create an image with CVEs that have fix timestamps - fixTime := protocompat.TimestampNow() - image := &storage.Image{ - Id: "test-image-cve-info", - Name: &storage.ImageName{ - FullName: "test/image:tag", - }, - Scan: &storage.ImageScan{ - OperatingSystem: "debian", - ScanTime: protocompat.TimestampNow(), - Components: []*storage.EmbeddedImageScanComponent{ - { - Name: "openssl", - Version: "1.1.1", - Vulns: []*storage.EmbeddedVulnerability{ - { - Cve: "CVE-2021-1234", - VulnerabilityType: storage.EmbeddedVulnerability_IMAGE_VULNERABILITY, - Datasource: "debian-bookworm-updater::debian:12", - FixAvailableTimestamp: fixTime, - }, - }, - }, - }, - }, - } - - // Upsert the image - s.NoError(s.datastore.UpsertImage(ctx, image)) - - // Verify ImageCVEInfo was populated - expectedID := pkgCVE.ImageCVEInfoID("CVE-2021-1234", "openssl", "debian-bookworm-updater::debian:12") - info, found, err := s.imageCVEInfoDatastore.Get(ctx, expectedID) - s.NoError(err) - s.True(found, "ImageCVEInfo should be populated after image upsert") - s.NotNil(info.GetFirstSystemOccurrence(), "FirstSystemOccurrence should be set") - s.Equal(fixTime.GetSeconds(), info.GetFixAvailableTimestamp().GetSeconds(), "FixAvailableTimestamp should match") -} - -func (s *ImageFlatPostgresDataStoreTestSuite) TestImageCVEInfoIntegration_EnrichesFromLookupTable() { - // Enable the feature flag - s.T().Setenv("ROX_CVE_FIX_TIMESTAMP", "true") - - ctx := sac.WithGlobalAccessScopeChecker(context.Background(), sac.AllowFixedScopes( - sac.AccessModeScopeKeys(storage.Access_READ_ACCESS, storage.Access_READ_WRITE_ACCESS), - sac.ResourceScopeKeys(resources.Image), - )) - - // Pre-populate ImageCVEInfo with known timestamps - earlierTime := protocompat.TimestampNow() - earlierTime.Seconds -= 86400 // 1 day ago - - preExistingInfo := &storage.ImageCVEInfo{ - Id: pkgCVE.ImageCVEInfoID("CVE-2021-5678", "curl", "debian-updater::debian:11"), - Cve: "CVE-2021-5678", - FixAvailableTimestamp: earlierTime, - FirstSystemOccurrence: earlierTime, - } - s.NoError(s.imageCVEInfoDatastore.Upsert(ctx, preExistingInfo)) - - // Create an image with a CVE that matches the pre-existing info - image := &storage.Image{ - Id: "test-image-enrich", - Name: &storage.ImageName{ - FullName: "test/enrich:tag", - }, - Scan: &storage.ImageScan{ - OperatingSystem: "debian", - ScanTime: protocompat.TimestampNow(), - Components: []*storage.EmbeddedImageScanComponent{ - { - Name: "curl", - Version: "7.68.0", - Vulns: []*storage.EmbeddedVulnerability{ - { - Cve: "CVE-2021-5678", - VulnerabilityType: storage.EmbeddedVulnerability_IMAGE_VULNERABILITY, - Datasource: "debian-updater::debian:11", - // No fix timestamp set - should be enriched from lookup - }, - }, - }, - }, - }, - } - - // Upsert the image - s.NoError(s.datastore.UpsertImage(ctx, image)) - - // Retrieve the image and verify CVE was enriched - storedImage, found, err := s.datastore.GetImage(ctx, image.GetId()) - 
s.NoError(err) - s.True(found) - - // Check that the CVE was enriched with timestamps from the lookup table - vuln := storedImage.GetScan().GetComponents()[0].GetVulns()[0] - s.NotNil(vuln.GetFirstSystemOccurrence(), "FirstSystemOccurrence should be enriched") - s.Equal(earlierTime.GetSeconds(), vuln.GetFirstSystemOccurrence().GetSeconds(), "FirstSystemOccurrence should match pre-existing value") -} - -func (s *ImageFlatPostgresDataStoreTestSuite) TestImageCVEInfoIntegration_PreservesEarlierTimestamps() { - // Enable the feature flag - s.T().Setenv("ROX_CVE_FIX_TIMESTAMP", "true") - - ctx := sac.WithGlobalAccessScopeChecker(context.Background(), sac.AllowFixedScopes( - sac.AccessModeScopeKeys(storage.Access_READ_ACCESS, storage.Access_READ_WRITE_ACCESS), - sac.ResourceScopeKeys(resources.Image), - )) - - // First image upsert - establishes initial timestamps - firstFixTime := protocompat.TimestampNow() - firstFixTime.Seconds -= 86400 // 1 day ago - - image1 := &storage.Image{ - Id: "test-image-preserve-1", - Name: &storage.ImageName{ - FullName: "test/preserve:v1", - }, - Scan: &storage.ImageScan{ - OperatingSystem: "alpine", - ScanTime: protocompat.TimestampNow(), - Components: []*storage.EmbeddedImageScanComponent{ - { - Name: "busybox", - Version: "1.33.0", - Vulns: []*storage.EmbeddedVulnerability{ - { - Cve: "CVE-2021-9999", - VulnerabilityType: storage.EmbeddedVulnerability_IMAGE_VULNERABILITY, - Datasource: "alpine-updater::alpine:3.14", - FixAvailableTimestamp: firstFixTime, - }, - }, - }, - }, - }, - } - s.NoError(s.datastore.UpsertImage(ctx, image1)) - - // Get the first system occurrence time - infoID := pkgCVE.ImageCVEInfoID("CVE-2021-9999", "busybox", "alpine-updater::alpine:3.14") - info1, found, err := s.imageCVEInfoDatastore.Get(ctx, infoID) - s.NoError(err) - s.True(found) - firstOccurrence := info1.GetFirstSystemOccurrence() - - // Second image upsert with a later fix timestamp - should preserve earlier timestamps - laterFixTime := protocompat.TimestampNow() - - image2 := &storage.Image{ - Id: "test-image-preserve-2", - Name: &storage.ImageName{ - FullName: "test/preserve:v2", - }, - Scan: &storage.ImageScan{ - OperatingSystem: "alpine", - ScanTime: protocompat.TimestampNow(), - Components: []*storage.EmbeddedImageScanComponent{ - { - Name: "busybox", - Version: "1.33.1", - Vulns: []*storage.EmbeddedVulnerability{ - { - Cve: "CVE-2021-9999", - VulnerabilityType: storage.EmbeddedVulnerability_IMAGE_VULNERABILITY, - Datasource: "alpine-updater::alpine:3.14", - FixAvailableTimestamp: laterFixTime, // Later timestamp - }, - }, - }, - }, - }, - } - s.NoError(s.datastore.UpsertImage(ctx, image2)) - - // Verify earlier timestamps are preserved - info2, found, err := s.imageCVEInfoDatastore.Get(ctx, infoID) - s.NoError(err) - s.True(found) - - // FirstSystemOccurrence should remain the same (earlier value preserved) - s.Equal(firstOccurrence.GetSeconds(), info2.GetFirstSystemOccurrence().GetSeconds(), - "FirstSystemOccurrence should preserve the earlier timestamp") - - // FixAvailableTimestamp should also preserve the earlier value - s.Equal(firstFixTime.GetSeconds(), info2.GetFixAvailableTimestamp().GetSeconds(), - "FixAvailableTimestamp should preserve the earlier timestamp") -} diff --git a/central/image/datastore/datastore_test_constructors.go b/central/image/datastore/datastore_test_constructors.go index 2b4b9d816ab7c..092ee90487bd6 100644 --- a/central/image/datastore/datastore_test_constructors.go +++ b/central/image/datastore/datastore_test_constructors.go @@ -3,7 +3,6 @@ 
package datastore import ( "testing" - imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" "github.com/stackrox/rox/central/image/datastore/keyfence" pgStoreV2 "github.com/stackrox/rox/central/image/datastore/store/v2/postgres" "github.com/stackrox/rox/central/ranking" @@ -17,6 +16,5 @@ func GetTestPostgresDataStore(t testing.TB, pool postgres.DB) DataStore { riskStore := riskDS.GetTestPostgresDataStore(t, pool) imageRanker := ranking.ImageRanker() imageComponentRanker := ranking.ComponentRanker() - imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(t, pool) - return NewWithPostgres(dbstore, riskStore, imageRanker, imageComponentRanker, imageCVEInfo) + return NewWithPostgres(dbstore, riskStore, imageRanker, imageComponentRanker) } diff --git a/central/image/datastore/singleton.go b/central/image/datastore/singleton.go index f8e48e2342370..1c4c9553c4828 100644 --- a/central/image/datastore/singleton.go +++ b/central/image/datastore/singleton.go @@ -1,7 +1,6 @@ package datastore import ( - imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" "github.com/stackrox/rox/central/globaldb" "github.com/stackrox/rox/central/image/datastore/keyfence" pgStoreV2 "github.com/stackrox/rox/central/image/datastore/store/v2/postgres" @@ -18,7 +17,7 @@ var ( func initialize() { storage := pgStoreV2.New(globaldb.GetPostgres(), false, keyfence.ImageKeyFenceSingleton()) - ad = NewWithPostgres(storage, riskDS.Singleton(), ranking.ImageRanker(), ranking.ComponentRanker(), imageCVEInfoDS.Singleton()) + ad = NewWithPostgres(storage, riskDS.Singleton(), ranking.ImageRanker(), ranking.ComponentRanker()) } // Singleton provides the interface for non-service external interaction. diff --git a/central/imagev2/datastore/datastore.go b/central/imagev2/datastore/datastore.go index 1a78cb2debad5..822590f67a77d 100644 --- a/central/imagev2/datastore/datastore.go +++ b/central/imagev2/datastore/datastore.go @@ -3,7 +3,6 @@ package datastore import ( "context" - imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" "github.com/stackrox/rox/central/imagev2/datastore/store" "github.com/stackrox/rox/central/imagev2/views" "github.com/stackrox/rox/central/ranking" @@ -39,8 +38,8 @@ type DataStore interface { } // NewWithPostgres returns a new instance of DataStore using the input store, and searcher. 
-func NewWithPostgres(storage store.Store, risks riskDS.DataStore, imageRanker *ranking.Ranker, imageComponentRanker *ranking.Ranker, imageCVEInfo imageCVEInfoDS.DataStore) DataStore { - ds := newDatastoreImpl(storage, risks, imageRanker, imageComponentRanker, imageCVEInfo) +func NewWithPostgres(storage store.Store, risks riskDS.DataStore, imageRanker *ranking.Ranker, imageComponentRanker *ranking.Ranker) DataStore { + ds := newDatastoreImpl(storage, risks, imageRanker, imageComponentRanker) go ds.initializeRankers() return ds } diff --git a/central/imagev2/datastore/datastore_bench_postgres_test.go b/central/imagev2/datastore/datastore_bench_postgres_test.go index 97fcc86dcb9e7..db7d1248ca418 100644 --- a/central/imagev2/datastore/datastore_bench_postgres_test.go +++ b/central/imagev2/datastore/datastore_bench_postgres_test.go @@ -7,7 +7,6 @@ import ( "fmt" "testing" - imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" "github.com/stackrox/rox/central/image/datastore/keyfence" pgStore "github.com/stackrox/rox/central/imagev2/datastore/store/postgres" "github.com/stackrox/rox/central/ranking" @@ -33,8 +32,7 @@ func BenchmarkImageGetMany(b *testing.B) { db := testDB.DB mockRisk := mockRisks.NewMockDataStore(gomock.NewController(b)) - imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(b, db) - datastore := NewWithPostgres(pgStore.New(db, false, keyfence.ImageKeyFenceSingleton()), mockRisk, ranking.NewRanker(), ranking.NewRanker(), imageCVEInfo) + datastore := NewWithPostgres(pgStore.New(db, false, keyfence.ImageKeyFenceSingleton()), mockRisk, ranking.NewRanker(), ranking.NewRanker()) ids := make([]string, 0, 100) images := make([]*storage.ImageV2, 0, 100) @@ -76,8 +74,7 @@ func BenchmarkImageUpsert(b *testing.B) { db := testDB.DB mockRisk := mockRisks.NewMockDataStore(gomock.NewController(b)) - imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(b, db) - datastore := NewWithPostgres(pgStore.New(db, false, keyfence.ImageKeyFenceSingleton()), mockRisk, ranking.NewRanker(), ranking.NewRanker(), imageCVEInfo) + datastore := NewWithPostgres(pgStore.New(db, false, keyfence.ImageKeyFenceSingleton()), mockRisk, ranking.NewRanker(), ranking.NewRanker()) images := make([]*storage.ImageV2, 0, 100) for i := 0; i < 100; i++ { diff --git a/central/imagev2/datastore/datastore_impl.go b/central/imagev2/datastore/datastore_impl.go index ab3273447dfee..461fb17c2bb27 100644 --- a/central/imagev2/datastore/datastore_impl.go +++ b/central/imagev2/datastore/datastore_impl.go @@ -6,7 +6,6 @@ import ( "time" "github.com/pkg/errors" - imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" "github.com/stackrox/rox/central/globaldb" "github.com/stackrox/rox/central/imagev2/datastore/store" "github.com/stackrox/rox/central/imagev2/views" @@ -16,12 +15,9 @@ import ( v1 "github.com/stackrox/rox/generated/api/v1" "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/concurrency" - "github.com/stackrox/rox/pkg/cve" "github.com/stackrox/rox/pkg/errorhelpers" - "github.com/stackrox/rox/pkg/features" "github.com/stackrox/rox/pkg/images/utils" "github.com/stackrox/rox/pkg/logging" - "github.com/stackrox/rox/pkg/protocompat" "github.com/stackrox/rox/pkg/sac" "github.com/stackrox/rox/pkg/sac/resources" "github.com/stackrox/rox/pkg/scancomponent" @@ -44,13 +40,10 @@ type datastoreImpl struct { imageRanker *ranking.Ranker imageComponentRanker *ranking.Ranker - - imageCVEInfoDS imageCVEInfoDS.DataStore } func newDatastoreImpl(storage store.Store, risks 
riskDS.DataStore, - imageRanker *ranking.Ranker, imageComponentRanker *ranking.Ranker, - imageCVEInfo imageCVEInfoDS.DataStore) *datastoreImpl { + imageRanker *ranking.Ranker, imageComponentRanker *ranking.Ranker) *datastoreImpl { ds := &datastoreImpl{ storage: storage, @@ -59,8 +52,6 @@ func newDatastoreImpl(storage store.Store, risks riskDS.DataStore, imageRanker: imageRanker, imageComponentRanker: imageComponentRanker, - imageCVEInfoDS: imageCVEInfo, - keyedMutex: concurrency.NewKeyedMutex(globaldb.DefaultDataStorePoolSize), } return ds @@ -280,20 +271,6 @@ func (ds *datastoreImpl) UpsertImage(ctx context.Context, image *storage.ImageV2 ds.keyedMutex.Lock(image.GetId()) defer ds.keyedMutex.Unlock(image.GetId()) - if features.CVEFixTimestampCriteria.Enabled() { - // Populate the ImageCVEInfo lookup table with CVE timing metadata - if err := ds.upsertImageCVEInfos(ctx, image); err != nil { - log.Warnf("Failed to upsert ImageCVEInfo: %v", err) - // Non-fatal, continue with image upsert - } - - // Enrich the CVEs with accurate timestamps from lookup table - if err := ds.enrichCVEsFromImageCVEInfo(ctx, image); err != nil { - log.Warnf("Failed to enrich CVEs from ImageCVEInfo: %v", err) - // Non-fatal, continue with image upsert - } - } - ds.updateComponentRisk(image) utils.FillScanStatsV2(image) @@ -399,85 +376,6 @@ func (ds *datastoreImpl) updateComponentRisk(image *storage.ImageV2) { } } -// upsertImageCVEInfos populates the ImageCVEInfo lookup table with CVE timing metadata. -func (ds *datastoreImpl) upsertImageCVEInfos(ctx context.Context, image *storage.ImageV2) error { - if !features.CVEFixTimestampCriteria.Enabled() { - return nil - } - - infos := make([]*storage.ImageCVEInfo, 0) - now := protocompat.TimestampNow() - - for _, component := range image.GetScan().GetComponents() { - for _, vuln := range component.GetVulns() { - // Determine fix available timestamp: use scanner-provided value if available, - // otherwise fabricate from scan time if the CVE is fixable (has a fix version). - // This handles non-Red Hat data sources that don't provide fix timestamps. - fixAvailableTimestamp := vuln.GetFixAvailableTimestamp() - if fixAvailableTimestamp == nil && vuln.GetFixedBy() != "" { - fixAvailableTimestamp = now - } - - info := &storage.ImageCVEInfo{ - Id: cve.ImageCVEInfoID(vuln.GetCve(), component.GetName(), vuln.GetDatasource()), - Cve: vuln.GetCve(), - FixAvailableTimestamp: fixAvailableTimestamp, - FirstSystemOccurrence: now, // Smart upsert in ImageCVEInfo datastore preserves existing - } - infos = append(infos, info) - } - } - - return ds.imageCVEInfoDS.UpsertMany(ctx, infos) -} - -// enrichCVEsFromImageCVEInfo enriches the image's CVEs with accurate timestamps from the lookup table. 
-func (ds *datastoreImpl) enrichCVEsFromImageCVEInfo(ctx context.Context, image *storage.ImageV2) error { - if !features.CVEFixTimestampCriteria.Enabled() { - return nil - } - - // Collect all IDs - ids := make([]string, 0) - for _, component := range image.GetScan().GetComponents() { - for _, vuln := range component.GetVulns() { - ids = append(ids, cve.ImageCVEInfoID(vuln.GetCve(), component.GetName(), vuln.GetDatasource())) - } - } - - if len(ids) == 0 { - return nil - } - - // Batch fetch - infos, err := ds.imageCVEInfoDS.GetBatch(ctx, ids) - if err != nil { - return err - } - - // Build lookup map - infoMap := make(map[string]*storage.ImageCVEInfo) - for _, info := range infos { - infoMap[info.GetId()] = info - } - - // Enrich CVEs and blank out datasource after using it - for _, component := range image.GetScan().GetComponents() { - for _, vuln := range component.GetVulns() { - id := cve.ImageCVEInfoID(vuln.GetCve(), component.GetName(), vuln.GetDatasource()) - if info, ok := infoMap[id]; ok { - if vuln.GetFixAvailableTimestamp() == nil && vuln.GetFixedBy() != "" { - // Set the fix timestamp if it was not provided by the scanner - vuln.FixAvailableTimestamp = info.GetFixAvailableTimestamp() - } - vuln.FirstSystemOccurrence = info.GetFirstSystemOccurrence() - } - } - } - - return nil -} - // ImageSearchResultConverter implements search.SearchResultConverter for image search results. // This enables single-pass query construction for SearchResult protos. type ImageSearchResultConverter struct{} diff --git a/central/imagev2/datastore/datastore_test_constructors.go b/central/imagev2/datastore/datastore_test_constructors.go index 8ad2d8401c852..0fa3f8dc5e127 100644 --- a/central/imagev2/datastore/datastore_test_constructors.go +++ b/central/imagev2/datastore/datastore_test_constructors.go @@ -3,7 +3,6 @@ package datastore import ( "testing" - imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" "github.com/stackrox/rox/central/imagev2/datastore/keyfence" pgStore "github.com/stackrox/rox/central/imagev2/datastore/store/postgres" "github.com/stackrox/rox/central/ranking" @@ -17,6 +16,5 @@ func GetTestPostgresDataStore(t testing.TB, pool postgres.DB) DataStore { riskStore := riskDS.GetTestPostgresDataStore(t, pool) imageRanker := ranking.ImageRanker() imageComponentRanker := ranking.ComponentRanker() - imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(t, pool) - return NewWithPostgres(dbstore, riskStore, imageRanker, imageComponentRanker, imageCVEInfo) + return NewWithPostgres(dbstore, riskStore, imageRanker, imageComponentRanker) } diff --git a/central/imagev2/datastore/singleton.go b/central/imagev2/datastore/singleton.go index 1bad2af434508..fe6f5fb3e6ed7 100644 --- a/central/imagev2/datastore/singleton.go +++ b/central/imagev2/datastore/singleton.go @@ -1,7 +1,6 @@ package datastore import ( - imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" "github.com/stackrox/rox/central/globaldb" "github.com/stackrox/rox/central/imagev2/datastore/keyfence" pgStore "github.com/stackrox/rox/central/imagev2/datastore/store/postgres" @@ -19,7 +18,7 @@ var ( func initialize() { storage := pgStore.New(globaldb.GetPostgres(), false, keyfence.ImageKeyFenceSingleton()) - ad = NewWithPostgres(storage, riskDS.Singleton(), ranking.ImageRanker(), ranking.ComponentRanker(), imageCVEInfoDS.Singleton()) + ad = NewWithPostgres(storage, riskDS.Singleton(), ranking.ImageRanker(), ranking.ComponentRanker()) } // Singleton provides the interface for non-service external 
interaction. diff --git a/central/imagev2/datastoretest/datastore_impl_test.go b/central/imagev2/datastoretest/datastore_impl_test.go index 1e92e0eab8a53..3ed6c2147ca99 100644 --- a/central/imagev2/datastoretest/datastore_impl_test.go +++ b/central/imagev2/datastoretest/datastore_impl_test.go @@ -8,7 +8,6 @@ import ( "sort" "testing" - imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" imageCVEDS "github.com/stackrox/rox/central/cve/image/v2/datastore" imageCVEPostgres "github.com/stackrox/rox/central/cve/image/v2/datastore/store/postgres" imageComponentDS "github.com/stackrox/rox/central/imagecomponent/v2/datastore" @@ -64,8 +63,7 @@ func (s *ImageV2DataStoreTestSuite) SetupSuite() { func (s *ImageV2DataStoreTestSuite) SetupTest() { s.mockRisk = mockRisks.NewMockDataStore(gomock.NewController(s.T())) dbStore := pgStore.New(s.testDB.DB, false, keyfence.ImageKeyFenceSingleton()) - imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(s.T(), s.testDB.DB) - s.datastore = imageDataStoreV2.NewWithPostgres(dbStore, s.mockRisk, ranking.NewRanker(), ranking.NewRanker(), imageCVEInfo) + s.datastore = imageDataStoreV2.NewWithPostgres(dbStore, s.mockRisk, ranking.NewRanker(), ranking.NewRanker()) componentStorage := imageComponentPostgres.New(s.testDB.DB) s.componentDataStore = imageComponentDS.New(componentStorage, s.mockRisk, ranking.NewRanker()) diff --git a/central/pruning/pruning_test.go b/central/pruning/pruning_test.go index 517fa32fb0db3..9313db7086fe1 100644 --- a/central/pruning/pruning_test.go +++ b/central/pruning/pruning_test.go @@ -17,7 +17,6 @@ import ( configDatastore "github.com/stackrox/rox/central/config/datastore" configDatastoreMocks "github.com/stackrox/rox/central/config/datastore/mocks" clusterCVEDS "github.com/stackrox/rox/central/cve/cluster/datastore/mocks" - imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" nodeCVEDS "github.com/stackrox/rox/central/cve/node/datastore" deploymentDatastore "github.com/stackrox/rox/central/deployment/datastore" imageDatastore "github.com/stackrox/rox/central/image/datastore" @@ -255,14 +254,12 @@ func (s *PruningTestSuite) generateImageDataStructures(ctx context.Context) (ale var images imageDatastore.DataStore var imagesV2 imageV2Datastore.DataStore - imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(s.T(), s.pool) if features.FlattenImageData.Enabled() { imagesV2 = imageV2Datastore.NewWithPostgres( imageV2Postgres.New(s.pool, true, concurrency.NewKeyFence()), mockRiskDatastore, ranking.ImageRanker(), ranking.ComponentRanker(), - imageCVEInfo, ) } else { images = imageDatastore.NewWithPostgres( @@ -270,7 +267,6 @@ func (s *PruningTestSuite) generateImageDataStructures(ctx context.Context) (ale mockRiskDatastore, ranking.ImageRanker(), ranking.ComponentRanker(), - imageCVEInfo, ) } diff --git a/central/reprocessor/reprocessor_test.go b/central/reprocessor/reprocessor_test.go index abb034535cba2..d95e80c37a9db 100644 --- a/central/reprocessor/reprocessor_test.go +++ b/central/reprocessor/reprocessor_test.go @@ -7,7 +7,6 @@ import ( "testing" "time" - imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" imageDatastore "github.com/stackrox/rox/central/image/datastore" imagePostgresV2 "github.com/stackrox/rox/central/image/datastore/store/v2/postgres" imageV2Datastore "github.com/stackrox/rox/central/imagev2/datastore" @@ -35,8 +34,7 @@ func TestImagesWithSignaturesQuery(t *testing.T) { pool := testingDB.DB defer pool.Close() - imageCVEInfo := 
imageCVEInfoDS.GetTestPostgresDataStore(t, pool) - imageDS := imageDatastore.NewWithPostgres(imagePostgresV2.New(pool, false, concurrency.NewKeyFence()), nil, ranking.ImageRanker(), ranking.ComponentRanker(), imageCVEInfo) + imageDS := imageDatastore.NewWithPostgres(imagePostgresV2.New(pool, false, concurrency.NewKeyFence()), nil, ranking.ImageRanker(), ranking.ComponentRanker()) imgWithSignature := fixtures.GetImage() imgWithoutSignature := fixtures.GetImageWithUniqueComponents(10) @@ -82,8 +80,7 @@ func TestImagesWithSignaturesQueryV2(t *testing.T) { pool := testingDB.DB defer pool.Close() - imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(t, pool) - imageDS := imageV2Datastore.NewWithPostgres(imageV2PG.New(pool, false, concurrency.NewKeyFence()), nil, ranking.ImageRanker(), ranking.ComponentRanker(), imageCVEInfo) + imageDS := imageV2Datastore.NewWithPostgres(imageV2PG.New(pool, false, concurrency.NewKeyFence()), nil, ranking.ImageRanker(), ranking.ComponentRanker()) imgWithSignature := fixtures.GetImageV2() imgWithoutSignature := fixtures.GetImageV2WithUniqueComponents(10) diff --git a/central/views/deployments/view_test.go b/central/views/deployments/view_test.go index a05c10c46bfcb..2a6cdb213d0da 100644 --- a/central/views/deployments/view_test.go +++ b/central/views/deployments/view_test.go @@ -7,7 +7,6 @@ import ( "sort" "testing" - imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" deploymentDS "github.com/stackrox/rox/central/deployment/datastore" imageDS "github.com/stackrox/rox/central/image/datastore" imagePostgresV2 "github.com/stackrox/rox/central/image/datastore/store/v2/postgres" @@ -102,13 +101,11 @@ func (s *DeploymentViewTestSuite) SetupSuite() { mockRisk := mockRisks.NewMockDataStore(mockCtrl) // Initialize the datastore. - imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(s.T(), s.testDB.DB) imageStore := imageDS.NewWithPostgres( imagePostgresV2.New(s.testDB.DB, false, concurrency.NewKeyFence()), mockRisk, ranking.ImageRanker(), ranking.ComponentRanker(), - imageCVEInfo, ) deploymentStore, err := deploymentDS.NewTestDataStore( s.T(), diff --git a/central/views/imagecveflat/view_test.go b/central/views/imagecveflat/view_test.go index 01a279b1d5e06..9bb239e94698d 100644 --- a/central/views/imagecveflat/view_test.go +++ b/central/views/imagecveflat/view_test.go @@ -9,7 +9,6 @@ import ( "testing" "time" - imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" imageCVEV2DS "github.com/stackrox/rox/central/cve/image/v2/datastore" deploymentDS "github.com/stackrox/rox/central/deployment/datastore" imageDS "github.com/stackrox/rox/central/image/datastore" @@ -119,13 +118,11 @@ func (s *ImageCVEFlatViewTestSuite) SetupSuite() { mockRisk := mockRisks.NewMockDataStore(mockCtrl) // Initialize the datastore. 
- imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(s.T(), s.testDB.DB) imageStore := imageDS.NewWithPostgres( imagePostgresV2.New(s.testDB.DB, false, concurrency.NewKeyFence()), mockRisk, ranking.ImageRanker(), ranking.ComponentRanker(), - imageCVEInfo, ) deploymentStore, err := deploymentDS.NewTestDataStore( s.T(), diff --git a/central/vulnmgmt/vulnerabilityrequest/manager/querymgr/query_manager_impl_test.go b/central/vulnmgmt/vulnerabilityrequest/manager/querymgr/query_manager_impl_test.go index 5bbb0ca4d5064..0533a8ee1e377 100644 --- a/central/vulnmgmt/vulnerabilityrequest/manager/querymgr/query_manager_impl_test.go +++ b/central/vulnmgmt/vulnerabilityrequest/manager/querymgr/query_manager_impl_test.go @@ -6,7 +6,6 @@ import ( "context" "testing" - imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" imageDS "github.com/stackrox/rox/central/image/datastore" imagePostgresV2 "github.com/stackrox/rox/central/image/datastore/store/v2/postgres" imageV2DS "github.com/stackrox/rox/central/imagev2/datastore" @@ -74,13 +73,11 @@ func (s *VulnReqQueryManagerTestSuite) SetupTest() { } func (s *VulnReqQueryManagerTestSuite) createImageDataStore() { - imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(s.T(), s.testDB.DB) s.imageDataStore = imageDS.NewWithPostgres( imagePostgresV2.New(s.testDB.DB, false, concurrency.NewKeyFence()), mockRisks.NewMockDataStore(s.mockCtrl), ranking.NewRanker(), ranking.NewRanker(), - imageCVEInfo, ) if features.FlattenImageData.Enabled() { s.imageV2DataStore = imageV2DS.NewWithPostgres( @@ -88,7 +85,6 @@ func (s *VulnReqQueryManagerTestSuite) createImageDataStore() { mockRisks.NewMockDataStore(s.mockCtrl), ranking.NewRanker(), ranking.NewRanker(), - imageCVEInfo, ) } } diff --git a/central/vulnmgmt/vulnerabilityrequest/manager/requestmgr/manager_impl_flat_cve_data_test.go b/central/vulnmgmt/vulnerabilityrequest/manager/requestmgr/manager_impl_flat_cve_data_test.go index d629494a40b10..c0b99b4d81b35 100644 --- a/central/vulnmgmt/vulnerabilityrequest/manager/requestmgr/manager_impl_flat_cve_data_test.go +++ b/central/vulnmgmt/vulnerabilityrequest/manager/requestmgr/manager_impl_flat_cve_data_test.go @@ -6,7 +6,6 @@ import ( "context" "testing" - imageCVEInfoDS "github.com/stackrox/rox/central/cve/image/info/datastore" imageDS "github.com/stackrox/rox/central/image/datastore" imagePostgresV2 "github.com/stackrox/rox/central/image/datastore/store/v2/postgres" imageV2DS "github.com/stackrox/rox/central/imagev2/datastore" @@ -56,14 +55,12 @@ func (m *managerImplTestFlatData) SetupSuite() { m.Require().NoError(err) var imageStore imageDS.DataStore var imageV2Store imageV2DS.DataStore - imageCVEInfo := imageCVEInfoDS.GetTestPostgresDataStore(m.T(), m.testDB.DB) if features.FlattenImageData.Enabled() { imageV2Store = imageV2DS.NewWithPostgres( imageV2Postgres.New(m.testDB.DB, false, concurrency.NewKeyFence()), mockRisk, ranking.ImageRanker(), ranking.ComponentRanker(), - imageCVEInfo, ) } else { imageStore = imageDS.NewWithPostgres( @@ -71,7 +68,6 @@ func (m *managerImplTestFlatData) SetupSuite() { mockRisk, ranking.ImageRanker(), ranking.ComponentRanker(), - imageCVEInfo, ) } m.manager = &managerImpl{ diff --git a/pkg/images/enricher/enricher.go b/pkg/images/enricher/enricher.go index 7f21c97754e01..e102cb902fbae 100644 --- a/pkg/images/enricher/enricher.go +++ b/pkg/images/enricher/enricher.go @@ -140,6 +140,15 @@ type CVESuppressor interface { EnrichImageV2WithSuppressedCVEs(image *storage.ImageV2) } +// CVEInfoEnricher provides enrichment for 
CVE timing metadata (FixAvailableTimestamp and FirstSystemOccurrence). +type CVEInfoEnricher interface { + // EnrichImageWithCVEInfo enriches a V1 image's CVEs with timing metadata from the ImageCVEInfo lookup table. + EnrichImageWithCVEInfo(ctx context.Context, image *storage.Image) error + + // EnrichImageV2WithCVEInfo enriches a V2 image's CVEs with timing metadata from the ImageCVEInfo lookup table. + EnrichImageV2WithCVEInfo(ctx context.Context, image *storage.ImageV2) error +} + // TODO(ROX-30117): Remove this and use ImageGetterV2 after ImageV2 model is fully rolled out. // ImageGetter will be used to retrieve a specific image from the datastore. type ImageGetter func(ctx context.Context, id string) (*storage.Image, bool, error) @@ -156,11 +165,12 @@ type signatureVerifierForIntegrations func(ctx context.Context, integrations []* // New returns a new ImageEnricher instance for the given subsystem. // (The subsystem is just used for Prometheus metrics.) -func New(cvesSuppressorV2 CVESuppressor, is integration.Set, subsystem pkgMetrics.Subsystem, metadataCache cache.ImageMetadata, baseImageGetter BaseImageGetter, +func New(cvesSuppressorV2 CVESuppressor, cveInfoEnricher CVEInfoEnricher, is integration.Set, subsystem pkgMetrics.Subsystem, metadataCache cache.ImageMetadata, baseImageGetter BaseImageGetter, imageGetter ImageGetter, healthReporter integrationhealth.Reporter, signatureIntegrationGetter SignatureIntegrationGetter, scanDelegator delegatedregistry.Delegator) ImageEnricher { enricher := &enricherImpl{ cvesSuppressorV2: cvesSuppressorV2, + cveInfoEnricher: cveInfoEnricher, integrations: is, // number of consecutive errors per registry or scanner to ascertain health of the integration diff --git a/pkg/images/enricher/enricher_impl.go b/pkg/images/enricher/enricher_impl.go index e65eaf1b5df62..a99eaa1a157db 100644 --- a/pkg/images/enricher/enricher_impl.go +++ b/pkg/images/enricher/enricher_impl.go @@ -48,6 +48,7 @@ var ( type enricherImpl struct { cvesSuppressorV2 CVESuppressor + cveInfoEnricher CVEInfoEnricher integrations integration.Set errorsPerRegistry map[registryTypes.ImageRegistry]int32 @@ -97,6 +98,14 @@ func (e *enricherImpl) EnrichWithVulnerabilities(image *storage.Image, component ScanResult: ScanNotDone, }, errors.Wrapf(err, "retrieving image vulnerabilities from %s [%s]", scanner.Name(), scanner.Type()) } + + // Enrich CVEs with timing metadata (FixAvailableTimestamp, FirstSystemOccurrence) + if e.cveInfoEnricher != nil { + if err := e.cveInfoEnricher.EnrichImageWithCVEInfo(context.Background(), image); err != nil { + log.Warnf("Failed to enrich CVEs from ImageCVEInfo: %v", err) + } + } + e.cvesSuppressorV2.EnrichImageWithSuppressedCVEs(image) return EnrichmentResult{ @@ -186,6 +195,13 @@ func (e *enricherImpl) delegateEnrichImage(ctx context.Context, enrichCtx Enrich image.Reset() protocompat.Merge(image, scannedImage) + // Enrich CVEs with timing metadata (FixAvailableTimestamp, FirstSystemOccurrence) + if e.cveInfoEnricher != nil { + if err := e.cveInfoEnricher.EnrichImageWithCVEInfo(ctx, image); err != nil { + log.Warnf("Failed to enrich CVEs from ImageCVEInfo: %v", err) + } + } + e.cvesSuppressorV2.EnrichImageWithSuppressedCVEs(image) return true, nil } @@ -326,6 +342,13 @@ func (e *enricherImpl) EnrichImage(ctx context.Context, enrichContext Enrichment updated = updated || didUpdateSigVerificationData + // Enrich CVEs with timing metadata (FixAvailableTimestamp, FirstSystemOccurrence) + if e.cveInfoEnricher != nil { + if err := 
e.cveInfoEnricher.EnrichImageWithCVEInfo(ctx, image); err != nil { + log.Warnf("Failed to enrich CVEs from ImageCVEInfo: %v", err) + } + } + e.cvesSuppressorV2.EnrichImageWithSuppressedCVEs(image) if !errorList.Empty() { diff --git a/pkg/images/enricher/enricher_impl_test.go b/pkg/images/enricher/enricher_impl_test.go index ce599d16da226..314fe0c68f079 100644 --- a/pkg/images/enricher/enricher_impl_test.go +++ b/pkg/images/enricher/enricher_impl_test.go @@ -1450,7 +1450,7 @@ func TestEnrichImageWithBaseImages(t *testing.T) { } func newEnricher(set *mocks.MockSet, mockReporter *reporterMocks.MockReporter) ImageEnricher { - return New(&fakeCVESuppressorV2{}, set, pkgMetrics.CentralSubsystem, + return New(&fakeCVESuppressorV2{}, nil, set, pkgMetrics.CentralSubsystem, newCache(), emptyBaseImageGetter, emptyImageGetter, diff --git a/pkg/images/enricher/enricher_v2.go b/pkg/images/enricher/enricher_v2.go index 169ac47b396f1..90122ddab9e51 100644 --- a/pkg/images/enricher/enricher_v2.go +++ b/pkg/images/enricher/enricher_v2.go @@ -39,12 +39,13 @@ type BaseImageGetterV2 func(ctx context.Context, layers []string) ([]*storage.Ba // NewV2 returns a new ImageEnricherV2 instance for the given subsystem. // (The subsystem is just used for Prometheus metrics.) -func NewV2(cvesSuppressor CVESuppressor, is integration.Set, subsystem pkgMetrics.Subsystem, metadataCache cache.ImageMetadata, baseImageGetter BaseImageGetterV2, +func NewV2(cvesSuppressor CVESuppressor, cveInfoEnricher CVEInfoEnricher, is integration.Set, subsystem pkgMetrics.Subsystem, metadataCache cache.ImageMetadata, baseImageGetter BaseImageGetterV2, imageGetter ImageGetterV2, healthReporter integrationhealth.Reporter, signatureIntegrationGetter SignatureIntegrationGetter, scanDelegator delegatedregistry.Delegator) ImageEnricherV2 { enricher := &enricherV2Impl{ - cvesSuppressor: cvesSuppressor, - integrations: is, + cvesSuppressor: cvesSuppressor, + cveInfoEnricher: cveInfoEnricher, + integrations: is, // number of consecutive errors per registry or scanner to ascertain health of the integration errorsPerRegistry: make(map[registryTypes.ImageRegistry]int32), diff --git a/pkg/images/enricher/enricher_v2_impl.go b/pkg/images/enricher/enricher_v2_impl.go index 39e110b37ac67..c0dd35d450a35 100644 --- a/pkg/images/enricher/enricher_v2_impl.go +++ b/pkg/images/enricher/enricher_v2_impl.go @@ -37,8 +37,9 @@ import ( var _ ImageEnricherV2 = (*enricherV2Impl)(nil) type enricherV2Impl struct { - cvesSuppressor CVESuppressor - integrations integration.Set + cvesSuppressor CVESuppressor + cveInfoEnricher CVEInfoEnricher + integrations integration.Set errorsPerRegistry map[registryTypes.ImageRegistry]int32 registryErrorsLock sync.RWMutex @@ -87,6 +88,14 @@ func (e *enricherV2Impl) EnrichWithVulnerabilities(imageV2 *storage.ImageV2, com ScanResult: ScanNotDone, }, errors.Wrapf(err, "retrieving image vulnerabilities from %s [%s]", scanner.Name(), scanner.Type()) } + + // Enrich CVEs with timing metadata (FixAvailableTimestamp, FirstSystemOccurrence) + if e.cveInfoEnricher != nil { + if err := e.cveInfoEnricher.EnrichImageV2WithCVEInfo(context.Background(), imageV2); err != nil { + log.Warnf("Failed to enrich CVEs from ImageCVEInfo: %v", err) + } + } + e.cvesSuppressor.EnrichImageV2WithSuppressedCVEs(imageV2) return EnrichmentResult{ @@ -177,6 +186,13 @@ func (e *enricherV2Impl) delegateEnrichImage(ctx context.Context, enrichCtx Enri imageV2.Reset() protocompat.Merge(imageV2, scannedImage) + // Enrich CVEs with timing metadata (FixAvailableTimestamp, 
FirstSystemOccurrence) + if e.cveInfoEnricher != nil { + if err := e.cveInfoEnricher.EnrichImageV2WithCVEInfo(ctx, imageV2); err != nil { + log.Warnf("Failed to enrich CVEs from ImageCVEInfo: %v", err) + } + } + e.cvesSuppressor.EnrichImageV2WithSuppressedCVEs(imageV2) return true, nil } @@ -314,6 +330,13 @@ func (e *enricherV2Impl) EnrichImage(ctx context.Context, enrichContext Enrichme updated = updated || didUpdateSigVerificationData + // Enrich CVEs with timing metadata (FixAvailableTimestamp, FirstSystemOccurrence) + if e.cveInfoEnricher != nil { + if err := e.cveInfoEnricher.EnrichImageV2WithCVEInfo(ctx, imageV2); err != nil { + log.Warnf("Failed to enrich CVEs from ImageCVEInfo: %v", err) + } + } + e.cvesSuppressor.EnrichImageV2WithSuppressedCVEs(imageV2) if !errorList.Empty() { diff --git a/pkg/images/enricher/enricher_v2_impl_test.go b/pkg/images/enricher/enricher_v2_impl_test.go index f126d4ef34808..7859c0e9ad0ee 100644 --- a/pkg/images/enricher/enricher_v2_impl_test.go +++ b/pkg/images/enricher/enricher_v2_impl_test.go @@ -1317,7 +1317,7 @@ func TestEnrichImageWithBaseImagesV2(t *testing.T) { } func newEnricherV2(set *mocks.MockSet, mockReporter *reporterMocks.MockReporter) ImageEnricherV2 { - return NewV2(&fakeCVESuppressorV2{}, set, pkgMetrics.CentralSubsystem, + return NewV2(&fakeCVESuppressorV2{}, nil, set, pkgMetrics.CentralSubsystem, newCache(), emptyBaseImageGetterV2, emptyImageGetterV2, diff --git a/pkg/images/enricher/mocks/enricher.go b/pkg/images/enricher/mocks/enricher.go index 75e10897573cb..b9b7a62f77cd9 100644 --- a/pkg/images/enricher/mocks/enricher.go +++ b/pkg/images/enricher/mocks/enricher.go @@ -136,3 +136,55 @@ func (mr *MockCVESuppressorMockRecorder) EnrichImageWithSuppressedCVEs(image any mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnrichImageWithSuppressedCVEs", reflect.TypeOf((*MockCVESuppressor)(nil).EnrichImageWithSuppressedCVEs), image) } + +// MockCVEInfoEnricher is a mock of CVEInfoEnricher interface. +type MockCVEInfoEnricher struct { + ctrl *gomock.Controller + recorder *MockCVEInfoEnricherMockRecorder + isgomock struct{} +} + +// MockCVEInfoEnricherMockRecorder is the mock recorder for MockCVEInfoEnricher. +type MockCVEInfoEnricherMockRecorder struct { + mock *MockCVEInfoEnricher +} + +// NewMockCVEInfoEnricher creates a new mock instance. +func NewMockCVEInfoEnricher(ctrl *gomock.Controller) *MockCVEInfoEnricher { + mock := &MockCVEInfoEnricher{ctrl: ctrl} + mock.recorder = &MockCVEInfoEnricherMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockCVEInfoEnricher) EXPECT() *MockCVEInfoEnricherMockRecorder { + return m.recorder +} + +// EnrichImageV2WithCVEInfo mocks base method. +func (m *MockCVEInfoEnricher) EnrichImageV2WithCVEInfo(ctx context.Context, image *storage.ImageV2) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EnrichImageV2WithCVEInfo", ctx, image) + ret0, _ := ret[0].(error) + return ret0 +} + +// EnrichImageV2WithCVEInfo indicates an expected call of EnrichImageV2WithCVEInfo. +func (mr *MockCVEInfoEnricherMockRecorder) EnrichImageV2WithCVEInfo(ctx, image any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnrichImageV2WithCVEInfo", reflect.TypeOf((*MockCVEInfoEnricher)(nil).EnrichImageV2WithCVEInfo), ctx, image) +} + +// EnrichImageWithCVEInfo mocks base method. 
+func (m *MockCVEInfoEnricher) EnrichImageWithCVEInfo(ctx context.Context, image *storage.Image) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EnrichImageWithCVEInfo", ctx, image) + ret0, _ := ret[0].(error) + return ret0 +} + +// EnrichImageWithCVEInfo indicates an expected call of EnrichImageWithCVEInfo. +func (mr *MockCVEInfoEnricherMockRecorder) EnrichImageWithCVEInfo(ctx, image any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnrichImageWithCVEInfo", reflect.TypeOf((*MockCVEInfoEnricher)(nil).EnrichImageWithCVEInfo), ctx, image) +} From 12a7375a6efe45ad6d312ddd5534d643b4e409a2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Feb 2026 10:09:56 +0100 Subject: [PATCH 128/232] chore(deps): bump github.com/bufbuild/buf from 1.64.0 to 1.65.0 in /tools/proto (#18833) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- tools/proto/go.mod | 19 +++++++++--------- tools/proto/go.sum | 50 +++++++++++++++++++--------------------------- 2 files changed, 29 insertions(+), 40 deletions(-) diff --git a/tools/proto/go.mod b/tools/proto/go.mod index 1412503a158de..0c0143957b471 100644 --- a/tools/proto/go.mod +++ b/tools/proto/go.mod @@ -3,7 +3,7 @@ module github.com/stackrox/stackrox/tools/proto go 1.25 require ( - github.com/bufbuild/buf v1.64.0 + github.com/bufbuild/buf v1.65.0 github.com/favadi/protoc-go-inject-tag v1.4.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.6.0 ) @@ -12,8 +12,8 @@ require ( buf.build/gen/go/bufbuild/bufplugin/protocolbuffers/go v1.36.11-20250718181942-e35f9b667443.1 // indirect buf.build/gen/go/bufbuild/protodescriptor/protocolbuffers/go v1.36.11-20250109164928-1da0de137947.1 // indirect buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20251209175733-2a1774d88802.1 // indirect - buf.build/gen/go/bufbuild/registry/connectrpc/go v1.19.1-20251202164234-62b14f0b533c.2 // indirect - buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.11-20251202164234-62b14f0b533c.1 // indirect + buf.build/gen/go/bufbuild/registry/connectrpc/go v1.19.1-20260126144947-819582968857.2 // indirect + buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.11-20260126144947-819582968857.1 // indirect buf.build/gen/go/pluginrpc/pluginrpc/protocolbuffers/go v1.36.11-20241007202033-cf42259fcbfc.1 // indirect buf.build/go/app v0.2.0 // indirect buf.build/go/bufplugin v0.9.0 // indirect @@ -29,16 +29,16 @@ require ( github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/antlr4-go/antlr/v4 v4.13.1 // indirect - github.com/bufbuild/protocompile v0.14.2-0.20260114160500-16922e24f2b6 // indirect + github.com/bufbuild/protocompile v0.14.2-0.20260130195850-5c64bed4577e // indirect github.com/bufbuild/protoplugin v0.0.0-20250218205857-750e09ce93e1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cli/browser v1.3.0 // indirect github.com/containerd/errdefs v1.0.0 // indirect github.com/containerd/errdefs/pkg v0.3.0 // indirect - github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.18.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/cli v29.1.5+incompatible // indirect + github.com/docker/cli v29.2.0+incompatible // indirect 
github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/docker v28.5.2+incompatible // indirect github.com/docker/docker-credential-helpers v0.9.5 // indirect @@ -49,7 +49,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gofrs/flock v0.13.0 // indirect - github.com/google/cel-go v0.26.1 // indirect + github.com/google/cel-go v0.27.0 // indirect github.com/google/go-containerregistry v0.20.7 // indirect github.com/google/uuid v1.6.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.21.0 // indirect @@ -77,7 +77,6 @@ require ( github.com/sirupsen/logrus v1.9.4 // indirect github.com/spf13/cobra v1.10.2 // indirect github.com/spf13/pflag v1.0.10 // indirect - github.com/stoewer/go-strcase v1.3.1 // indirect github.com/tetratelabs/wazero v1.11.0 // indirect github.com/tidwall/btree v1.8.1 // indirect github.com/vbatts/tar-split v0.12.2 // indirect @@ -102,8 +101,8 @@ require ( golang.org/x/sys v0.40.0 // indirect golang.org/x/term v0.39.0 // indirect golang.org/x/text v0.33.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20260114163908-3f89685c29c3 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20260114163908-3f89685c29c3 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 // indirect google.golang.org/protobuf v1.36.11 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.5.2 // indirect diff --git a/tools/proto/go.sum b/tools/proto/go.sum index 590781cd7a65a..0a67f2c2ef411 100644 --- a/tools/proto/go.sum +++ b/tools/proto/go.sum @@ -4,10 +4,10 @@ buf.build/gen/go/bufbuild/protodescriptor/protocolbuffers/go v1.36.11-2025010916 buf.build/gen/go/bufbuild/protodescriptor/protocolbuffers/go v1.36.11-20250109164928-1da0de137947.1/go.mod h1:8PRKXhgNes29Tjrnv8KdZzg3I1QceOkzibW1QK7EXv0= buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20251209175733-2a1774d88802.1 h1:j9yeqTWEFrtimt8Nng2MIeRrpoCvQzM9/g25XTvqUGg= buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20251209175733-2a1774d88802.1/go.mod h1:tvtbpgaVXZX4g6Pn+AnzFycuRK3MOz5HJfEGeEllXYM= -buf.build/gen/go/bufbuild/registry/connectrpc/go v1.19.1-20251202164234-62b14f0b533c.2 h1:eQ6XRVUaYYZFOZvBsyrOYLWbw6464s5dVnHscxa0b8w= -buf.build/gen/go/bufbuild/registry/connectrpc/go v1.19.1-20251202164234-62b14f0b533c.2/go.mod h1:omxVRch3jEPMINnUipLsuRWoEhND6LPXELKBG7xzyDw= -buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.11-20251202164234-62b14f0b533c.1 h1:PdfIJUbUVKdajMVYuMdvr2Wvo+wmzGnlPEYA4bhFaWI= -buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.11-20251202164234-62b14f0b533c.1/go.mod h1:1JJi9jvOqRxSMa+JxiZSm57doB+db/1WYCIa2lHfc40= +buf.build/gen/go/bufbuild/registry/connectrpc/go v1.19.1-20260126144947-819582968857.2 h1:XPrWCd9ydEo5Ofv1aNJVJaxndMXLQjRO9vVzsJG3jL8= +buf.build/gen/go/bufbuild/registry/connectrpc/go v1.19.1-20260126144947-819582968857.2/go.mod h1:mpsjeEaxOYPIJV2cz4IagLghZufRvx+NPVtInjEeoQ8= +buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.11-20260126144947-819582968857.1 h1:Yreby6Ypa58wdQUEm9Fnc5g8n/jP487Dq3aK5yBYwfk= +buf.build/gen/go/bufbuild/registry/protocolbuffers/go v1.36.11-20260126144947-819582968857.1/go.mod h1:1JJi9jvOqRxSMa+JxiZSm57doB+db/1WYCIa2lHfc40= buf.build/gen/go/pluginrpc/pluginrpc/protocolbuffers/go v1.36.11-20241007202033-cf42259fcbfc.1 
h1:iGPvEJltOXUMANWf0zajcRcbiOXLD90ZwPUFvbcuv6Q= buf.build/gen/go/pluginrpc/pluginrpc/protocolbuffers/go v1.36.11-20241007202033-cf42259fcbfc.1/go.mod h1:nWVKKRA29zdt4uvkjka3i/y4mkrswyWwiu0TbdX0zts= buf.build/go/app v0.2.0 h1:NYaH13A+RzPb7M5vO8uZYZ2maBZI5+MS9A9tQm66fy8= @@ -38,14 +38,14 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= -github.com/bmatcuk/doublestar/v4 v4.9.1 h1:X8jg9rRZmJd4yRy7ZeNDRnM+T3ZfHv15JiBJ/avrEXE= -github.com/bmatcuk/doublestar/v4 v4.9.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= +github.com/bmatcuk/doublestar/v4 v4.10.0 h1:zU9WiOla1YA122oLM6i4EXvGW62DvKZVxIe6TYWexEs= +github.com/bmatcuk/doublestar/v4 v4.10.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/brianvoe/gofakeit/v6 v6.28.0 h1:Xib46XXuQfmlLS2EXRuJpqcw8St6qSZz75OUo0tgAW4= github.com/brianvoe/gofakeit/v6 v6.28.0/go.mod h1:Xj58BMSnFqcn/fAQeSK+/PLtC5kSb7FJIq4JyGa8vEs= -github.com/bufbuild/buf v1.64.0 h1:puHWFcVKmZFSu4KuaN0kZiQ32n7VVc3un1FeLU77XUs= -github.com/bufbuild/buf v1.64.0/go.mod h1:U4ISwkjZXRLMaCkPG9zp1xY3xHEIwhCFwyNAaA56SGw= -github.com/bufbuild/protocompile v0.14.2-0.20260114160500-16922e24f2b6 h1:0PbP1qlDR1ZVc0WBkGmB2Rup3CwtSvLI3nZBltDg4G8= -github.com/bufbuild/protocompile v0.14.2-0.20260114160500-16922e24f2b6/go.mod h1:5UUj46Eu+U+C59C5N6YilaMI7WWfP2bW9xGcOkme2DI= +github.com/bufbuild/buf v1.65.0 h1:f2BzeCY9rRh9P5KD340ZoPAaFLTkssoUTHx7lpqozgg= +github.com/bufbuild/buf v1.65.0/go.mod h1:7SAs2YqGpPXHqBBXBeYQbCzY0OQq4Jbg6XCqirEiYvQ= +github.com/bufbuild/protocompile v0.14.2-0.20260130195850-5c64bed4577e h1:emH16Bf1w4C0cJ3ge4QtBAl4sIYJe23EfpWH0SpA9co= +github.com/bufbuild/protocompile v0.14.2-0.20260130195850-5c64bed4577e/go.mod h1:cxhE8h+14t0Yxq2H9MV/UggzQ1L0gh0t2tJobITWsBE= github.com/bufbuild/protoplugin v0.0.0-20250218205857-750e09ce93e1 h1:V1xulAoqLqVg44rY97xOR+mQpD2N+GzhMHVwJ030WEU= github.com/bufbuild/protoplugin v0.0.0-20250218205857-750e09ce93e1/go.mod h1:c5D8gWRIZ2HLWO3gXYTtUfw/hbJyD8xikv2ooPxnklQ= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= @@ -60,20 +60,19 @@ github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151X github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8= -github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q= +github.com/containerd/stargz-snapshotter/estargz v0.18.2 h1:yXkZFYIzz3eoLwlTUZKz2iQ4MrckBxJjkmD16ynUTrw= +github.com/containerd/stargz-snapshotter/estargz v0.18.2/go.mod h1:XyVU5tcJ3PRpkA9XS2T5us6Eg35yM0214Y+wvrZTBrY= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo= github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod 
h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v29.1.5+incompatible h1:GckbANUt3j+lsnQ6eCcQd70mNSOismSHWt8vk2AX8ao= -github.com/docker/cli v29.1.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v29.2.0+incompatible h1:9oBd9+YM7rxjZLfyMGxjraKBKE4/nVyvVfN4qNl9XRM= +github.com/docker/cli v29.2.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM= @@ -97,8 +96,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/gofrs/flock v0.13.0 h1:95JolYOvGMqeH31+FC7D2+uULf6mG61mEZ/A8dRYMzw= github.com/gofrs/flock v0.13.0/go.mod h1:jxeyy9R1auM5S6JYDBhDt+E2TCo7DkratH4Pgi8P+Z0= -github.com/google/cel-go v0.26.1 h1:iPbVVEdkhTX++hpe3lzSk7D3G3QSYqLGoHOcEio+UXQ= -github.com/google/cel-go v0.26.1/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM= +github.com/google/cel-go v0.27.0 h1:e7ih85+4qVrBuqQWTW4FKSqZYokVuc3HnhH5keboFTo= +github.com/google/cel-go v0.27.0/go.mod h1:tTJ11FWqnhw5KKpnWpvW9CJC3Y9GK4EIS0WXnBbebzw= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= @@ -175,14 +174,6 @@ github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiT github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stoewer/go-strcase v1.3.1 h1:iS0MdW+kVTxgMoE1LAZyMiYJFKlOzLooE4MxjirtkAs= -github.com/stoewer/go-strcase v1.3.1/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/tetratelabs/wazero v1.11.0 h1:+gKemEuKCTevU4d7ZTzlsvgd1uaToIDtlQlmNbwqYhA= @@ -252,10 +243,10 @@ golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxb 
golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/api v0.0.0-20260114163908-3f89685c29c3 h1:X9z6obt+cWRX8XjDVOn+SZWhWe5kZHm46TThU9j+jss= -google.golang.org/genproto/googleapis/api v0.0.0-20260114163908-3f89685c29c3/go.mod h1:dd646eSK+Dk9kxVBl1nChEOhJPtMXriCcVb4x3o6J+E= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260114163908-3f89685c29c3 h1:C4WAdL+FbjnGlpp2S+HMVhBeCq2Lcib4xZqfPNF6OoQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260114163908-3f89685c29c3/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= +google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 h1:merA0rdPeUV3YIIfHHcH4qBkiQAc1nfCKSI7lB4cV2M= +google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409/go.mod h1:fl8J1IvUjCilwZzQowmw2b7HQB2eAuYBabMXzWurF+I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 h1:H86B94AW+VfJWDqFeEbBPhEtHzJwJfTbgE2lZa54ZAQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.6.0 h1:6Al3kEFFP9VJhRz3DID6quisgPnTeZVr4lep9kkxdPA= @@ -265,7 +256,6 @@ google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= From 298e2f27ed48debf2d60d6784afc4155228c86f7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Feb 2026 10:10:16 +0100 Subject: [PATCH 129/232] chore(deps): bump github.com/googleapis/gax-go/v2 from 2.16.0 to 2.17.0 (#18835) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 8 ++++---- go.sum | 20 ++++++++++---------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/go.mod b/go.mod index 75624c9cc221c..1e3159b783f3c 100644 --- a/go.mod +++ b/go.mod @@ -62,7 +62,7 @@ require ( github.com/google/go-containerregistry v0.20.7 github.com/google/go-github/v60 v60.0.0 github.com/google/uuid v1.6.0 - github.com/googleapis/gax-go/v2 v2.16.0 + github.com/googleapis/gax-go/v2 v2.17.0 github.com/gorilla/schema v1.4.1 github.com/grafana/pyroscope-go v1.2.7 github.com/graph-gophers/graphql-go v1.5.0 @@ -151,8 +151,8 @@ require ( golang.org/x/tools v0.41.0 golang.stackrox.io/grpc-http1 v0.5.1 google.golang.org/api v0.265.0 - google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 - google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b + 
google.golang.org/genproto v0.0.0-20260128011058-8636f8732409 + google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 google.golang.org/grpc v1.78.0 google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6 google.golang.org/protobuf v1.36.11 @@ -185,7 +185,7 @@ require ( cloud.google.com/go/auth v0.18.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/iam v1.5.3 // indirect - cloud.google.com/go/longrunning v0.7.0 // indirect + cloud.google.com/go/longrunning v0.8.0 // indirect cloud.google.com/go/monitoring v1.24.3 // indirect github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 // indirect github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect diff --git a/go.sum b/go.sum index 14e1e80ce44c5..e580e6e4c6aab 100644 --- a/go.sum +++ b/go.sum @@ -54,12 +54,12 @@ cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1 cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= -cloud.google.com/go/kms v1.23.2 h1:4IYDQL5hG4L+HzJBhzejUySoUOheh3Lk5YT4PCyyW6k= -cloud.google.com/go/kms v1.23.2/go.mod h1:rZ5kK0I7Kn9W4erhYVoIRPtpizjunlrfU4fUkumUp8g= +cloud.google.com/go/kms v1.25.0 h1:gVqvGGUmz0nYCmtoxWmdc1wli2L1apgP8U4fghPGSbQ= +cloud.google.com/go/kms v1.25.0/go.mod h1:XIdHkzfj0bUO3E+LvwPg+oc7s58/Ns8Nd8Sdtljihbk= cloud.google.com/go/logging v1.13.1 h1:O7LvmO0kGLaHY/gq8cV7T0dyp6zJhYAOtZPX4TF3QtY= cloud.google.com/go/logging v1.13.1/go.mod h1:XAQkfkMBxQRjQek96WLPNze7vsOmay9H5PqfsNYDqvw= -cloud.google.com/go/longrunning v0.7.0 h1:FV0+SYF1RIj59gyoWDRi45GiYUMM3K1qO51qoboQT1E= -cloud.google.com/go/longrunning v0.7.0/go.mod h1:ySn2yXmjbK9Ba0zsQqunhDkYi0+9rlXIwnoAf+h+TPY= +cloud.google.com/go/longrunning v0.8.0 h1:LiKK77J3bx5gDLi4SMViHixjD2ohlkwBi+mKA7EhfW8= +cloud.google.com/go/longrunning v0.8.0/go.mod h1:UmErU2Onzi+fKDg2gR7dusz11Pe26aknR4kHmJJqIfk= cloud.google.com/go/monitoring v1.24.3 h1:dde+gMNc0UhPZD1Azu6at2e79bfdztVDS5lvhOdsgaE= cloud.google.com/go/monitoring v1.24.3/go.mod h1:nYP6W0tm3N9H/bOw8am7t62YTzZY+zUeQ+Bi6+2eonI= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -848,8 +848,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.16.0 h1:iHbQmKLLZrexmb0OSsNGTeSTS0HO4YvFOG8g5E4Zd0Y= -github.com/googleapis/gax-go/v2 v2.16.0/go.mod h1:o1vfQjjNZn4+dPnRdl/4ZD7S9414Y4xA+a/6Icj6l14= +github.com/googleapis/gax-go/v2 v2.17.0 h1:RksgfBpxqff0EZkDWYuz9q/uWsTVz+kf43LsZ1J6SMc= +github.com/googleapis/gax-go/v2 v2.17.0/go.mod h1:mzaqghpQp4JDh3HvADwrat+6M3MOIDp5YKHhb9PAgDY= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -2250,10 +2250,10 @@ google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ6 
google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 h1:GvESR9BIyHUahIb0NcTum6itIWtdoglGX+rnGxm2934= -google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:yJ2HH4EHEDTd3JiLmhds6NkJ17ITVYOdV3m3VKOnws0= -google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b h1:uA40e2M6fYRBf0+8uN5mLlqUtV192iiksiICIBkYJ1E= -google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:Xa7le7qx2vmqB/SzWUBa7KdMjpdpAHlh5QCSnjessQk= +google.golang.org/genproto v0.0.0-20260128011058-8636f8732409 h1:VQZ/yAbAtjkHgH80teYd2em3xtIkkHd7ZhqfH2N9CsM= +google.golang.org/genproto v0.0.0-20260128011058-8636f8732409/go.mod h1:rxKD3IEILWEu3P44seeNOAwZN4SaoKaQ/2eTg4mM6EM= +google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 h1:merA0rdPeUV3YIIfHHcH4qBkiQAc1nfCKSI7lB4cV2M= +google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409/go.mod h1:fl8J1IvUjCilwZzQowmw2b7HQB2eAuYBabMXzWurF+I= google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 h1:H86B94AW+VfJWDqFeEbBPhEtHzJwJfTbgE2lZa54ZAQ= google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= From 687b2ef20d9f7d65d55146af6bd00c96d7ed17fd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Feb 2026 10:10:52 +0100 Subject: [PATCH 130/232] chore(deps): bump github.com/aws/aws-sdk-go-v2/feature/s3/manager from 1.21.1 to 1.22.0 in the aws-sdk-go-v2 group (#18893) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 1e3159b783f3c..8df12dec79964 100644 --- a/go.mod +++ b/go.mod @@ -29,7 +29,7 @@ require ( github.com/aws/aws-sdk-go-v2/config v1.32.7 github.com/aws/aws-sdk-go-v2/credentials v1.19.7 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.21.1 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.0 github.com/aws/aws-sdk-go-v2/service/ecr v1.55.1 github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0 github.com/aws/aws-sdk-go-v2/service/securityhub v1.67.3 diff --git a/go.sum b/go.sum index e580e6e4c6aab..406885525b9fc 100644 --- a/go.sum +++ b/go.sum @@ -295,8 +295,8 @@ github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUT github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.21.1 h1:1hWFp+52Vq8Fevy/KUhbW/1MEApMz7uitCF/PQXRJpk= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.21.1/go.mod h1:sIec8j802/rCkCKgZV678HFR0s7lhQUYXT77tIvlaa4= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.0 
h1:MpkX8EjkwuvyuX9B7+Zgk5M4URb2WQ84Y6jM81n5imw= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.0/go.mod h1:4V9Pv5sFfMPWQF0Q0zYN6BlV/504dFGaTeogallRqQw= github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U= github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik= From 785bb655e1a13c9de868119aadb9ae5d37186c68 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Feb 2026 10:25:02 +0100 Subject: [PATCH 131/232] chore(deps-dev): bump jsonpath from 1.1.1 to 1.2.0 in /tests/performance/load (#18875) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- tests/performance/load/package-lock.json | 191 +++++------------------ 1 file changed, 42 insertions(+), 149 deletions(-) diff --git a/tests/performance/load/package-lock.json b/tests/performance/load/package-lock.json index e919045b6e62a..8e10227f1e378 100644 --- a/tests/performance/load/package-lock.json +++ b/tests/performance/load/package-lock.json @@ -576,22 +576,21 @@ } }, "node_modules/escodegen": { - "version": "1.14.3", - "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-1.14.3.tgz", - "integrity": "sha512-qFcX0XJkdg+PB3xjZZG/wKSuT1PnQWx57+TVSjIMmILd2yC/6ByYElPwJnslDsuWuSAp4AwJGumarAAmJch5Kw==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-2.1.0.tgz", + "integrity": "sha512-2NlIDTwUWJN0mRPQOdtQBzbUHvdGY2P1VXSyU83Q3xKxM7WHX2Ql8dKq782Q9TgQUNOLEzEYu9bzLNj1q88I5w==", "dev": true, "dependencies": { "esprima": "^4.0.1", - "estraverse": "^4.2.0", - "esutils": "^2.0.2", - "optionator": "^0.8.1" + "estraverse": "^5.2.0", + "esutils": "^2.0.2" }, "bin": { "escodegen": "bin/escodegen.js", "esgenerate": "bin/esgenerate.js" }, "engines": { - "node": ">=4.0" + "node": ">=6.0" }, "optionalDependencies": { "source-map": "~0.6.1" @@ -610,54 +609,6 @@ "node": ">=4" } }, - "node_modules/escodegen/node_modules/estraverse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", - "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", - "dev": true, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/escodegen/node_modules/levn": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/levn/-/levn-0.3.0.tgz", - "integrity": "sha512-0OO4y2iOHix2W6ujICbKIaEQXvFQHue65vUG3pb5EUomzPI90z9hsA1VsO/dbIIpC53J8gxM9Q4Oho0jrCM/yA==", - "dev": true, - "dependencies": { - "prelude-ls": "~1.1.2", - "type-check": "~0.3.2" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/escodegen/node_modules/optionator": { - "version": "0.8.3", - "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.3.tgz", - "integrity": "sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==", - "dev": true, - "dependencies": { - "deep-is": "~0.1.3", - "fast-levenshtein": "~2.0.6", - "levn": "~0.3.0", - "prelude-ls": "~1.1.2", - "type-check": "~0.3.2", - "word-wrap": "~1.2.3" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/escodegen/node_modules/prelude-ls": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz", - "integrity": 
"sha512-ESF23V4SKG6lVSGZgYNpbsiaAkdab6ZgOxe52p7+Kid3W3u3bxR4Vfd/o21dmN7jSt0IwgZ4v5MUd26FEtXE9w==", - "dev": true, - "engines": { - "node": ">= 0.8.0" - } - }, "node_modules/escodegen/node_modules/source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", @@ -668,18 +619,6 @@ "node": ">=0.10.0" } }, - "node_modules/escodegen/node_modules/type-check": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", - "integrity": "sha512-ZCmOJdvOWDBYJlzAoFkC+Q0+bUyEOS1ltgp1MGU03fqHG+dbi9tBFU2Rd9QKiDZFAYrhPh2JUf7rZRIuHRKtOg==", - "dev": true, - "dependencies": { - "prelude-ls": "~1.1.2" - }, - "engines": { - "node": ">= 0.8.0" - } - }, "node_modules/eslint": { "version": "8.27.0", "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.27.0.tgz", @@ -815,9 +754,9 @@ } }, "node_modules/esprima": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/esprima/-/esprima-1.2.2.tgz", - "integrity": "sha512-+JpPZam9w5DuJ3Q67SqsMGtiHKENSMRVoxvArfJZK01/BfLEObtZ6orJa/MtoGNR/rfMgp5837T41PAmTwAv/A==", + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-1.2.5.tgz", + "integrity": "sha512-S9VbPDU0adFErpDai3qDkjq8+G05ONtKzcyNrPKg/ZKa+tf879nX2KexNU95b31UoTJjRLInNBHHHjFPoCd7lQ==", "dev": true, "bin": { "esparse": "bin/esparse.js", @@ -1590,14 +1529,14 @@ ] }, "node_modules/jsonpath": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/jsonpath/-/jsonpath-1.1.1.tgz", - "integrity": "sha512-l6Cg7jRpixfbgoWgkrl77dgEj8RPvND0wMH6TwQmi9Qs4TFfS9u5cUFnbeKTwj5ga5Y3BTGGNI28k117LJ009w==", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/jsonpath/-/jsonpath-1.2.0.tgz", + "integrity": "sha512-EVm29wT2coM0QfZd8TREEeMTOxZcyV3oCQ61AM0DrMkVaVCKXtPEm0oJccEbz5P9Oi+JwRkkIt0Bkn63gqCHjg==", "dev": true, "dependencies": { - "esprima": "1.2.2", - "static-eval": "2.0.2", - "underscore": "1.12.1" + "esprima": "1.2.5", + "static-eval": "2.1.1", + "underscore": "1.13.6" } }, "node_modules/JSONStream": { @@ -2233,12 +2172,12 @@ } }, "node_modules/static-eval": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/static-eval/-/static-eval-2.0.2.tgz", - "integrity": "sha512-N/D219Hcr2bPjLxPiV+TQE++Tsmrady7TqAJugLy7Xk1EumfDWS/f5dtBbkRCGE7wKKXuYockQoj8Rm2/pVKyg==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/static-eval/-/static-eval-2.1.1.tgz", + "integrity": "sha512-MgWpQ/ZjGieSVB3eOJVs4OA2LT/q1vx98KPCTTQPzq/aLr0YUXTsgryTXr4SLfR0ZfUUCiedM9n/ABeDIyy4mA==", "dev": true, "dependencies": { - "escodegen": "^1.8.1" + "escodegen": "^2.1.0" } }, "node_modules/string_decoder": { @@ -2446,9 +2385,9 @@ } }, "node_modules/underscore": { - "version": "1.12.1", - "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.12.1.tgz", - "integrity": "sha512-hEQt0+ZLDVUMhebKxL4x1BTtDY7bavVofhZ9KZ4aI26X9SRaE+Y3m83XUL1UP2jn8ynjndwCCpEHdUG+9pP1Tw==", + "version": "1.13.6", + "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.13.6.tgz", + "integrity": "sha512-+A5Sja4HP1M08MaXya7p5LvjuM7K6q/2EaC0+iovj/wOcMsTzMvDFbasi/oSapiwOlt252IqsKqPjCl7huKS0A==", "dev": true }, "node_modules/universalify": { @@ -3008,15 +2947,14 @@ "dev": true }, "escodegen": { - "version": "1.14.3", - "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-1.14.3.tgz", - "integrity": "sha512-qFcX0XJkdg+PB3xjZZG/wKSuT1PnQWx57+TVSjIMmILd2yC/6ByYElPwJnslDsuWuSAp4AwJGumarAAmJch5Kw==", + "version": "2.1.0", + "resolved": 
"https://registry.npmjs.org/escodegen/-/escodegen-2.1.0.tgz", + "integrity": "sha512-2NlIDTwUWJN0mRPQOdtQBzbUHvdGY2P1VXSyU83Q3xKxM7WHX2Ql8dKq782Q9TgQUNOLEzEYu9bzLNj1q88I5w==", "dev": true, "requires": { "esprima": "^4.0.1", - "estraverse": "^4.2.0", + "estraverse": "^5.2.0", "esutils": "^2.0.2", - "optionator": "^0.8.1", "source-map": "~0.6.1" }, "dependencies": { @@ -3026,57 +2964,12 @@ "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", "dev": true }, - "estraverse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", - "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", - "dev": true - }, - "levn": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/levn/-/levn-0.3.0.tgz", - "integrity": "sha512-0OO4y2iOHix2W6ujICbKIaEQXvFQHue65vUG3pb5EUomzPI90z9hsA1VsO/dbIIpC53J8gxM9Q4Oho0jrCM/yA==", - "dev": true, - "requires": { - "prelude-ls": "~1.1.2", - "type-check": "~0.3.2" - } - }, - "optionator": { - "version": "0.8.3", - "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.3.tgz", - "integrity": "sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==", - "dev": true, - "requires": { - "deep-is": "~0.1.3", - "fast-levenshtein": "~2.0.6", - "levn": "~0.3.0", - "prelude-ls": "~1.1.2", - "type-check": "~0.3.2", - "word-wrap": "~1.2.3" - } - }, - "prelude-ls": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz", - "integrity": "sha512-ESF23V4SKG6lVSGZgYNpbsiaAkdab6ZgOxe52p7+Kid3W3u3bxR4Vfd/o21dmN7jSt0IwgZ4v5MUd26FEtXE9w==", - "dev": true - }, "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", "dev": true, "optional": true - }, - "type-check": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", - "integrity": "sha512-ZCmOJdvOWDBYJlzAoFkC+Q0+bUyEOS1ltgp1MGU03fqHG+dbi9tBFU2Rd9QKiDZFAYrhPh2JUf7rZRIuHRKtOg==", - "dev": true, - "requires": { - "prelude-ls": "~1.1.2" - } } } }, @@ -3179,9 +3072,9 @@ } }, "esprima": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/esprima/-/esprima-1.2.2.tgz", - "integrity": "sha512-+JpPZam9w5DuJ3Q67SqsMGtiHKENSMRVoxvArfJZK01/BfLEObtZ6orJa/MtoGNR/rfMgp5837T41PAmTwAv/A==", + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-1.2.5.tgz", + "integrity": "sha512-S9VbPDU0adFErpDai3qDkjq8+G05ONtKzcyNrPKg/ZKa+tf879nX2KexNU95b31UoTJjRLInNBHHHjFPoCd7lQ==", "dev": true }, "esquery": { @@ -3800,14 +3693,14 @@ "dev": true }, "jsonpath": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/jsonpath/-/jsonpath-1.1.1.tgz", - "integrity": "sha512-l6Cg7jRpixfbgoWgkrl77dgEj8RPvND0wMH6TwQmi9Qs4TFfS9u5cUFnbeKTwj5ga5Y3BTGGNI28k117LJ009w==", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/jsonpath/-/jsonpath-1.2.0.tgz", + "integrity": "sha512-EVm29wT2coM0QfZd8TREEeMTOxZcyV3oCQ61AM0DrMkVaVCKXtPEm0oJccEbz5P9Oi+JwRkkIt0Bkn63gqCHjg==", "dev": true, "requires": { - "esprima": "1.2.2", - "static-eval": "2.0.2", - "underscore": "1.12.1" + "esprima": "1.2.5", + "static-eval": "2.1.1", + "underscore": "1.13.6" } }, "JSONStream": { @@ -4263,12 +4156,12 @@ "dev": true }, "static-eval": { - "version": "2.0.2", - "resolved": 
"https://registry.npmjs.org/static-eval/-/static-eval-2.0.2.tgz", - "integrity": "sha512-N/D219Hcr2bPjLxPiV+TQE++Tsmrady7TqAJugLy7Xk1EumfDWS/f5dtBbkRCGE7wKKXuYockQoj8Rm2/pVKyg==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/static-eval/-/static-eval-2.1.1.tgz", + "integrity": "sha512-MgWpQ/ZjGieSVB3eOJVs4OA2LT/q1vx98KPCTTQPzq/aLr0YUXTsgryTXr4SLfR0ZfUUCiedM9n/ABeDIyy4mA==", "dev": true, "requires": { - "escodegen": "^1.8.1" + "escodegen": "^2.1.0" } }, "string_decoder": { @@ -4442,9 +4335,9 @@ "dev": true }, "underscore": { - "version": "1.12.1", - "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.12.1.tgz", - "integrity": "sha512-hEQt0+ZLDVUMhebKxL4x1BTtDY7bavVofhZ9KZ4aI26X9SRaE+Y3m83XUL1UP2jn8ynjndwCCpEHdUG+9pP1Tw==", + "version": "1.13.6", + "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.13.6.tgz", + "integrity": "sha512-+A5Sja4HP1M08MaXya7p5LvjuM7K6q/2EaC0+iovj/wOcMsTzMvDFbasi/oSapiwOlt252IqsKqPjCl7huKS0A==", "dev": true }, "universalify": { From d03bf57e2dd84f561836cb4abd17f1d3d3e29e09 Mon Sep 17 00:00:00 2001 From: Stephan Hesselmann Date: Fri, 6 Feb 2026 13:31:59 +0100 Subject: [PATCH 132/232] ROX-32817: bump compliance test timeout (#18842) --- tests/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/Makefile b/tests/Makefile index d0f52e785de28..cea720e4a6110 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -30,7 +30,7 @@ external-backup-tests: .PHONY: compliance-v2-tests compliance-v2-tests: - @GOTAGS=$(GOTAGS),test,compliance ../scripts/go-test.sh -cover -v -run TestComplianceV2 2>&1 | tee test.log + @GOTAGS=$(GOTAGS),test,compliance ../scripts/go-test.sh -timeout 30m -cover -v -run TestComplianceV2 2>&1 | tee test.log @$(MAKE) report JUNIT_OUT=compliance-v2-tests-results include ../make/stackrox.mk From 176df7d0cfba3a486892ebf821f14f9ab511419b Mon Sep 17 00:00:00 2001 From: Khushboo Sancheti <42253461+clickboo@users.noreply.github.com> Date: Fri, 6 Feb 2026 19:01:37 +0530 Subject: [PATCH 133/232] fix(ui): Minor fix to preserve alphabetical order (#18894) --- .../Policies/Wizard/Step3/policyCriteriaDescriptors.tsx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ui/apps/platform/src/Containers/Policies/Wizard/Step3/policyCriteriaDescriptors.tsx b/ui/apps/platform/src/Containers/Policies/Wizard/Step3/policyCriteriaDescriptors.tsx index dfd6c397f8219..db1f562af2404 100644 --- a/ui/apps/platform/src/Containers/Policies/Wizard/Step3/policyCriteriaDescriptors.tsx +++ b/ui/apps/platform/src/Containers/Policies/Wizard/Step3/policyCriteriaDescriptors.tsx @@ -1404,6 +1404,10 @@ export const policyCriteriaDescriptors: Descriptor[] = [ category: policyCriteriaCategories.USER_ISSUED_CONTAINER_COMMANDS, type: 'select', options: [ + { + label: 'Pod attach', + value: 'PODS_ATTACH', + }, { label: 'Pod exec', value: 'PODS_EXEC', @@ -1412,10 +1416,6 @@ export const policyCriteriaDescriptors: Descriptor[] = [ label: 'Pod port forward', value: 'PODS_PORTFORWARD', }, - { - label: 'Pod attach', - value: 'PODS_ATTACH', - }, ], canBooleanLogic: false, lifecycleStages: ['RUNTIME'], From 2d773701329bf86a76562c456c4b66530e7f2943 Mon Sep 17 00:00:00 2001 From: David Vail Date: Fri, 6 Feb 2026 08:32:21 -0500 Subject: [PATCH 134/232] ROX-32855: Remove long outdated Postgresql info banner (#18728) --- .../MainPage/Banners/AnnouncementBanner.tsx | 62 ------------------- .../Containers/MainPage/Banners/Banners.tsx | 2 - 2 files changed, 64 deletions(-) delete mode 100644 
ui/apps/platform/src/Containers/MainPage/Banners/AnnouncementBanner.tsx diff --git a/ui/apps/platform/src/Containers/MainPage/Banners/AnnouncementBanner.tsx b/ui/apps/platform/src/Containers/MainPage/Banners/AnnouncementBanner.tsx deleted file mode 100644 index b23791b41c917..0000000000000 --- a/ui/apps/platform/src/Containers/MainPage/Banners/AnnouncementBanner.tsx +++ /dev/null @@ -1,62 +0,0 @@ -import { useEffect, useState } from 'react'; -import type { ReactElement } from 'react'; -import { Banner, Button } from '@patternfly/react-core'; - -import { fetchDatabaseStatus } from 'services/DatabaseService'; - -const ANNOUNCEMENT_BANNER_KEY = 'postgresAnnouncementBannerDismissed'; - -function AnnouncementBanner(): ReactElement | null { - const [isDisplayed, setIsDisplayed] = useState(false); - const [databaseType, setDatabaseType] = useState(''); - - useEffect(() => { - const localStorageValue = localStorage.getItem(ANNOUNCEMENT_BANNER_KEY); - const isBannerDismissed = localStorageValue - ? Boolean(JSON.parse(localStorageValue)) - : false; - setIsDisplayed(!isBannerDismissed); - - if (!isBannerDismissed) { - fetchDatabaseStatus() - .then((response) => { - setDatabaseType(response?.databaseType || ''); - }) - .catch(() => { - setDatabaseType(''); - }); - } - }, []); - - function handleDismissClick() { - localStorage.setItem(ANNOUNCEMENT_BANNER_KEY, JSON.stringify(true)); - setIsDisplayed(false); - } - - if (isDisplayed && databaseType !== 'PostgresDB') { - return ( - - - Red Hat Advanced Cluster Security plans to change its database to PostgreSQL in - an upcoming major release. This change will require you to back up your database - before upgrading. - - - - ); - } - return null; -} - -export default AnnouncementBanner; diff --git a/ui/apps/platform/src/Containers/MainPage/Banners/Banners.tsx b/ui/apps/platform/src/Containers/MainPage/Banners/Banners.tsx index c9bab79e7fbd2..ac1bcb1a307ca 100644 --- a/ui/apps/platform/src/Containers/MainPage/Banners/Banners.tsx +++ b/ui/apps/platform/src/Containers/MainPage/Banners/Banners.tsx @@ -4,7 +4,6 @@ import useCentralCapabilities from 'hooks/useCentralCapabilities'; import useIsScannerV4Enabled from 'hooks/useIsScannerV4Enabled'; import usePermissions from 'hooks/usePermissions'; -import AnnouncementBanner from './AnnouncementBanner'; import CredentialExpiryBanner from './CredentialExpiryBanner'; import DatabaseStatusBanner from './DatabaseStatusBanner'; import OutdatedVersionBanner from './OutdatedVersionBanner'; @@ -23,7 +22,6 @@ function Banners(): ReactElement { return ( <> - Date: Fri, 6 Feb 2026 15:38:00 +0100 Subject: [PATCH 135/232] ROX-31217: Remove datastore-level SAC checks in pod (#17377) --- central/pod/datastore/datastore_impl.go | 16 ++-------------- central/pod/datastore/datastore_impl_test.go | 6 +++--- central/pod/datastore/datastore_sac_test.go | 19 +++++++++++++------ 3 files changed, 18 insertions(+), 23 deletions(-) diff --git a/central/pod/datastore/datastore_impl.go b/central/pod/datastore/datastore_impl.go index 57ab915ad8a30..1f700c39b66e7 100644 --- a/central/pod/datastore/datastore_impl.go +++ b/central/pod/datastore/datastore_impl.go @@ -93,12 +93,6 @@ func (ds *datastoreImpl) WalkByQuery(ctx context.Context, q *v1.Query, fn func(p func (ds *datastoreImpl) UpsertPod(ctx context.Context, pod *storage.Pod) error { defer metrics.SetDatastoreFunctionDuration(time.Now(), resourceType, "Upsert") - if ok, err := podsSAC.WriteAllowed(ctx); err != nil { - return err - } else if !ok { - return sac.ErrResourceAccessDenied - } - 
ds.processFilter.UpdateByPod(pod) err := ds.keyedMutex.DoStatusWithLock(pod.GetId(), func() error { @@ -162,18 +156,10 @@ func mergeContainerInstances(newPod *storage.Pod, oldPod *storage.Pod) { func (ds *datastoreImpl) RemovePod(ctx context.Context, id string) error { defer metrics.SetDatastoreFunctionDuration(time.Now(), resourceType, "Delete") - if ok, err := podsSAC.WriteAllowed(ctx); err != nil { - return err - } else if !ok { - return sac.ErrResourceAccessDenied - } - pod, found, err := ds.podStore.Get(ctx, id) if err != nil || !found { return err } - ds.processFilter.DeleteByPod(pod) - err = ds.keyedMutex.DoStatusWithLock(id, func() error { return ds.podStore.Delete(ctx, id) }) @@ -181,6 +167,8 @@ func (ds *datastoreImpl) RemovePod(ctx context.Context, id string) error { return err } + ds.processFilter.DeleteByPod(pod) + deleteIndicatorsCtx := sac.WithGlobalAccessScopeChecker(ctx, sac.AllowFixedScopes( sac.AccessModeScopeKeys(storage.Access_READ_ACCESS, storage.Access_READ_WRITE_ACCESS), diff --git a/central/pod/datastore/datastore_impl_test.go b/central/pod/datastore/datastore_impl_test.go index bc1a299e1a5bc..15a1e48fa87df 100644 --- a/central/pod/datastore/datastore_impl_test.go +++ b/central/pod/datastore/datastore_impl_test.go @@ -62,9 +62,9 @@ func (suite *PodDataStoreTestSuite) TestNoAccessAllowed() { _, ok, _ := suite.datastore.GetPod(ctx, expectedPod.GetId()) suite.False(ok) - suite.Error(suite.datastore.UpsertPod(ctx, expectedPod), "permission denied") - - suite.Error(suite.datastore.RemovePod(ctx, expectedPod.GetId()), "permission denied") + // The datastore delegates the access control checks to the storage layer + // for UpsertPod and RemovePod. The "end-to-end" behaviour of the datastore + // for these functions is now tested in datastore_sac_test.go } func (suite *PodDataStoreTestSuite) TestGetPod() { diff --git a/central/pod/datastore/datastore_sac_test.go b/central/pod/datastore/datastore_sac_test.go index 138e66505af46..50b13a4b2f881 100644 --- a/central/pod/datastore/datastore_sac_test.go +++ b/central/pod/datastore/datastore_sac_test.go @@ -74,7 +74,7 @@ func (s *podDatastoreSACSuite) deletePod(id string) { } func (s *podDatastoreSACSuite) TestUpsertPod() { - cases := testutils.GenericGlobalSACUpsertTestCases(s.T(), testutils.VerbUpsert) + cases := testutils.GenericNamespaceSACUpsertTestCases(s.T(), testutils.VerbUpsert) for name, c := range cases { s.Run(name, func() { @@ -118,7 +118,7 @@ func (s *podDatastoreSACSuite) TestGetPod() { } func (s *podDatastoreSACSuite) TestRemovePod() { - cases := testutils.GenericGlobalSACDeleteTestCases(s.T()) + cases := testutils.GenericNamespaceSACDeleteTestCases(s.T()) for name, c := range cases { s.Run(name, func() { @@ -131,11 +131,18 @@ func (s *podDatastoreSACSuite) TestRemovePod() { defer s.deletePod(pod.GetId()) err = s.datastore.RemovePod(ctx, pod.GetId()) - if c.ExpectError { - s.Require().Error(err) - s.ErrorIs(err, c.ExpectedError) + s.NoError(err) + fetched, found, fetchErr := s.datastore.GetPod( + s.testContexts[testutils.UnrestrictedReadWriteCtx], + pod.GetId(), + ) + s.NoError(fetchErr) + if c.ExpectedFound { + s.True(found) + protoassert.Equal(s.T(), pod, fetched) } else { - s.NoError(err) + s.False(found) + s.Nil(fetched) } }) } From 9cbfb665e75522e839c7e5327014cb123a394991 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Feb 2026 16:18:45 +0100 Subject: [PATCH 136/232] chore(deps): bump github.com/Azure/azure-sdk-for-go-extensions from 0.3.0 to 
0.5.0 (#18805) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Yann Brillouet --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 8df12dec79964..a982013e39344 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( cloud.google.com/go/securitycenter v1.38.1 cloud.google.com/go/storage v1.59.2 dario.cat/mergo v1.0.2 - github.com/Azure/azure-sdk-for-go-extensions v0.3.0 + github.com/Azure/azure-sdk-for-go-extensions v0.5.0 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 github.com/Azure/azure-sdk-for-go/sdk/containers/azcontainerregistry v0.2.3 @@ -190,7 +190,7 @@ require ( github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 // indirect github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v8 v8.1.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v8 v8.2.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.29 // indirect @@ -490,7 +490,7 @@ require ( go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/contrib/detectors/gcp v1.38.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 // indirect go.opentelemetry.io/otel v1.39.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect diff --git a/go.sum b/go.sum index 406885525b9fc..87b57b89dd67b 100644 --- a/go.sum +++ b/go.sum @@ -90,8 +90,8 @@ github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0/go.mod h1:tlqp9mUGbsP+0z3Q+c0Q5MgSdq/OMwQhm5bffR3Q3ss= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go-extensions v0.3.0 h1:+HplBrHcjlp6FtuzglDSUl8B2uDBU4+fC3GMa9+wbsU= -github.com/Azure/azure-sdk-for-go-extensions v0.3.0/go.mod h1:YEQyOAy4qwhmL3RRvvDvo6nyCtLiorJU+MZ+4xTxzoI= +github.com/Azure/azure-sdk-for-go-extensions v0.5.0 h1:Hvzr/oLQ1XsrD2AylWy0ii99Hz4Te4O/kZEfgC4B6mo= +github.com/Azure/azure-sdk-for-go-extensions v0.5.0/go.mod h1:kUQAe8kR4uL8fWbBUsrlb5WudbgkmLxuqNkzKUo+0DI= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0 h1:fou+2+WFTib47nS+nz/ozhEBnvU96bKHy6LjRsY4E28= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0/go.mod h1:t76Ruy8AHvUAC8GfMWJMa0ElSbuIcO03NLpynfbgsPA= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4= @@ -104,8 +104,8 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDo github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= 
github.com/Azure/azure-sdk-for-go/sdk/monitor/ingestion/azlogs v1.1.0 h1:Q+tp/BW0x11uAm5i9f2xEu3RZ3wy89KNYfDVCWFHUJQ= github.com/Azure/azure-sdk-for-go/sdk/monitor/ingestion/azlogs v1.1.0/go.mod h1:et3yi6OrdxM8YK0pfOwpHSLf4gWypxQVWh4T9wRzg3k= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v8 v8.1.0 h1:P8Ke8bp6CfxeGxPjMFIlduLSHL/g4sIF4/rWDbK0bzs= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v8 v8.1.0/go.mod h1:5y85ofjphRqoQcJ/29URxw4lYly0kURib43BEWAw/8o= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v8 v8.2.0 h1:aXzpyYcHexm3eSlvy6g7r3cshXtGcEg6VJpOdrN0Us0= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v8 v8.2.0/go.mod h1:vs/o7so4c3csg/CM0LDrqxSKDxcKgeYbgI3zaL6vu7U= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v3 v3.1.0 h1:2qsIIvxVT+uE6yrNldntJKlLRgxGbZ85kgtz5SNBhMw= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v3 v3.1.0/go.mod h1:AW8VEadnhw9xox+VaVd9sP7NjzOAnaZBLRH6Tq3cJ38= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 h1:Dd+RhdJn0OTtVGaeDLZpcumkIVCtA/3/Fo42+eoYvVM= @@ -1681,8 +1681,8 @@ go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQ go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 h1:ssfIgGNANqpVFCndZvcuyKbl0g+UAVcbBcqGkG28H0Y= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0/go.mod h1:GQ/474YrbE4Jx8gZ4q5I4hrhUzM6UPzyrqJYV2AqPoQ= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= From e8b7c39b99e325b1901c0110e28fef479ecaa7d5 Mon Sep 17 00:00:00 2001 From: Yi Li Date: Fri, 6 Feb 2026 18:00:55 -0600 Subject: [PATCH 137/232] ROX-32983: match base images in delegateEnrichImage (#18856) --- pkg/images/enricher/enricher_impl.go | 64 +++++-- pkg/images/enricher/enricher_impl_test.go | 169 ++++++++++++++---- pkg/images/enricher/enricher_v2_impl.go | 64 +++++-- pkg/images/enricher/enricher_v2_impl_test.go | 170 +++++++++++++++---- 4 files changed, 359 insertions(+), 108 deletions(-) diff --git a/pkg/images/enricher/enricher_impl.go b/pkg/images/enricher/enricher_impl.go index a99eaa1a157db..238f25d11cd1a 100644 --- a/pkg/images/enricher/enricher_impl.go +++ b/pkg/images/enricher/enricher_impl.go @@ -84,6 +84,7 @@ func (e *enricherImpl) EnrichWithVulnerabilities(image *storage.Image, component }, noImageScannersErr } + e.enrichWithBaseImage(context.Background(), image) for _, imageScanner := range scanners.GetAll() { scanner := imageScanner.GetScanner() if vulnScanner, ok := scanner.(scannerTypes.ImageVulnerabilityGetter); ok { @@ -175,6 +176,8 @@ func 
(e *enricherImpl) delegateEnrichImage(ctx context.Context, enrichCtx Enrich if exists && cachedImageIsValid(existingImg) { updated := e.updateImageWithExistingImage(image, existingImg, enrichCtx.FetchOpt) if updated { + // Image is cached, but we force base image detection in case base images tags were updated. + e.enrichWithBaseImage(ctx, image) e.cvesSuppressorV2.EnrichImageWithSuppressedCVEs(image) // Errors for signature verification will be logged, so we can safely ignore them for the time being. _, _ = e.enrichWithSignatureVerificationData(ctx, enrichCtx, image) @@ -241,6 +244,48 @@ func (e *enricherImpl) updateImageWithExistingImage(image *storage.Image, existi return e.useExistingScan(image, existingImage, option) } +func (e *enricherImpl) enrichWithBaseImage(ctx context.Context, image *storage.Image) { + if !features.BaseImageDetection.Enabled() { + return + } + if image.GetMetadata() == nil { + log.Warnw("Matching image with base images failed as there's no image metadata", + logging.ImageID(image.GetId()), + logging.String("image_name", image.GetName().GetFullName())) + return + } + layers := image.GetMetadata().GetLayerShas() + if len(layers) == 0 { + log.Warnw("Matching image with base images failed as there's no image layer SHAs", + logging.ImageID(image.GetId()), + logging.String("image_name", image.GetName().GetFullName())) + return + } + + adminCtx := sac.WithGlobalAccessScopeChecker(ctx, + sac.AllowFixedScopes( + sac.AccessModeScopeKeys(storage.Access_READ_ACCESS), + sac.ResourceScopeKeys(resources.ImageAdministration), + ), + ) + + matchedBaseImages, err := e.baseImageGetter(adminCtx, layers) + if err != nil { + log.Warnw("Matching image with base images failed", + logging.ImageID(image.GetId()), + logging.Err(err), + logging.String("image_name", image.GetName().GetFullName())) + return + } + + if len(matchedBaseImages) > 0 { + log.Debugw("Matching image with base images succeeded", + logging.ImageID(image.GetId()), + logging.String("image_name", image.GetName().GetFullName())) + image.BaseImageInfo = toBaseImageInfos(image.GetMetadata(), matchedBaseImages) + } +} + // EnrichImage enriches an image with the integration set present. func (e *enricherImpl) EnrichImage(ctx context.Context, enrichContext EnrichmentContext, image *storage.Image) (EnrichmentResult, error) { shouldDelegate, err := e.delegateEnrichImage(ctx, enrichContext, image) @@ -290,24 +335,7 @@ func (e *enricherImpl) EnrichImage(ctx context.Context, enrichContext Enrichment updated = updated || didUpdateMetadata - if features.BaseImageDetection.Enabled() { - adminCtx := - sac.WithGlobalAccessScopeChecker(ctx, - sac.AllowFixedScopes( - sac.AccessModeScopeKeys(storage.Access_READ_ACCESS), - sac.ResourceScopeKeys(resources.ImageAdministration), - ), - ) - matchedBaseImages, err := e.baseImageGetter(adminCtx, image.GetMetadata().GetLayerShas()) - if err != nil { - log.Warnw("Matching image with base images", - logging.FromContext(ctx), - logging.ImageID(image.GetId()), - logging.Err(err), - logging.String("request_image", image.GetName().GetFullName())) - } - image.BaseImageInfo = toBaseImageInfos(image.GetMetadata(), matchedBaseImages) - } + e.enrichWithBaseImage(ctx, image) updated = updated || len(image.GetBaseImageInfo()) > 0 // Update the image with existing values depending on the FetchOption provided or whether any are available. 
diff --git a/pkg/images/enricher/enricher_impl_test.go b/pkg/images/enricher/enricher_impl_test.go index 314fe0c68f079..ba996156257c5 100644 --- a/pkg/images/enricher/enricher_impl_test.go +++ b/pkg/images/enricher/enricher_impl_test.go @@ -55,18 +55,30 @@ var ( _ types.ImageRegistry = (*fakeRegistryScanner)(nil) ) +type baseImageGetterMock struct { + callCount int +} + +func (m *baseImageGetterMock) get(_ context.Context, _ []string) ([]*storage.BaseImage, error) { + m.callCount++ + return nil, nil +} + func TestEnricherFlow(t *testing.T) { + t.Setenv(features.BaseImageDetection.EnvVar(), "true") + cases := []struct { - name string - ctx EnrichmentContext - inMetadataCache bool - shortCircuitRegistry bool - shortCircuitScanner bool - image *storage.Image - imageGetter ImageGetter - fsr *fakeRegistryScanner - result EnrichmentResult - errorExpected bool + name string + ctx EnrichmentContext + inMetadataCache bool + shortCircuitRegistry bool + shortCircuitScanner bool + image *storage.Image + imageGetter ImageGetter + fsr *fakeRegistryScanner + result EnrichmentResult + errorExpected bool + expectedBaseImageCalls int }{ { name: "nothing in the cache", @@ -78,8 +90,10 @@ func TestEnricherFlow(t *testing.T) { Id: "id", Name: &storage.ImageName{Registry: "reg"}, Names: []*storage.ImageName{{Registry: "reg"}}, + Metadata: &storage.ImageMetadata{ + LayerShas: []string{"SHA1"}, + }, }, - fsr: newFakeRegistryScanner(opts{ requestedMetadata: true, requestedScan: true, @@ -88,6 +102,7 @@ func TestEnricherFlow(t *testing.T) { ImageUpdated: true, ScanResult: ScanSucceeded, }, + expectedBaseImageCalls: 1, }, { name: "scan and metadata in both caches", @@ -99,6 +114,9 @@ func TestEnricherFlow(t *testing.T) { shortCircuitScanner: true, image: &storage.Image{ Id: "id", + Metadata: &storage.ImageMetadata{ + LayerShas: []string{"SHA1"}, + }, }, imageGetter: imageGetterFromImage(&storage.Image{ Id: "id", @@ -114,6 +132,7 @@ func TestEnricherFlow(t *testing.T) { ImageUpdated: true, ScanResult: ScanSucceeded, }, + expectedBaseImageCalls: 1, }, { name: "data in both caches, but force refetch", @@ -125,8 +144,10 @@ func TestEnricherFlow(t *testing.T) { Id: "id", Name: &storage.ImageName{Registry: "reg"}, Names: []*storage.ImageName{{Registry: "reg"}}, + Metadata: &storage.ImageMetadata{ + LayerShas: []string{"SHA1"}, + }, }, - fsr: newFakeRegistryScanner(opts{ requestedMetadata: true, requestedScan: true, @@ -135,6 +156,7 @@ func TestEnricherFlow(t *testing.T) { ImageUpdated: true, ScanResult: ScanSucceeded, }, + expectedBaseImageCalls: 1, }, { name: " data in both caches but force refetch use names", @@ -146,6 +168,9 @@ func TestEnricherFlow(t *testing.T) { Id: "id", Name: &storage.ImageName{Registry: "reg"}, Names: []*storage.ImageName{{Registry: "reg"}}, + Metadata: &storage.ImageMetadata{ + LayerShas: []string{"SHA1"}, + }, }, fsr: newFakeRegistryScanner(opts{ requestedMetadata: true, @@ -155,6 +180,7 @@ func TestEnricherFlow(t *testing.T) { ImageUpdated: true, ScanResult: ScanSucceeded, }, + expectedBaseImageCalls: 1, }, { name: "data in both caches but force refetch scans only", @@ -165,8 +191,10 @@ func TestEnricherFlow(t *testing.T) { image: &storage.Image{ Id: "id", Name: &storage.ImageName{Registry: "reg"}, Names: []*storage.ImageName{{Registry: "reg"}}, + Metadata: &storage.ImageMetadata{ + LayerShas: []string{"SHA1"}, + }, }, - fsr: newFakeRegistryScanner(opts{ requestedMetadata: false, requestedScan: true, @@ -175,6 +203,7 @@ func TestEnricherFlow(t *testing.T) { ImageUpdated: true, ScanResult: 
ScanSucceeded, }, + expectedBaseImageCalls: 1, }, { name: "set ScannerTypeHint to something not found in integrations", @@ -187,8 +216,8 @@ func TestEnricherFlow(t *testing.T) { image: &storage.Image{ Id: "id", Name: &storage.ImageName{Registry: "reg"}, Names: []*storage.ImageName{{Registry: "reg"}}, + // no need to pass metadata }, - fsr: newFakeRegistryScanner(opts{ requestedMetadata: false, requestedScan: false, @@ -197,6 +226,7 @@ func TestEnricherFlow(t *testing.T) { ImageUpdated: true, ScanResult: ScanNotDone, }, + expectedBaseImageCalls: 0, }, { name: "set ScannerTypeHint to something found in integrations", @@ -208,8 +238,10 @@ func TestEnricherFlow(t *testing.T) { image: &storage.Image{ Id: "id", Name: &storage.ImageName{Registry: "reg"}, Names: []*storage.ImageName{{Registry: "reg"}}, + Metadata: &storage.ImageMetadata{ + LayerShas: []string{"SHA1"}, + }, }, - fsr: newFakeRegistryScanner(opts{ requestedMetadata: false, requestedScan: true, @@ -218,6 +250,7 @@ func TestEnricherFlow(t *testing.T) { ImageUpdated: true, ScanResult: ScanSucceeded, }, + expectedBaseImageCalls: 1, }, { name: "data not in caches, and no external metadata", @@ -227,8 +260,7 @@ func TestEnricherFlow(t *testing.T) { inMetadataCache: false, shortCircuitRegistry: true, shortCircuitScanner: true, - image: &storage.Image{Id: "id"}, - + image: &storage.Image{Id: "id", Name: &storage.ImageName{Registry: "reg"}}, fsr: newFakeRegistryScanner(opts{ requestedMetadata: false, requestedScan: false, @@ -237,6 +269,7 @@ func TestEnricherFlow(t *testing.T) { ImageUpdated: false, ScanResult: ScanNotDone, }, + expectedBaseImageCalls: 0, }, { name: "data not in cache, but image already has metadata and scan", @@ -248,7 +281,7 @@ func TestEnricherFlow(t *testing.T) { shortCircuitScanner: true, image: &storage.Image{ Id: "id", - Metadata: &storage.ImageMetadata{}, + Metadata: &storage.ImageMetadata{DataSource: &storage.DataSource{Id: "exists"}, LayerShas: []string{"SHA1"}}, Scan: &storage.ImageScan{}, Name: &storage.ImageName{Registry: "reg"}, Names: []*storage.ImageName{{Registry: "reg"}}, @@ -261,6 +294,7 @@ func TestEnricherFlow(t *testing.T) { ImageUpdated: false, ScanResult: ScanNotDone, }, + expectedBaseImageCalls: 1, }, { name: "data not in cache and ignore existing images", @@ -274,7 +308,10 @@ func TestEnricherFlow(t *testing.T) { Registry: "reg", }, Names: []*storage.ImageName{{Registry: "reg"}}, - Scan: &storage.ImageScan{}, + Metadata: &storage.ImageMetadata{ + LayerShas: []string{"SHA1"}, + }, + Scan: &storage.ImageScan{}, }, imageGetter: imageGetterPanicOnCall, fsr: newFakeRegistryScanner(opts{ @@ -285,6 +322,7 @@ func TestEnricherFlow(t *testing.T) { ImageUpdated: true, ScanResult: ScanSucceeded, }, + expectedBaseImageCalls: 1, }, { name: "data in cache and ignore existing images", @@ -296,7 +334,7 @@ func TestEnricherFlow(t *testing.T) { shortCircuitScanner: false, image: &storage.Image{ Id: "id", - Metadata: &storage.ImageMetadata{}, + Metadata: &storage.ImageMetadata{DataSource: &storage.DataSource{Id: "exists"}, LayerShas: []string{"SHA1"}}, Scan: &storage.ImageScan{}, Name: &storage.ImageName{Registry: "reg"}, Names: []*storage.ImageName{{Registry: "reg"}}, @@ -310,18 +348,17 @@ func TestEnricherFlow(t *testing.T) { ImageUpdated: true, ScanResult: ScanSucceeded, }, + expectedBaseImageCalls: 1, }, } for _, c := range cases { t.Run(c.name, func(t *testing.T) { ctrl := gomock.NewController(t) - fsr := newFakeRegistryScanner(opts{}) registrySet := registryMocks.NewMockSet(ctrl) 
registrySet.EXPECT().Get(gomock.Any()).Return(fsr).AnyTimes() - set := mocks.NewMockSet(ctrl) set.EXPECT().RegistrySet().AnyTimes().Return(registrySet) @@ -332,43 +369,48 @@ func TestEnricherFlow(t *testing.T) { scannerSet := scannerMocks.NewMockSet(ctrl) if !c.shortCircuitScanner { - scannerSet.EXPECT().IsEmpty().Return(false) + scannerSet.EXPECT().IsEmpty().Return(false).AnyTimes() scannerSet.EXPECT().GetAll().Return([]scannertypes.ImageScannerWithDataSource{fsr}).AnyTimes() - set.EXPECT().ScannerSet().Return(scannerSet) + set.EXPECT().ScannerSet().Return(scannerSet).AnyTimes() } mockReporter := reporterMocks.NewMockReporter(ctrl) mockReporter.EXPECT().UpdateIntegrationHealthAsync(gomock.Any()).AnyTimes() + mockBaseGetter := &baseImageGetterMock{} + enricherImpl := &enricherImpl{ cvesSuppressorV2: &fakeCVESuppressorV2{}, integrations: set, errorsPerScanner: map[scannertypes.ImageScannerWithDataSource]int32{fsr: 0}, errorsPerRegistry: map[types.ImageRegistry]int32{fsr: 0}, integrationHealthReporter: mockReporter, - metadataLimiter: rate.NewLimiter(rate.Every(50*time.Millisecond), 1), + metadataLimiter: rate.NewLimiter(rate.Inf, 0), metadataCache: newCache(), metrics: newMetrics(pkgMetrics.CentralSubsystem), imageGetter: emptyImageGetter, signatureIntegrationGetter: emptySignatureIntegrationGetter, signatureFetcher: &fakeSigFetcher{}, - baseImageGetter: emptyBaseImageGetter, + baseImageGetter: mockBaseGetter.get, } - if c.inMetadataCache { - enricherImpl.metadataCache.Add(c.image.GetId(), c.image.GetMetadata()) + + if c.inMetadataCache && c.image != nil { + enricherImpl.metadataCache.Add(getRef(c.image), c.image.GetMetadata()) } if c.imageGetter != nil { enricherImpl.imageGetter = c.imageGetter } - result, err := enricherImpl.EnrichImage(emptyCtx, c.ctx, c.image) - if !c.errorExpected { - require.NoError(t, err) - } else { + + result, err := enricherImpl.EnrichImage(context.Background(), c.ctx, c.image) + + if c.errorExpected { require.Error(t, err) + } else { + require.NoError(t, err) } - assert.Equal(t, c.result, result) - assert.Equal(t, c.fsr, fsr) + assert.Equal(t, c.result, result) + assert.Equal(t, c.expectedBaseImageCalls, mockBaseGetter.callCount, "Mismatch in: %s", c.name) }) } } @@ -1086,9 +1128,14 @@ func TestDelegateEnrichImage(t *testing.T) { func TestEnrichImage_Delegate(t *testing.T) { deleEnrichCtx := EnrichmentContext{Delegable: true} + + // Track call counts + biMock := &baseImageGetterMock{} + e := enricherImpl{ cvesSuppressorV2: &fakeCVESuppressorV2{}, imageGetter: emptyImageGetter, + baseImageGetter: biMock.get, // Inject the tracker } var dele *delegatorMocks.MockDelegator @@ -1096,6 +1143,7 @@ func TestEnrichImage_Delegate(t *testing.T) { ctrl := gomock.NewController(t) dele = delegatorMocks.NewMockDelegator(ctrl) e.scanDelegator = dele + e.baseImageGetter = biMock.get // Reset for every sub-test } t.Run("delegate enrich error", func(t *testing.T) { @@ -1106,18 +1154,65 @@ func TestEnrichImage_Delegate(t *testing.T) { assert.Equal(t, result.ScanResult, ScanNotDone) assert.False(t, result.ImageUpdated) assert.ErrorIs(t, err, errBroken) + + // Verify: Should NOT call baseImageGetter on delegation failure + assert.Equal(t, 0, biMock.callCount) }) t.Run("delegate enrich success", func(t *testing.T) { setup(t) - fakeImage := &storage.Image{} + fakeImage := &storage.Image{ + Id: "sha256:delegate", + Metadata: &storage.ImageMetadata{ + LayerShas: []string{"layer1"}, + }, + } + dele.EXPECT().GetDelegateClusterID(emptyCtx, gomock.Any()).Return("cluster-id", true, nil) 
dele.EXPECT().DelegateScanImage(emptyCtx, gomock.Any(), "cluster-id", "", gomock.Any()).Return(fakeImage, nil) result, err := e.EnrichImage(emptyCtx, deleEnrichCtx, fakeImage) + assert.Equal(t, result.ScanResult, ScanSucceeded) assert.True(t, result.ImageUpdated) assert.NoError(t, err) + + // No cached image. + assert.Equal(t, 0, biMock.callCount) + }) + + t.Run("delegate enrich success with image cache", func(t *testing.T) { + setup(t) + + inputImage := &storage.Image{ + Id: "sha256:delegate-cached", + Name: &storage.ImageName{FullName: "reg/img:tag"}, + } + + cachedImage := &storage.Image{ + Id: "sha256:delegate-cached", + Metadata: &storage.ImageMetadata{ + LayerShas: []string{"layer1", "layer2"}, + V1: &storage.V1Metadata{ + Layers: []*storage.ImageLayer{{Instruction: "FROM"}}, + }, + }, + Scan: &storage.ImageScan{ + ScannerVersion: "1.0", + }, + } + e.imageGetter = imageGetterFromImage(cachedImage) + + dele.EXPECT().GetDelegateClusterID(gomock.Any(), gomock.Any()).Return("cluster-id", true, nil) + + result, err := e.EnrichImage(emptyCtx, deleEnrichCtx, inputImage) + + assert.NoError(t, err) + assert.True(t, result.ImageUpdated) + assert.Equal(t, ScanSucceeded, result.ScanResult) + + assert.Equal(t, 1, biMock.callCount, "Base image getter should be called for cached delegated images") + assert.NotNil(t, inputImage.GetMetadata()) }) } @@ -1410,12 +1505,12 @@ func TestEnrichImageWithBaseImages(t *testing.T) { testImpl := &enricherImpl{ cvesSuppressorV2: &fakeCVESuppressorV2{}, integrations: set, - metadataLimiter: rate.NewLimiter(rate.Inf, 0), + metadataLimiter: rate.NewLimiter(rate.Every(50*time.Millisecond), 1), metadataCache: newCache(), metrics: newMetrics(pkgMetrics.CentralSubsystem), imageGetter: emptyImageGetter, signatureIntegrationGetter: emptySignatureIntegrationGetter, - baseImageGetter: mockBaseImageGetter, // Updated field name/type + baseImageGetter: mockBaseImageGetter, integrationHealthReporter: reporterMocks.NewMockReporter(ctrl), } diff --git a/pkg/images/enricher/enricher_v2_impl.go b/pkg/images/enricher/enricher_v2_impl.go index c0dd35d450a35..3d9f3b1ac836f 100644 --- a/pkg/images/enricher/enricher_v2_impl.go +++ b/pkg/images/enricher/enricher_v2_impl.go @@ -74,6 +74,7 @@ func (e *enricherV2Impl) EnrichWithVulnerabilities(imageV2 *storage.ImageV2, com }, errors.New("no image scanners are integrated") } + e.enrichWithBaseImage(context.Background(), imageV2) for _, imageScanner := range scanners.GetAll() { scanner := imageScanner.GetScanner() if vulnScanner, ok := scanner.(scannerTypes.ImageVulnerabilityGetter); ok { @@ -166,6 +167,8 @@ func (e *enricherV2Impl) delegateEnrichImage(ctx context.Context, enrichCtx Enri if exists && cachedImageV2IsValid(existingImg) { updated := e.updateImageWithExistingImage(imageV2, existingImg, enrichCtx.FetchOpt) if updated { + // Image is cached, but we force base image detection in case base images tags were updated. + e.enrichWithBaseImage(ctx, imageV2) e.cvesSuppressor.EnrichImageV2WithSuppressedCVEs(imageV2) // Errors for signature verification will be logged, so we can safely ignore them for the time being. 
_, _ = e.enrichWithSignatureVerificationData(ctx, enrichCtx, imageV2) @@ -229,6 +232,48 @@ func (e *enricherV2Impl) updateImageWithExistingImage(imageV2 *storage.ImageV2, return e.useExistingScan(imageV2, existingImageV2, option) } +func (e *enricherV2Impl) enrichWithBaseImage(ctx context.Context, imageV2 *storage.ImageV2) { + if !features.BaseImageDetection.Enabled() { + return + } + if imageV2.GetMetadata() == nil { + log.Warnw("Matching image with base images failed as there's no image metadata", + logging.ImageID(imageV2.GetId()), + logging.String("image_name", imageV2.GetName().GetFullName())) + return + } + layers := imageV2.GetMetadata().GetLayerShas() + if len(layers) == 0 { + log.Warnw("Matching image with base images failed as there's no image layer SHAs", + logging.ImageID(imageV2.GetId()), + logging.String("image_name", imageV2.GetName().GetFullName())) + return + } + + adminCtx := sac.WithGlobalAccessScopeChecker(ctx, + sac.AllowFixedScopes( + sac.AccessModeScopeKeys(storage.Access_READ_ACCESS), + sac.ResourceScopeKeys(resources.ImageAdministration), + ), + ) + + matchedBaseImages, err := e.baseImageGetter(adminCtx, layers) + if err != nil { + log.Warnw("Matching image with base images failed", + logging.ImageID(imageV2.GetId()), + logging.Err(err), + logging.String("image_name", imageV2.GetName().GetFullName())) + return + } + + if len(matchedBaseImages) > 0 { + log.Debugw("Matching image with base images succeeded", + logging.ImageID(imageV2.GetId()), + logging.String("image_name", imageV2.GetName().GetFullName())) + imageV2.BaseImageInfo = toBaseImageInfos(imageV2.GetMetadata(), matchedBaseImages) + } +} + // EnrichImage enriches an image with the integration set present. func (e *enricherV2Impl) EnrichImage(ctx context.Context, enrichContext EnrichmentContext, imageV2 *storage.ImageV2) (EnrichmentResult, error) { shouldDelegate, err := e.delegateEnrichImage(ctx, enrichContext, imageV2) @@ -278,24 +323,7 @@ func (e *enricherV2Impl) EnrichImage(ctx context.Context, enrichContext Enrichme updated = updated || didUpdateMetadata - if features.BaseImageDetection.Enabled() { - adminCtx := - sac.WithGlobalAccessScopeChecker(ctx, - sac.AllowFixedScopes( - sac.AccessModeScopeKeys(storage.Access_READ_ACCESS), - sac.ResourceScopeKeys(resources.ImageAdministration), - ), - ) - matchedBaseImages, err := e.baseImageGetter(adminCtx, imageV2.GetMetadata().GetLayerShas()) - if err != nil { - log.Warnw("Matching image with base images", - logging.FromContext(ctx), - logging.ImageID(imageV2.GetId()), - logging.Err(err), - logging.String("request_image", imageV2.GetName().GetFullName())) - } - imageV2.BaseImageInfo = toBaseImageInfos(imageV2.GetMetadata(), matchedBaseImages) - } + e.enrichWithBaseImage(ctx, imageV2) updated = updated || len(imageV2.GetBaseImageInfo()) > 0 // Update the image with existing values depending on the FetchOption provided or whether any are available. 
diff --git a/pkg/images/enricher/enricher_v2_impl_test.go b/pkg/images/enricher/enricher_v2_impl_test.go index 7859c0e9ad0ee..90fc438f18816 100644 --- a/pkg/images/enricher/enricher_v2_impl_test.go +++ b/pkg/images/enricher/enricher_v2_impl_test.go @@ -17,6 +17,7 @@ import ( reporterMocks "github.com/stackrox/rox/pkg/integrationhealth/mocks" pkgMetrics "github.com/stackrox/rox/pkg/metrics" "github.com/stackrox/rox/pkg/protoassert" + "github.com/stackrox/rox/pkg/protocompat" registryMocks "github.com/stackrox/rox/pkg/registries/mocks" "github.com/stackrox/rox/pkg/registries/types" scannerMocks "github.com/stackrox/rox/pkg/scanners/mocks" @@ -56,19 +57,32 @@ var ( _ types.ImageRegistry = (*fakeRegistryScanner)(nil) ) +type baseImageV2GetterMock struct { + callCount int +} + +func (m *baseImageV2GetterMock) get(_ context.Context, _ []string) ([]*storage.BaseImage, error) { + m.callCount++ + return nil, nil +} + func TestEnricherV2Flow(t *testing.T) { testutils.MustUpdateFeature(t, features.FlattenImageData, true) + // Ensure base image detection logic is toggled on for the test + t.Setenv(features.BaseImageDetection.EnvVar(), "true") + cases := []struct { - name string - ctx EnrichmentContext - inMetadataCache bool - shortCircuitRegistry bool - shortCircuitScanner bool - image *storage.ImageV2 - imageGetter ImageGetterV2 - fsr *fakeRegistryScanner - result EnrichmentResult - errorExpected bool + name string + ctx EnrichmentContext + inMetadataCache bool + shortCircuitRegistry bool + shortCircuitScanner bool + image *storage.ImageV2 + imageGetter ImageGetterV2 + fsr *fakeRegistryScanner + result EnrichmentResult + errorExpected bool + expectedBaseImageCalls int // track Base Image logic }{ { name: "nothing in the cache", @@ -80,8 +94,10 @@ func TestEnricherV2Flow(t *testing.T) { Id: utils.NewImageV2ID(&storage.ImageName{Registry: "reg", FullName: "reg"}, "sha"), Digest: "sha", Name: &storage.ImageName{Registry: "reg", FullName: "reg"}, + Metadata: &storage.ImageMetadata{ + LayerShas: []string{"SHA1"}, + }, }, - fsr: newFakeRegistryScanner(opts{ requestedMetadata: true, requestedScan: true, @@ -90,6 +106,7 @@ func TestEnricherV2Flow(t *testing.T) { ImageUpdated: true, ScanResult: ScanSucceeded, }, + expectedBaseImageCalls: 1, }, { name: "scan and metadata in both caches", @@ -103,13 +120,15 @@ func TestEnricherV2Flow(t *testing.T) { Id: utils.NewImageV2ID(&storage.ImageName{Registry: "reg", FullName: "reg"}, "sha"), Digest: "sha", Name: &storage.ImageName{Registry: "reg", FullName: "reg"}, + Metadata: &storage.ImageMetadata{ + LayerShas: []string{"SHA1"}, + }, }, imageGetter: imageGetterV2FromImage(&storage.ImageV2{ Id: utils.NewImageV2ID(&storage.ImageName{Registry: "reg", FullName: "reg"}, "sha"), Digest: "sha", Name: &storage.ImageName{Registry: "reg", FullName: "reg"}, Scan: &storage.ImageScan{}}), - fsr: newFakeRegistryScanner(opts{ requestedMetadata: false, requestedScan: false, @@ -118,6 +137,7 @@ func TestEnricherV2Flow(t *testing.T) { ImageUpdated: true, ScanResult: ScanSucceeded, }, + expectedBaseImageCalls: 1, }, { name: "data in both caches, but force refetch", @@ -129,8 +149,10 @@ func TestEnricherV2Flow(t *testing.T) { Id: utils.NewImageV2ID(&storage.ImageName{Registry: "reg", FullName: "reg"}, "sha"), Digest: "sha", Name: &storage.ImageName{Registry: "reg", FullName: "reg"}, + Metadata: &storage.ImageMetadata{ + LayerShas: []string{"SHA1"}, + }, }, - fsr: newFakeRegistryScanner(opts{ requestedMetadata: true, requestedScan: true, @@ -139,6 +161,7 @@ func TestEnricherV2Flow(t 
*testing.T) { ImageUpdated: true, ScanResult: ScanSucceeded, }, + expectedBaseImageCalls: 1, }, { name: " data in both caches but force refetch use names", @@ -150,6 +173,9 @@ func TestEnricherV2Flow(t *testing.T) { Id: utils.NewImageV2ID(&storage.ImageName{Registry: "reg", FullName: "reg"}, "sha"), Digest: "sha", Name: &storage.ImageName{Registry: "reg", FullName: "reg"}, + Metadata: &storage.ImageMetadata{ + LayerShas: []string{"SHA1"}, + }, }, fsr: newFakeRegistryScanner(opts{ requestedMetadata: true, @@ -159,6 +185,7 @@ func TestEnricherV2Flow(t *testing.T) { ImageUpdated: true, ScanResult: ScanSucceeded, }, + expectedBaseImageCalls: 1, }, { name: "data in both caches but force refetch scans only", @@ -170,8 +197,10 @@ func TestEnricherV2Flow(t *testing.T) { Id: utils.NewImageV2ID(&storage.ImageName{Registry: "reg", FullName: "reg"}, "sha"), Digest: "sha", Name: &storage.ImageName{Registry: "reg", FullName: "reg"}, + Metadata: &storage.ImageMetadata{ + LayerShas: []string{"SHA1"}, + }, }, - fsr: newFakeRegistryScanner(opts{ requestedMetadata: false, requestedScan: true, @@ -180,6 +209,7 @@ func TestEnricherV2Flow(t *testing.T) { ImageUpdated: true, ScanResult: ScanSucceeded, }, + expectedBaseImageCalls: 1, }, { name: "set ScannerTypeHint to something not found in integrations", @@ -193,8 +223,8 @@ func TestEnricherV2Flow(t *testing.T) { Id: utils.NewImageV2ID(&storage.ImageName{Registry: "reg", FullName: "reg"}, "sha"), Digest: "sha", Name: &storage.ImageName{Registry: "reg", FullName: "reg"}, + // no need to pass metadata }, - fsr: newFakeRegistryScanner(opts{ requestedMetadata: false, requestedScan: false, @@ -203,6 +233,7 @@ func TestEnricherV2Flow(t *testing.T) { ImageUpdated: true, ScanResult: ScanNotDone, }, + expectedBaseImageCalls: 0, }, { name: "set ScannerTypeHint to something found in integrations", @@ -215,8 +246,10 @@ func TestEnricherV2Flow(t *testing.T) { Id: utils.NewImageV2ID(&storage.ImageName{Registry: "reg", FullName: "reg"}, "sha"), Digest: "sha", Name: &storage.ImageName{Registry: "reg", FullName: "reg"}, + Metadata: &storage.ImageMetadata{ + LayerShas: []string{"SHA1"}, + }, }, - fsr: newFakeRegistryScanner(opts{ requestedMetadata: false, requestedScan: true, @@ -225,6 +258,7 @@ func TestEnricherV2Flow(t *testing.T) { ImageUpdated: true, ScanResult: ScanSucceeded, }, + expectedBaseImageCalls: 1, }, { name: "data not in caches, and no external metadata", @@ -239,7 +273,6 @@ func TestEnricherV2Flow(t *testing.T) { Digest: "sha", Name: &storage.ImageName{Registry: "reg", FullName: "reg"}, }, - fsr: newFakeRegistryScanner(opts{ requestedMetadata: false, requestedScan: false, @@ -248,6 +281,7 @@ func TestEnricherV2Flow(t *testing.T) { ImageUpdated: false, ScanResult: ScanNotDone, }, + expectedBaseImageCalls: 0, }, { name: "data not in cache, but image already has metadata and scan", @@ -260,7 +294,7 @@ func TestEnricherV2Flow(t *testing.T) { image: &storage.ImageV2{ Id: utils.NewImageV2ID(&storage.ImageName{Registry: "reg", FullName: "reg"}, "sha"), Digest: "sha", - Metadata: &storage.ImageMetadata{}, + Metadata: &storage.ImageMetadata{DataSource: &storage.DataSource{Id: "exists"}, LayerShas: []string{"SHA1"}}, Scan: &storage.ImageScan{}, Name: &storage.ImageName{Registry: "reg", FullName: "reg"}, }, @@ -272,6 +306,7 @@ func TestEnricherV2Flow(t *testing.T) { ImageUpdated: false, ScanResult: ScanNotDone, }, + expectedBaseImageCalls: 1, }, { name: "data not in cache and ignore existing images", @@ -284,6 +319,9 @@ func TestEnricherV2Flow(t *testing.T) { Digest: "sha", 
Name: &storage.ImageName{Registry: "reg", FullName: "reg"}, Scan: &storage.ImageScan{}, + Metadata: &storage.ImageMetadata{ + LayerShas: []string{"SHA1"}, + }, }, imageGetter: imageGetterV2PanicOnCall, fsr: newFakeRegistryScanner(opts{ @@ -294,6 +332,7 @@ func TestEnricherV2Flow(t *testing.T) { ImageUpdated: true, ScanResult: ScanSucceeded, }, + expectedBaseImageCalls: 1, }, { name: "data in cache and ignore existing images", @@ -306,7 +345,7 @@ func TestEnricherV2Flow(t *testing.T) { image: &storage.ImageV2{ Id: utils.NewImageV2ID(&storage.ImageName{Registry: "reg", FullName: "reg"}, "sha"), Digest: "sha", - Metadata: &storage.ImageMetadata{}, + Metadata: &storage.ImageMetadata{DataSource: &storage.DataSource{Id: "exists"}, LayerShas: []string{"SHA1"}}, Scan: &storage.ImageScan{}, Name: &storage.ImageName{Registry: "reg", FullName: "reg"}, }, @@ -319,6 +358,7 @@ func TestEnricherV2Flow(t *testing.T) { ImageUpdated: true, ScanResult: ScanSucceeded, }, + expectedBaseImageCalls: 1, }, } @@ -341,43 +381,49 @@ func TestEnricherV2Flow(t *testing.T) { scannerSet := scannerMocks.NewMockSet(ctrl) if !c.shortCircuitScanner { - scannerSet.EXPECT().IsEmpty().Return(false) + scannerSet.EXPECT().IsEmpty().Return(false).AnyTimes() scannerSet.EXPECT().GetAll().Return([]scannertypes.ImageScannerWithDataSource{fsr}).AnyTimes() - set.EXPECT().ScannerSet().Return(scannerSet) + set.EXPECT().ScannerSet().Return(scannerSet).AnyTimes() } mockReporter := reporterMocks.NewMockReporter(ctrl) mockReporter.EXPECT().UpdateIntegrationHealthAsync(gomock.Any()).AnyTimes() + // Setup mock for base image getter + mockBaseGetter := &baseImageV2GetterMock{} + enricherImpl := &enricherV2Impl{ cvesSuppressor: &fakeCVESuppressorV2{}, integrations: set, errorsPerScanner: map[scannertypes.ImageScannerWithDataSource]int32{fsr: 0}, errorsPerRegistry: map[types.ImageRegistry]int32{fsr: 0}, integrationHealthReporter: mockReporter, - metadataLimiter: rate.NewLimiter(rate.Every(50*time.Millisecond), 1), + metadataLimiter: rate.NewLimiter(rate.Inf, 0), metadataCache: newCache(), metrics: newMetrics(pkgMetrics.CentralSubsystem), imageGetter: emptyImageGetterV2, signatureIntegrationGetter: emptySignatureIntegrationGetter, signatureFetcher: &fakeSigFetcher{}, - baseImageGetter: emptyBaseImageGetterV2, + baseImageGetter: mockBaseGetter.get, } - if c.inMetadataCache { + + if c.inMetadataCache && c.image != nil { enricherImpl.metadataCache.Add(c.image.GetId(), c.image.GetMetadata()) } if c.imageGetter != nil { enricherImpl.imageGetter = c.imageGetter } - result, err := enricherImpl.EnrichImage(emptyCtx, c.ctx, c.image) - if !c.errorExpected { - require.NoError(t, err) - } else { + + result, err := enricherImpl.EnrichImage(context.Background(), c.ctx, c.image) + + if c.errorExpected { require.Error(t, err) + } else { + require.NoError(t, err) } - assert.Equal(t, c.result, result) - assert.Equal(t, c.fsr, fsr) + assert.Equal(t, c.result, result) + assert.Equal(t, c.expectedBaseImageCalls, mockBaseGetter.callCount, "Mismatch in: %s", c.name) }) } } @@ -1063,9 +1109,13 @@ func TestDelegateEnrichImageV2(t *testing.T) { func TestEnrichImageV2_Delegate(t *testing.T) { testutils.MustUpdateFeature(t, features.FlattenImageData, true) deleEnrichCtx := EnrichmentContext{Delegable: true} + + biMock := &baseImageV2GetterMock{} + e := enricherV2Impl{ - cvesSuppressor: &fakeCVESuppressorV2{}, - imageGetter: emptyImageGetterV2, + cvesSuppressor: &fakeCVESuppressorV2{}, + imageGetter: emptyImageGetterV2, + baseImageGetter: biMock.get, // Inject tracker } var 
dele *delegatorMocks.MockDelegator @@ -1073,6 +1123,7 @@ func TestEnrichImageV2_Delegate(t *testing.T) { ctrl := gomock.NewController(t) dele = delegatorMocks.NewMockDelegator(ctrl) e.scanDelegator = dele + e.baseImageGetter = biMock.get // Reset for every sub-test } t.Run("delegate enrich error", func(t *testing.T) { @@ -1080,21 +1131,70 @@ func TestEnrichImageV2_Delegate(t *testing.T) { dele.EXPECT().GetDelegateClusterID(emptyCtx, gomock.Any()).Return("", true, errBroken) result, err := e.EnrichImage(emptyCtx, deleEnrichCtx, nil) - assert.Equal(t, result.ScanResult, ScanNotDone) + + assert.Equal(t, ScanNotDone, result.ScanResult) assert.False(t, result.ImageUpdated) assert.ErrorIs(t, err, errBroken) + assert.Equal(t, 0, biMock.callCount, "Base image should not be fetched on error") }) t.Run("delegate enrich success", func(t *testing.T) { setup(t) - fakeImage := &storage.ImageV2{} + fakeImage := &storage.ImageV2{Id: "sha256:delegate"} + dele.EXPECT().GetDelegateClusterID(emptyCtx, gomock.Any()).Return("cluster-id", true, nil) dele.EXPECT().DelegateScanImageV2(emptyCtx, gomock.Any(), "cluster-id", "", gomock.Any()).Return(fakeImage, nil) result, err := e.EnrichImage(emptyCtx, deleEnrichCtx, fakeImage) - assert.Equal(t, result.ScanResult, ScanSucceeded) + + assert.Equal(t, ScanSucceeded, result.ScanResult) assert.True(t, result.ImageUpdated) assert.NoError(t, err) + + assert.Equal(t, 0, biMock.callCount, "Base image should not be fetched for fresh scans") + }) + t.Run("delegate enrich success with image cache", func(t *testing.T) { + setup(t) + + const sha = "sha256:delegate-cached" + imgName := &storage.ImageName{FullName: "reg/img:tag"} + + derivedID := utils.NewImageV2ID(imgName, sha) + + inputImage := &storage.ImageV2{ + Id: derivedID, // Use the derived ID + Digest: sha, + Name: imgName, + } + + cachedImage := &storage.ImageV2{ + Id: derivedID, // Use the same derived ID + Digest: sha, + Name: imgName, + Metadata: &storage.ImageMetadata{ + V1: &storage.V1Metadata{ + Layers: []*storage.ImageLayer{{Instruction: "FROM"}}, + }, + V2: &storage.V2Metadata{ + Digest: sha, + }, + LayerShas: []string{"layer1", "layer2"}, + }, + Scan: &storage.ImageScan{ + ScannerVersion: "1.0", + ScanTime: protocompat.TimestampNow(), + }, + } + + e.imageGetter = imageGetterV2FromImage(cachedImage) + + dele.EXPECT().GetDelegateClusterID(gomock.Any(), gomock.Any()).Return("cluster-id", true, nil) + + result, err := e.EnrichImage(emptyCtx, deleEnrichCtx, inputImage) + + assert.NoError(t, err) + assert.True(t, result.ImageUpdated) + assert.Equal(t, 1, biMock.callCount, "Base image getter should be hit") }) } From fbe6daba1649503cf699d21c1bbf9fef0916cdda Mon Sep 17 00:00:00 2001 From: rhacs-bot <148914812+rhacs-bot@users.noreply.github.com> Date: Mon, 9 Feb 2026 01:39:26 -0700 Subject: [PATCH 138/232] chore(release): Add 4.9.4 to scanner updater configuration (#18897) Co-authored-by: mclasmeier --- scanner/updater/version/RELEASE_VERSION | 1 + 1 file changed, 1 insertion(+) diff --git a/scanner/updater/version/RELEASE_VERSION b/scanner/updater/version/RELEASE_VERSION index d408f27fcc697..d4a970c731718 100644 --- a/scanner/updater/version/RELEASE_VERSION +++ b/scanner/updater/version/RELEASE_VERSION @@ -59,3 +59,4 @@ 4.9.0 4.9.1 4.9.2 +4.9.4 From be770395eca79a459b117977de671b393501a3c9 Mon Sep 17 00:00:00 2001 From: rhacs-bot <148914812+rhacs-bot@users.noreply.github.com> Date: Mon, 9 Feb 2026 01:40:08 -0700 Subject: [PATCH 139/232] chore(release): Add 4.8.9 to scanner updater configuration (#18898) Co-authored-by: 
mclasmeier --- scanner/updater/version/RELEASE_VERSION | 1 + 1 file changed, 1 insertion(+) diff --git a/scanner/updater/version/RELEASE_VERSION b/scanner/updater/version/RELEASE_VERSION index d4a970c731718..3288586370601 100644 --- a/scanner/updater/version/RELEASE_VERSION +++ b/scanner/updater/version/RELEASE_VERSION @@ -56,6 +56,7 @@ 4.8.6 4.8.7 4.8.8 +4.8.9 4.9.0 4.9.1 4.9.2 From 336c1f1700364a7932e3eb4c0ab8542ce8f4f3ad Mon Sep 17 00:00:00 2001 From: Kyle Lape Date: Mon, 9 Feb 2026 09:42:20 -0500 Subject: [PATCH 140/232] ROX-33012: CGO_ENABLED=1 for roxagent (#18869) After roxagent was added to the main container image, it is now subject to FIPS compliance requirements. The make targets were configured for roxagent to be used as a CLI similar to roxctl, which is not built with CGO_ENABLED=1. Thus roxagent is not FIPS compliant, which will block Konflux release until it's resolved. --- Makefile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 799959e64cc3c..16d764410c463 100644 --- a/Makefile +++ b/Makefile @@ -429,7 +429,7 @@ endif roxagent: roxagent-build .PHONY: cli-build -cli-build: roxctl-build roxagent-build +cli-build: roxctl-build .PHONY: cli-install cli-install: roxctl-install @@ -491,7 +491,8 @@ main-build-nodeps: migrator \ sensor/admission-control \ sensor/kubernetes \ - sensor/upgrader + sensor/upgrader \ + compliance/virtualmachines/roxagent ifndef CI CGO_ENABLED=0 $(GOBUILD) roxctl endif From d07390367c26429e551dff6c8ac1e4982a5e5897 Mon Sep 17 00:00:00 2001 From: Stephan Hesselmann Date: Mon, 9 Feb 2026 18:01:41 +0100 Subject: [PATCH 141/232] ROX-32817: refactor wait in compliance tests (#18841) Co-authored-by: Guzman --- tests/compliance_operator_v2_test.go | 59 +++++++++++++++------------- 1 file changed, 32 insertions(+), 27 deletions(-) diff --git a/tests/compliance_operator_v2_test.go b/tests/compliance_operator_v2_test.go index aaa1087ab80a0..066fbee1f609f 100644 --- a/tests/compliance_operator_v2_test.go +++ b/tests/compliance_operator_v2_test.go @@ -37,10 +37,12 @@ import ( ) const ( - coNamespaceV2 = "openshift-compliance" - stackroxNamespace = "stackrox" - defaultTimeout = 90 * time.Second - eventuallyTimeout = 120 * time.Second + coNamespaceV2 = "openshift-compliance" + stackroxNamespace = "stackrox" + defaultTimeout = 90 * time.Second + eventuallyTimeout = 120 * time.Second + waitForDoneTimeout = 5 * time.Minute + waitForDoneInterval = 30 * time.Second ) var ( @@ -131,27 +133,25 @@ func createDynamicClient(t testutils.T) dynclient.Client { func waitForComplianceSuiteToComplete(t *testing.T, suiteName string, interval, timeout time.Duration) { client := createDynamicClient(t) - - ticker := time.NewTicker(interval) - defer ticker.Stop() - ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() - t.Logf("Waiting for ComplianceSuite to reach DONE phase") - for range ticker.C { - var suite complianceoperatorv1.ComplianceSuite - mustEventually(t, ctx, func() error { - return client.Get(ctx, types.NamespacedName{Name: suiteName, Namespace: "openshift-compliance"}, &suite) - - }, timeout, fmt.Sprintf("failed to get ComplianceSuite %s", suiteName)) + t.Logf("Waiting for ComplianceSuite %s to reach DONE phase", suiteName) + require.EventuallyWithT(t, func(c *assert.CollectT) { + callCtx, callCancel := context.WithTimeout(ctx, interval) + defer callCancel() - if suite.Status.Phase == "DONE" { - t.Logf("ComplianceSuite %s reached DONE phase", suiteName) - return - } - t.Logf("ComplianceSuite %s is in 
%s phase", suiteName, suite.Status.Phase) - } + var suite complianceoperatorv1.ComplianceSuite + err := client.Get(callCtx, + types.NamespacedName{Name: suiteName, Namespace: "openshift-compliance"}, + &suite, + ) + require.NoError(c, err, "failed to get ComplianceSuite %s", suiteName) + + require.Equal(c, complianceoperatorv1.PhaseDone, suite.Status.Phase, + "ComplianceSuite %s not DONE (current phase is %q)", suiteName, suite.Status.Phase) + }, timeout, interval) + t.Logf("ComplianceSuite %s has reached DONE phase", suiteName) } func cleanUpResources(ctx context.Context, t *testing.T, resourceName string, namespace string) { @@ -705,10 +705,16 @@ func TestComplianceV2ComplianceObjectMetadata(t *testing.T) { // Ensure the ScanSetting and ScanSettingBinding have ACS metadata client := createDynamicClient(t) var scanSetting complianceoperatorv1.ScanSetting - mustEventually(t, ctx, func() error { - return client.Get(context.TODO(), types.NamespacedName{Name: testName, Namespace: "openshift-compliance"}, &scanSetting) + require.EventuallyWithT(t, func(c *assert.CollectT) { + callCtx, callCancel := context.WithTimeout(ctx, 10*time.Second) + defer callCancel() - }, timeout, fmt.Sprintf("failed to get ScanSetting %s", testName)) + err := client.Get(callCtx, + types.NamespacedName{Name: testName, Namespace: "openshift-compliance"}, + &scanSetting, + ) + require.NoError(c, err, "failed to get ScanSetting %s", testName) + }, defaultTimeout, 5*time.Second) assert.Contains(t, scanSetting.Labels, "app.kubernetes.io/name") assert.Equal(t, scanSetting.Labels["app.kubernetes.io/name"], "stackrox") @@ -738,7 +744,6 @@ func getscanConfigID(configName string, scanConfigs []*v2.ComplianceScanConfigur if scanConfigs[i].GetScanName() == configName { configID = scanConfigs[i].GetId() } - } return configID } @@ -775,12 +780,12 @@ func TestComplianceV2ScheduleRescan(t *testing.T) { defer client.DeleteComplianceScanConfiguration(context.TODO(), &v2.ResourceByID{Id: scanConfig.GetId()}) - waitForComplianceSuiteToComplete(t, scanConfig.ScanName, 2*time.Second, 5*time.Minute) + waitForComplianceSuiteToComplete(t, scanConfig.ScanName, waitForDoneInterval, waitForDoneTimeout) // Invoke a rescan _, err = client.RunComplianceScanConfiguration(context.TODO(), &v2.ResourceByID{Id: scanConfig.GetId()}) require.NoError(t, err, "failed to rerun scan schedule %s", scanConfigName) // Assert the scan is rerunning on the cluster using the Compliance Operator CRDs - waitForComplianceSuiteToComplete(t, scanConfig.ScanName, 2*time.Second, 5*time.Minute) + waitForComplianceSuiteToComplete(t, scanConfig.ScanName, waitForDoneInterval, waitForDoneTimeout) } From dd186b1df6b274f05b827df1ec858b5d13951c81 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Feb 2026 15:35:15 -0500 Subject: [PATCH 142/232] chore(deps-dev): bump webpack from 5.100.2 to 5.105.0 in /ui/apps/platform (#18885) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- ui/apps/platform/package-lock.json | 170 ++++++++++++++++++++++------- ui/apps/platform/package.json | 2 +- 2 files changed, 131 insertions(+), 41 deletions(-) diff --git a/ui/apps/platform/package-lock.json b/ui/apps/platform/package-lock.json index 2ead5ea6a3958..77807139bb220 100644 --- a/ui/apps/platform/package-lock.json +++ b/ui/apps/platform/package-lock.json @@ -147,7 +147,7 @@ "vite": "^6.4.1", "vite-plugin-svgr": "^4.3.0", "vitest": "^3.0.5", - "webpack": "^5.99.9", + 
"webpack": "^5.105.0", "webpack-cli": "^6.0.1", "webpack-dev-server": "^5.2.2" }, @@ -5744,6 +5744,16 @@ } ] }, + "node_modules/baseline-browser-mapping": { + "version": "2.9.19", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.19.tgz", + "integrity": "sha512-ipDqC8FrAl/76p2SSWKSI+H9tFwm7vYqXQrItCuiVPt26Km0jS+NzSsBWAaBusvSbQcfJG+JitdMm+wZAgTYqg==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "baseline-browser-mapping": "dist/cli.js" + } + }, "node_modules/batch": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz", @@ -6112,9 +6122,9 @@ "integrity": "sha1-FkpUg+Yw+kMh5a8HAg5TGDGyYJs=" }, "node_modules/caniuse-lite": { - "version": "1.0.30001731", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001731.tgz", - "integrity": "sha512-lDdp2/wrOmTRWuoB5DpfNkC0rJDU8DqRa6nYL6HK6sytw70QMopt/NIc/9SM7ylItlBWfACXk0tEn37UWM/+mg==", + "version": "1.0.30001768", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001768.tgz", + "integrity": "sha512-qY3aDRZC5nWPgHUgIB84WL+nySuo19wk0VJpp/XI9T34lrvkyhRvNVOFJOp2kxClQhiFBu+TaUSudf6oa3vkSA==", "dev": true, "funding": [ { @@ -8130,9 +8140,9 @@ "license": "MIT" }, "node_modules/electron-to-chromium": { - "version": "1.5.195", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.195.tgz", - "integrity": "sha512-URclP0iIaDUzqcAyV1v2PgduJ9N0IdXmWsnPzPfelvBmjmZzEy6xJcjb1cXj+TbYqXgtLrjHEoaSIdTYhw4ezg==", + "version": "1.5.286", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.286.tgz", + "integrity": "sha512-9tfDXhJ4RKFNerfjdCcZfufu49vg620741MNs26a9+bhLThdB+plgMeou98CAaHu/WATj2iHOOHTp1hWtABj2A==", "dev": true, "license": "ISC" }, @@ -8171,14 +8181,14 @@ } }, "node_modules/enhanced-resolve": { - "version": "5.18.2", - "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.18.2.tgz", - "integrity": "sha512-6Jw4sE1maoRJo3q8MsSIn2onJFbLTOjY9hlx4DZXmOKvLRd1Ok2kXmAGXaafL2+ijsJZ1ClYbl/pmqr9+k4iUQ==", + "version": "5.19.0", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.19.0.tgz", + "integrity": "sha512-phv3E1Xl4tQOShqSte26C7Fl84EwUdZsyOuSSk9qtAGyyQs2s3jJzComh+Abf4g187lUUAvH+H26omrqia2aGg==", "dev": true, "license": "MIT", "dependencies": { "graceful-fs": "^4.2.4", - "tapable": "^2.2.0" + "tapable": "^2.3.0" }, "engines": { "node": ">=10.13.0" @@ -12130,13 +12140,17 @@ } }, "node_modules/loader-runner": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz", - "integrity": "sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==", + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.1.tgz", + "integrity": "sha512-IWqP2SCPhyVFTBtRcgMHdzlf9ul25NwaFx4wCEH/KjAXuuHY4yNjvPXsBokp8jCB936PyWRaPKUNh8NvylLp2Q==", "dev": true, "license": "MIT", "engines": { "node": ">=6.11.5" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" } }, "node_modules/locate-path": { @@ -14024,9 +14038,9 @@ } }, "node_modules/node-releases": { - "version": "2.0.19", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.19.tgz", - "integrity": "sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw==", + "version": "2.0.27", + "resolved": 
"https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz", + "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==", "dev": true, "license": "MIT" }, @@ -16923,9 +16937,9 @@ } }, "node_modules/schema-utils": { - "version": "4.3.2", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.3.2.tgz", - "integrity": "sha512-Gn/JaSk/Mt9gYubxTtSn/QCV4em9mpAPiR1rqy/Ocu19u/G9J5WWdNoUT4SiV6mFC3y6cxyFcFwdzPM3FgxGAQ==", + "version": "4.3.3", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.3.3.tgz", + "integrity": "sha512-eflK8wEtyOE6+hsaRVPxvUKYCpRgzLqDTb8krvAsRIwOGlHoSgYLgBXoubGgLd2fT41/OUYdb48v4k4WWHQurA==", "dev": true, "license": "MIT", "dependencies": { @@ -18183,13 +18197,17 @@ } }, "node_modules/tapable": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.2.tgz", - "integrity": "sha512-Re10+NauLTMCudc7T5WLFLAwDhQ0JWdrMK+9B2M8zR5hRExKmsRDCBA7/aV/pNJFltmBFO5BAMlQFi/vq3nKOg==", + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.3.0.tgz", + "integrity": "sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==", "dev": true, "license": "MIT", "engines": { "node": ">=6" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" } }, "node_modules/terser": { @@ -18211,9 +18229,9 @@ } }, "node_modules/terser-webpack-plugin": { - "version": "5.3.14", - "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.14.tgz", - "integrity": "sha512-vkZjpUjb6OMS7dhV+tILUW6BhpDR7P2L/aQSAv+Uwk+m8KATX9EccViHTJR2qDtACKPIYndLGCyl3FMo+r2LMw==", + "version": "5.3.16", + "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.16.tgz", + "integrity": "sha512-h9oBFCWrq78NyWWVcSwZarJkZ01c2AyGrzs1crmHZO3QUg9D61Wu4NPjBy69n7JqylFF5y+CsUZYmYEIZ3mR+Q==", "dev": true, "license": "MIT", "dependencies": { @@ -20124,9 +20142,9 @@ } }, "node_modules/watchpack": { - "version": "2.4.4", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.4.tgz", - "integrity": "sha512-c5EGNOiyxxV5qmTtAB7rbiXxi1ooX1pQKMLX/MIabJjRA0SJBQOjKF+KSVfHkr9U1cADPon0mRiVe/riyaiDUA==", + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.5.1.tgz", + "integrity": "sha512-Zn5uXdcFNIA1+1Ei5McRd+iRzfhENPCe7LeABkJtNulSxjma+l7ltNx55BWZkRlwRnpOgHqxnjyaDgJnNXnqzg==", "dev": true, "license": "MIT", "dependencies": { @@ -20200,9 +20218,9 @@ } }, "node_modules/webpack": { - "version": "5.100.2", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.100.2.tgz", - "integrity": "sha512-QaNKAvGCDRh3wW1dsDjeMdDXwZm2vqq3zn6Pvq4rHOEOGSaUMgOOjG2Y9ZbIGzpfkJk9ZYTHpDqgDfeBDcnLaw==", + "version": "5.105.0", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.105.0.tgz", + "integrity": "sha512-gX/dMkRQc7QOMzgTe6KsYFM7DxeIONQSui1s0n/0xht36HvrgbxtM1xBlgx596NbpHuQU8P7QpKwrZYwUX48nw==", "dev": true, "license": "MIT", "dependencies": { @@ -20214,22 +20232,22 @@ "@webassemblyjs/wasm-parser": "^1.14.1", "acorn": "^8.15.0", "acorn-import-phases": "^1.0.3", - "browserslist": "^4.24.0", + "browserslist": "^4.28.1", "chrome-trace-event": "^1.0.2", - "enhanced-resolve": "^5.17.2", - "es-module-lexer": "^1.2.1", + "enhanced-resolve": "^5.19.0", + "es-module-lexer": "^2.0.0", "eslint-scope": "5.1.1", "events": "^3.2.0", "glob-to-regexp": "^0.4.1", "graceful-fs": "^4.2.11", "json-parse-even-better-errors": "^2.3.1", - 
"loader-runner": "^4.2.0", + "loader-runner": "^4.3.1", "mime-types": "^2.1.27", "neo-async": "^2.6.2", - "schema-utils": "^4.3.2", - "tapable": "^2.1.1", - "terser-webpack-plugin": "^5.3.11", - "watchpack": "^2.4.1", + "schema-utils": "^4.3.3", + "tapable": "^2.3.0", + "terser-webpack-plugin": "^5.3.16", + "watchpack": "^2.5.1", "webpack-sources": "^3.3.3" }, "bin": { @@ -20435,6 +20453,78 @@ "node": ">=10.13.0" } }, + "node_modules/webpack/node_modules/browserslist": { + "version": "4.28.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz", + "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "baseline-browser-mapping": "^2.9.0", + "caniuse-lite": "^1.0.30001759", + "electron-to-chromium": "^1.5.263", + "node-releases": "^2.0.27", + "update-browserslist-db": "^1.2.0" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/webpack/node_modules/es-module-lexer": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-2.0.0.tgz", + "integrity": "sha512-5POEcUuZybH7IdmGsD8wlf0AI55wMecM9rVBTI/qEAy2c1kTOm3DjFYjrBdI2K3BaJjJYfYFeRtM0t9ssnRuxw==", + "dev": true, + "license": "MIT" + }, + "node_modules/webpack/node_modules/update-browserslist-db": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", + "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, "node_modules/websocket-driver": { "version": "0.7.4", "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz", diff --git a/ui/apps/platform/package.json b/ui/apps/platform/package.json index f801c57a1051d..36489db92be8d 100644 --- a/ui/apps/platform/package.json +++ b/ui/apps/platform/package.json @@ -176,7 +176,7 @@ "vite": "^6.4.1", "vite-plugin-svgr": "^4.3.0", "vitest": "^3.0.5", - "webpack": "^5.99.9", + "webpack": "^5.105.0", "webpack-cli": "^6.0.1", "webpack-dev-server": "^5.2.2" }, From 586cbadff03a2615f36624b4bb9022b3a7cb12e0 Mon Sep 17 00:00:00 2001 From: Mark Pedrotti Date: Mon, 9 Feb 2026 15:43:51 -0500 Subject: [PATCH 143/232] ROX-33039: Delete ROX_ADMISSION_CONTROLLER_CONFIG in ui/cypress (#18905) --- .../cypress/integration/clusters/Clusters.selectors.js | 6 +++--- .../integration/clusters/clustersHealthStatus.test.js | 6 +----- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/ui/apps/platform/cypress/integration/clusters/Clusters.selectors.js b/ui/apps/platform/cypress/integration/clusters/Clusters.selectors.js index 
85bfd4d4c9617..045ed0193d6e9 100644 --- a/ui/apps/platform/cypress/integration/clusters/Clusters.selectors.js +++ b/ui/apps/platform/cypress/integration/clusters/Clusters.selectors.js @@ -7,9 +7,9 @@ export const selectors = { // Ignore the first checkbox column and last delete column. tableDataCell: '.rt-tr-group:not(.hidden) .rt-td:not(:first-child):not(.hidden)', }, - clusterForm: scopeSelectors('[data-testid="cluster-form"]', { - nameInput: 'input[name="name"]', - }), + clusterForm: { + nameInput: `.pf-v5-c-form__group-label:contains("Cluster name") + .pf-v5-c-form__group-control input`, + }, clusterHealth: scopeSelectors(clusterPageSelector, { clusterStatus: '[data-testid="clusterStatus"]', sensorStatus: '[data-testid="sensorStatus"]', diff --git a/ui/apps/platform/cypress/integration/clusters/clustersHealthStatus.test.js b/ui/apps/platform/cypress/integration/clusters/clustersHealthStatus.test.js index c81be365d17f1..19e0e3ec465b6 100644 --- a/ui/apps/platform/cypress/integration/clusters/clustersHealthStatus.test.js +++ b/ui/apps/platform/cypress/integration/clusters/clustersHealthStatus.test.js @@ -1,5 +1,4 @@ import withAuth from '../../helpers/basicAuth'; -import { hasFeatureFlag } from '../../helpers/features'; import { visitClusterByNameWithFixtureMetadataDatetime, @@ -246,10 +245,7 @@ describe.skip('Clusters Health Status', () => { datetimeISOString ); - const nameInputSelector = hasFeatureFlag('ROX_ADMISSION_CONTROLLER_CONFIG') - ? `.pf-v5-c-form__group-label:contains("Cluster name") + .pf-v5-c-form__group-control input` - : selectors.clusterForm.nameInput; - cy.get(nameInputSelector).should('have.value', clusterName); + cy.get(selectors.clusterForm.nameInput).should('have.value', clusterName); // Cluster Status cy.get(selectors.clusterHealth.clusterStatus).should('have.text', clusterStatus); From 76d51d2fec5f65461d5db47fff9e3afd602c8564 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C3=ABl?= Date: Mon, 9 Feb 2026 22:50:35 +0100 Subject: [PATCH 144/232] refactor: custom metrics configuration management (#18312) --- .../metrics/custom/tracker/configuration.go | 4 ++ .../metrics/custom/tracker/tracker_base.go | 71 +++++++++---------- .../custom/tracker/tracker_base_test.go | 4 +- 3 files changed, 41 insertions(+), 38 deletions(-) diff --git a/central/metrics/custom/tracker/configuration.go b/central/metrics/custom/tracker/configuration.go index f411d8c91f6c1..0900f7e60ceca 100644 --- a/central/metrics/custom/tracker/configuration.go +++ b/central/metrics/custom/tracker/configuration.go @@ -41,3 +41,7 @@ type Configuration struct { toDelete []MetricName period time.Duration } + +func (cfg *Configuration) isEnabled() bool { + return cfg != nil && len(cfg.metrics) > 0 && cfg.period > 0 +} diff --git a/central/metrics/custom/tracker/tracker_base.go b/central/metrics/custom/tracker/tracker_base.go index f55a353ed3b04..6a642960c52de 100644 --- a/central/metrics/custom/tracker/tracker_base.go +++ b/central/metrics/custom/tracker/tracker_base.go @@ -20,6 +20,7 @@ import ( "github.com/stackrox/rox/pkg/set" "github.com/stackrox/rox/pkg/sync" "github.com/stackrox/rox/pkg/telemetry/phonehome/telemeter" + "github.com/stackrox/rox/pkg/utils" ) const inactiveGathererTTL = 2 * 24 * time.Hour @@ -72,6 +73,30 @@ type gatherer[F Finding] struct { config *Configuration } +// updateMetrics aggregates the fetched findings and updates the gauges. 
+func (g *gatherer[F]) updateMetrics(generator iter.Seq2[F, error]) error { + g.aggregator.reset() + for finding, err := range generator { + if err != nil { + return err + } + g.aggregator.count(finding) + } + g.registry.Lock() + defer g.registry.Unlock() + for metric, records := range g.aggregator.result { + g.registry.Reset(string(metric)) + for _, rec := range records { + g.registry.SetTotal(string(metric), rec.labels, rec.total) + } + } + return nil +} + +func (g *gatherer[F]) trySetRunning() bool { + return g.running.CompareAndSwap(false, true) +} + // TrackerBase implements a generic finding tracker. // Configured with a finding generator and other arguments, it runs a goroutine // that periodically aggregates gathered values and updates the gauge values. @@ -139,7 +164,7 @@ func (tracker *TrackerBase[F]) Reconfigure(cfg *Configuration) { } previous := tracker.setConfiguration(cfg) if previous != nil { - if cfg.period == 0 { + if !cfg.isEnabled() { log.Debugf("Metrics collection has been disabled for %s", tracker.description) tracker.unregisterMetrics(slices.Collect(maps.Keys(previous.metrics))) return @@ -235,6 +260,8 @@ func (tracker *TrackerBase[F]) getConfiguration() *Configuration { return tracker.config } +// setConfiguration updates the tracker configuration and returns the previous +// one. func (tracker *TrackerBase[F]) setConfiguration(config *Configuration) *Configuration { tracker.metricsConfigMux.Lock() defer tracker.metricsConfigMux.Unlock() @@ -243,39 +270,15 @@ func (tracker *TrackerBase[F]) setConfiguration(config *Configuration) *Configur return previous } -// track aggregates the fetched findings and updates the gauges. -func (tracker *TrackerBase[F]) track(ctx context.Context, gatherer *gatherer[F], cfg *Configuration) error { - if len(cfg.metrics) == 0 { - return nil - } - aggregator := gatherer.aggregator - registry := gatherer.registry - aggregator.reset() - for finding, err := range tracker.generator(ctx, cfg.metrics) { - if err != nil { - return err - } - aggregator.count(finding) - } - registry.Lock() - defer registry.Unlock() - for metric, records := range aggregator.result { - registry.Reset(string(metric)) - for _, rec := range records { - registry.SetTotal(string(metric), rec.labels, rec.total) - } - } - return nil -} - // Gather the data not more often then maxAge. func (tracker *TrackerBase[F]) Gather(ctx context.Context) { - id, err := authn.IdentityFromContext(ctx) - if err != nil { + cfg := tracker.getConfiguration() + if !cfg.isEnabled() { return } - cfg := tracker.getConfiguration() - if cfg == nil { + id, err := authn.IdentityFromContext(ctx) + if err != nil { + utils.Should(err) return } // Pass the cfg so that the same configuration is used there and here. 
@@ -287,11 +290,11 @@ func (tracker *TrackerBase[F]) Gather(ctx context.Context) { defer tracker.cleanupInactiveGatherers() defer gatherer.running.Store(false) - if cfg.period == 0 || time.Since(gatherer.lastGather) < cfg.period { + if time.Since(gatherer.lastGather) < cfg.period { return } begin := time.Now() - if err := tracker.track(ctx, gatherer, cfg); err != nil { + if err := gatherer.updateMetrics(tracker.generator(ctx, cfg.metrics)); err != nil { log.Errorf("Failed to gather %s metrics: %v", tracker.description, err) } end := time.Now() @@ -364,10 +367,6 @@ func (tracker *TrackerBase[F]) getGatherer(userID string, cfg *Configuration) *g return gr } -func (g *gatherer[F]) trySetRunning() bool { - return g.running.CompareAndSwap(false, true) -} - // cleanupInactiveGatherers frees the registries for the userIDs, that haven't // shown up for inactiveGathererTTL. func (tracker *TrackerBase[F]) cleanupInactiveGatherers() { diff --git a/central/metrics/custom/tracker/tracker_base_test.go b/central/metrics/custom/tracker/tracker_base_test.go index 62282eec5a218..06241404b71d7 100644 --- a/central/metrics/custom/tracker/tracker_base_test.go +++ b/central/metrics/custom/tracker/tracker_base_test.go @@ -197,7 +197,7 @@ func TestTrackerBase_Track(t *testing.T) { registry: rf, aggregator: makeAggregator(tracker.config.metrics, tracker.config.includeFilters, tracker.config.excludeFilters, tracker.getters), } - assert.NoError(t, tracker.track(context.Background(), testGatherer, tracker.config)) + assert.NoError(t, testGatherer.updateMetrics(tracker.generator(context.Background(), tracker.config.metrics))) if assert.Len(t, result, 2) && assert.Contains(t, result, "test_TestTrackerBase_Track_metric1") && @@ -261,7 +261,7 @@ func TestTrackerBase_error(t *testing.T) { registry: rf, aggregator: makeAggregator(tracker.config.metrics, tracker.config.includeFilters, tracker.config.excludeFilters, tracker.getters), } - assert.ErrorIs(t, tracker.track(context.Background(), testGatherer, tracker.config), + assert.ErrorIs(t, testGatherer.updateMetrics(tracker.generator(context.Background(), tracker.config.metrics)), errox.InvariantViolation) } From 773e872283914838773bc886a92dfc1d09ce2849 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Feb 2026 06:53:02 +0100 Subject: [PATCH 145/232] chore(deps): bump the operator-framework group across 1 directory with 2 updates (#17742) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: StackRox PR Fixxxer --- .../config.stackrox.io_securitypolicies.yaml | 1 - .../platform.stackrox.io_centrals.yaml | 1 - .../platform.stackrox.io_securedclusters.yaml | 1 - ...er-manager-metrics-service_v1_service.yaml | 1 - ...c.authorization.k8s.io_v1_clusterrole.yaml | 1 - operator/bundle/tests/scorecard/config.yaml | 12 +- .../scorecard-versioned/kustomization.yaml | 2 +- operator/tools/operator-sdk/go.mod | 203 +++---- operator/tools/operator-sdk/go.sum | 512 +++++++++--------- 9 files changed, 376 insertions(+), 358 deletions(-) diff --git a/operator/bundle/manifests/config.stackrox.io_securitypolicies.yaml b/operator/bundle/manifests/config.stackrox.io_securitypolicies.yaml index 19c85123e63e5..1358ef7c127dd 100644 --- a/operator/bundle/manifests/config.stackrox.io_securitypolicies.yaml +++ b/operator/bundle/manifests/config.stackrox.io_securitypolicies.yaml @@ -3,7 +3,6 @@ kind: CustomResourceDefinition metadata: annotations: 
controller-gen.kubebuilder.io/version: v0.20.0 - creationTimestamp: null labels: app: rhacs-operator name: securitypolicies.config.stackrox.io diff --git a/operator/bundle/manifests/platform.stackrox.io_centrals.yaml b/operator/bundle/manifests/platform.stackrox.io_centrals.yaml index ecc7446c169b8..bce5980dde9bc 100644 --- a/operator/bundle/manifests/platform.stackrox.io_centrals.yaml +++ b/operator/bundle/manifests/platform.stackrox.io_centrals.yaml @@ -3,7 +3,6 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.20.0 - creationTimestamp: null labels: app: rhacs-operator name: centrals.platform.stackrox.io diff --git a/operator/bundle/manifests/platform.stackrox.io_securedclusters.yaml b/operator/bundle/manifests/platform.stackrox.io_securedclusters.yaml index 51e189b4c716d..373bc64ad6527 100644 --- a/operator/bundle/manifests/platform.stackrox.io_securedclusters.yaml +++ b/operator/bundle/manifests/platform.stackrox.io_securedclusters.yaml @@ -3,7 +3,6 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.20.0 - creationTimestamp: null labels: app: rhacs-operator name: securedclusters.platform.stackrox.io diff --git a/operator/bundle/manifests/rhacs-operator-controller-manager-metrics-service_v1_service.yaml b/operator/bundle/manifests/rhacs-operator-controller-manager-metrics-service_v1_service.yaml index d4ab1a2adc3ab..1f0671f3168ca 100644 --- a/operator/bundle/manifests/rhacs-operator-controller-manager-metrics-service_v1_service.yaml +++ b/operator/bundle/manifests/rhacs-operator-controller-manager-metrics-service_v1_service.yaml @@ -1,7 +1,6 @@ apiVersion: v1 kind: Service metadata: - creationTimestamp: null labels: app: rhacs-operator control-plane: controller-manager diff --git a/operator/bundle/manifests/rhacs-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml b/operator/bundle/manifests/rhacs-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml index e1fc5adfa10a4..40170066c1185 100644 --- a/operator/bundle/manifests/rhacs-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml +++ b/operator/bundle/manifests/rhacs-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml @@ -1,7 +1,6 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - creationTimestamp: null labels: app: rhacs-operator name: rhacs-operator-metrics-reader diff --git a/operator/bundle/tests/scorecard/config.yaml b/operator/bundle/tests/scorecard/config.yaml index 6ffe8227fa74c..6a6e414080972 100644 --- a/operator/bundle/tests/scorecard/config.yaml +++ b/operator/bundle/tests/scorecard/config.yaml @@ -8,7 +8,7 @@ stages: - entrypoint: - scorecard-test - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.41.1 + image: quay.io/operator-framework/scorecard-test:v1.42.0 labels: suite: basic test: basic-check-spec-test @@ -18,7 +18,7 @@ stages: - entrypoint: - scorecard-test - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.41.1 + image: quay.io/operator-framework/scorecard-test:v1.42.0 labels: suite: olm test: olm-bundle-validation-test @@ -28,7 +28,7 @@ stages: - entrypoint: - scorecard-test - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.41.1 + image: quay.io/operator-framework/scorecard-test:v1.42.0 labels: suite: olm test: olm-crds-have-validation-test @@ -38,7 +38,7 @@ stages: - entrypoint: - scorecard-test - olm-crds-have-resources - image: 
quay.io/operator-framework/scorecard-test:v1.41.1 + image: quay.io/operator-framework/scorecard-test:v1.42.0 labels: suite: olm test: olm-crds-have-resources-test @@ -48,7 +48,7 @@ stages: - entrypoint: - scorecard-test - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.41.1 + image: quay.io/operator-framework/scorecard-test:v1.42.0 labels: suite: olm test: olm-spec-descriptors-test @@ -58,7 +58,7 @@ stages: - entrypoint: - scorecard-test - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.41.1 + image: quay.io/operator-framework/scorecard-test:v1.42.0 labels: suite: olm test: olm-status-descriptors-test diff --git a/operator/config/scorecard-versioned/kustomization.yaml b/operator/config/scorecard-versioned/kustomization.yaml index efe47df07e0f5..447702e1a4a35 100644 --- a/operator/config/scorecard-versioned/kustomization.yaml +++ b/operator/config/scorecard-versioned/kustomization.yaml @@ -7,4 +7,4 @@ resources: images: - name: quay.io/operator-framework/scorecard-test newName: quay.io/operator-framework/scorecard-test - newTag: v1.41.1 + newTag: v1.42.0 diff --git a/operator/tools/operator-sdk/go.mod b/operator/tools/operator-sdk/go.mod index 42458720f2df1..ee5f992345639 100644 --- a/operator/tools/operator-sdk/go.mod +++ b/operator/tools/operator-sdk/go.mod @@ -1,10 +1,10 @@ module github.com/stackrox/rox/operator/tools/operator-sdk -go 1.25.0 +go 1.25.3 require ( - github.com/operator-framework/operator-lifecycle-manager v0.30.0 - github.com/operator-framework/operator-sdk v1.41.1 + github.com/operator-framework/operator-lifecycle-manager v0.40.0 + github.com/operator-framework/operator-sdk v1.42.0 ) // These are copied verbatim from @@ -22,11 +22,11 @@ replace ( ) require ( - cel.dev/expr v0.24.0 // indirect + cel.dev/expr v0.25.1 // indirect dario.cat/mergo v1.0.2 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect - github.com/BurntSushi/toml v1.5.0 // indirect + github.com/BurntSushi/toml v1.6.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.4.0 // indirect @@ -43,9 +43,11 @@ require ( github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect - github.com/containerd/cgroups/v3 v3.0.5 // indirect - github.com/containerd/containerd v1.7.29 // indirect - github.com/containerd/containerd/api v1.9.0 // indirect + github.com/clipperhouse/stringish v0.1.1 // indirect + github.com/clipperhouse/uax29/v2 v2.5.0 // indirect + github.com/containerd/cgroups/v3 v3.1.2 // indirect + github.com/containerd/containerd v1.7.30 // indirect + github.com/containerd/containerd/api v1.10.0 // indirect github.com/containerd/continuity v0.4.5 // indirect github.com/containerd/errdefs v1.0.0 // indirect github.com/containerd/errdefs/pkg v0.3.0 // indirect @@ -54,23 +56,20 @@ require ( github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect github.com/containerd/ttrpc v1.2.7 // indirect github.com/containerd/typeurl/v2 v2.2.3 // indirect - github.com/containers/common v0.64.1 // indirect - github.com/containers/image/v5 v5.36.1 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/containers/ocicrypt v1.2.1 // indirect - github.com/containers/storage v1.59.1 // indirect 
github.com/coreos/go-semver v0.3.1 // indirect github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect - github.com/cyphar/filepath-securejoin v0.4.1 // indirect + github.com/cyphar/filepath-securejoin v0.6.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/cli v29.0.3+incompatible // indirect + github.com/docker/cli v29.2.1+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/docker v28.5.2+incompatible // indirect - github.com/docker/docker-credential-helpers v0.9.3 // indirect - github.com/docker/go-connections v0.5.0 // indirect + github.com/docker/docker-credential-helpers v0.9.5 // indirect + github.com/docker/go-connections v0.6.0 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/emicklei/go-restful/v3 v3.13.0 // indirect github.com/evanphx/json-patch v5.9.11+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect @@ -78,42 +77,51 @@ require ( github.com/fatih/structtag v1.2.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect - github.com/fxamacker/cbor/v2 v2.8.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect github.com/go-air/gini v1.0.4 // indirect github.com/go-errors/errors v1.4.2 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect - github.com/go-git/go-billy/v5 v5.6.2 // indirect - github.com/go-git/go-git/v5 v5.16.2 // indirect + github.com/go-git/go-billy/v5 v5.7.0 // indirect + github.com/go-git/go-git/v5 v5.16.4 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-jose/go-jose/v4 v4.1.3 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect - github.com/go-openapi/jsonpointer v0.21.1 // indirect - github.com/go-openapi/jsonreference v0.21.0 // indirect - github.com/go-openapi/swag v0.23.1 // indirect + github.com/go-openapi/jsonpointer v0.22.4 // indirect + github.com/go-openapi/jsonreference v0.21.4 // indirect + github.com/go-openapi/swag v0.25.4 // indirect + github.com/go-openapi/swag/cmdutils v0.25.4 // indirect + github.com/go-openapi/swag/conv v0.25.4 // indirect + github.com/go-openapi/swag/fileutils v0.25.4 // indirect + github.com/go-openapi/swag/jsonname v0.25.4 // indirect + github.com/go-openapi/swag/jsonutils v0.25.4 // indirect + github.com/go-openapi/swag/loading v0.25.4 // indirect + github.com/go-openapi/swag/mangling v0.25.4 // indirect + github.com/go-openapi/swag/netutils v0.25.4 // indirect + github.com/go-openapi/swag/stringutils v0.25.4 // indirect + github.com/go-openapi/swag/typeutils v0.25.4 // indirect + github.com/go-openapi/swag/yamlutils v0.25.4 // indirect github.com/go-sql-driver/mysql v1.9.2 // indirect github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/gobuffalo/envy v1.6.5 // indirect github.com/gobuffalo/flect v1.0.3 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-migrate/migrate/v4 v4.18.3 // indirect + github.com/golang-migrate/migrate/v4 v4.19.1 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect - github.com/golang/mock v1.7.0-rc.1 // 
indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/cel-go v0.26.0 // indirect - github.com/google/gnostic-models v0.6.9 // indirect + github.com/google/cel-go v0.27.0 // indirect + github.com/google/gnostic-models v0.7.1 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/go-containerregistry v0.20.7 // indirect - github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/gosuri/uitable v0.0.4 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 // indirect github.com/h2non/filetype v1.1.3 // indirect github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -121,29 +129,27 @@ require ( github.com/huandu/xstrings v1.5.0 // indirect github.com/iancoleman/strcase v0.3.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/itchyny/gojq v0.12.17 // indirect - github.com/itchyny/timefmt-go v0.1.6 // indirect + github.com/itchyny/gojq v0.12.18 // indirect + github.com/itchyny/timefmt-go v0.1.7 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jmoiron/sqlx v1.4.0 // indirect github.com/joelanford/ignore v0.1.1 // indirect github.com/joho/godotenv v1.3.0 // indirect - github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.18.1 // indirect + github.com/klauspost/compress v1.18.3 // indirect github.com/klauspost/pgzip v1.2.6 // indirect github.com/kr/text v0.2.0 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect github.com/lib/pq v1.10.9 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect - github.com/mailru/easyjson v0.9.0 // indirect github.com/markbates/inflect v1.0.4 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.16 // indirect - github.com/mattn/go-sqlite3 v1.14.32 // indirect + github.com/mattn/go-runewidth v0.0.19 // indirect + github.com/mattn/go-sqlite3 v1.14.33 // indirect github.com/miekg/dns v1.1.61 // indirect - github.com/miekg/pkcs11 v1.1.1 // indirect + github.com/miekg/pkcs11 v1.1.2 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect @@ -158,124 +164,123 @@ require ( github.com/moby/sys/userns v0.1.0 // indirect github.com/moby/term v0.5.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/nxadm/tail v1.4.11 // indirect - github.com/onsi/gomega v1.38.2 // indirect + github.com/onsi/gomega 
v1.39.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect - github.com/opencontainers/runtime-spec v1.2.1 // indirect + github.com/opencontainers/runtime-spec v1.3.0 // indirect github.com/openshift/api v3.9.0+incompatible // indirect - github.com/openshift/client-go v0.0.0-20220525160904-9e1acff93e4a // indirect - github.com/operator-framework/ansible-operator-plugins v1.39.0 // indirect - github.com/operator-framework/api v0.34.0 // indirect + github.com/openshift/client-go v0.0.0-20260108185524-48f4ccfc4e13 // indirect + github.com/openshift/library-go v0.0.0-20260204111611-b7d4fa0e292a // indirect + github.com/operator-framework/ansible-operator-plugins v1.42.0 // indirect + github.com/operator-framework/api v0.39.0 // indirect github.com/operator-framework/operator-manifest-tools v0.10.0 // indirect - github.com/operator-framework/operator-registry v1.57.0 // indirect + github.com/operator-framework/operator-registry v1.63.0 // indirect github.com/otiai10/copy v1.14.1 // indirect github.com/otiai10/mint v1.6.3 // indirect - github.com/pelletier/go-toml/v2 v2.2.3 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/proglottis/gpgme v0.1.4 // indirect + github.com/proglottis/gpgme v0.1.6 // indirect github.com/prometheus/client_golang v1.23.2 // indirect github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.67.5 // indirect - github.com/prometheus/procfs v0.16.1 // indirect - github.com/rivo/uniseg v0.4.7 // indirect + github.com/prometheus/procfs v0.19.2 // indirect github.com/rubenv/sql-migrate v1.8.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/sagikazarmark/locafero v0.7.0 // indirect + github.com/sagikazarmark/locafero v0.11.0 // indirect github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect - github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.10.0 // indirect github.com/shopspring/decimal v1.4.0 // indirect github.com/sigstore/fulcio v1.8.5 // indirect github.com/sigstore/protobuf-specs v0.5.0 // indirect github.com/sigstore/sigstore v1.10.4 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect + github.com/sirupsen/logrus v1.9.4 // indirect github.com/smallstep/pkcs7 v0.2.1 // indirect - github.com/sourcegraph/conc v0.3.0 // indirect - github.com/spf13/afero v1.14.0 // indirect - github.com/spf13/cast v1.7.1 // indirect + github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect + github.com/spf13/afero v1.15.0 // indirect + github.com/spf13/cast v1.10.0 // indirect github.com/spf13/cobra v1.10.2 // indirect github.com/spf13/pflag v1.0.10 // indirect - github.com/spf13/viper v1.20.1 // indirect + github.com/spf13/viper v1.21.0 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect - github.com/stoewer/go-strcase v1.3.1 // indirect github.com/stretchr/testify v1.11.1 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/thoas/go-funk v0.9.3 // indirect - github.com/ulikunitz/xz v0.5.14 // indirect + github.com/ulikunitz/xz v0.5.15 // indirect github.com/vbatts/tar-split v0.12.2 // indirect - github.com/vbauerster/mpb/v8 v8.10.2 // indirect + github.com/vbauerster/mpb/v8 v8.11.3 // 
indirect github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect go.etcd.io/bbolt v1.4.3 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.6.0 // indirect - go.etcd.io/etcd/client/v3 v3.6.0 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect - go.opentelemetry.io/otel v1.38.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect - go.opentelemetry.io/otel/metric v1.38.0 // indirect - go.opentelemetry.io/otel/sdk v1.38.0 // indirect - go.opentelemetry.io/otel/trace v1.38.0 // indirect - go.opentelemetry.io/proto/otlp v1.7.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/sdk v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect + go.opentelemetry.io/proto/otlp v1.9.0 // indirect + go.podman.io/common v0.66.1 // indirect + go.podman.io/image/v5 v5.38.0 // indirect + go.podman.io/storage v1.61.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.1 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/crypto v0.46.0 // indirect - golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect - golang.org/x/mod v0.31.0 // indirect - golang.org/x/net v0.48.0 // indirect + golang.org/x/crypto v0.47.0 // indirect + golang.org/x/exp v0.0.0-20260112195511-716be5621a96 // indirect + golang.org/x/mod v0.32.0 // indirect + golang.org/x/net v0.49.0 // indirect golang.org/x/oauth2 v0.34.0 // indirect golang.org/x/sync v0.19.0 // indirect - golang.org/x/sys v0.39.0 // indirect - golang.org/x/term v0.38.0 // indirect - golang.org/x/text v0.32.0 // indirect + golang.org/x/sys v0.40.0 // indirect + golang.org/x/term v0.39.0 // indirect + golang.org/x/text v0.33.0 // indirect golang.org/x/time v0.14.0 // indirect - golang.org/x/tools v0.40.0 // indirect - golang.org/x/tools/go/expect v0.1.1-deprecated // indirect - golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect - gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b // indirect + golang.org/x/tools v0.41.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect + google.golang.org/genproto v0.0.0-20260203192932-546029d2fa20 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20 // indirect google.golang.org/grpc v1.78.0 // indirect google.golang.org/protobuf v1.36.11 // indirect - gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect helm.sh/helm/v3 v3.18.6 // indirect - k8s.io/api v0.33.4 
// indirect - k8s.io/apiextensions-apiserver v0.33.4 // indirect - k8s.io/apimachinery v0.33.4 // indirect - k8s.io/apiserver v0.33.4 // indirect - k8s.io/cli-runtime v0.33.3 // indirect - k8s.io/client-go v0.33.4 // indirect - k8s.io/component-base v0.33.4 // indirect + k8s.io/api v0.35.0 // indirect + k8s.io/apiextensions-apiserver v0.35.0 // indirect + k8s.io/apimachinery v0.35.0 // indirect + k8s.io/apiserver v0.35.0 // indirect + k8s.io/cli-runtime v0.35.0 // indirect + k8s.io/client-go v0.35.0 // indirect + k8s.io/component-base v0.35.0 // indirect k8s.io/klog v1.0.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-aggregator v0.33.4 // indirect - k8s.io/kube-openapi v0.0.0-20250610211856-8b98d1ed966a // indirect - k8s.io/kubectl v0.33.3 // indirect - k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d // indirect + k8s.io/kube-aggregator v0.35.0 // indirect + k8s.io/kube-openapi v0.0.0-20260127142750-a19766b6e2d4 // indirect + k8s.io/kubectl v0.35.0 // indirect + k8s.io/utils v0.0.0-20260108192941-914a6e750570 // indirect oras.land/oras-go/v2 v2.6.0 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 // indirect - sigs.k8s.io/controller-runtime v0.21.0 // indirect - sigs.k8s.io/controller-tools v0.18.0 // indirect - sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.34.0 // indirect + sigs.k8s.io/controller-runtime v0.23.1 // indirect + sigs.k8s.io/controller-tools v0.20.0 // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/kubebuilder/v4 v4.6.0 // indirect - sigs.k8s.io/kustomize/api v0.19.0 // indirect - sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect + sigs.k8s.io/kustomize/api v0.20.1 // indirect + sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 // indirect sigs.k8s.io/yaml v1.6.0 // indirect ) diff --git a/operator/tools/operator-sdk/go.sum b/operator/tools/operator-sdk/go.sum index f40eeca1caff8..30b6640e803fe 100644 --- a/operator/tools/operator-sdk/go.sum +++ b/operator/tools/operator-sdk/go.sum @@ -1,5 +1,5 @@ -cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= -cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= +cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4= +cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= @@ -10,8 +10,8 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= -github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk= +github.com/BurntSushi/toml v1.6.0/go.mod 
h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= @@ -52,13 +52,17 @@ github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs= +github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA= +github.com/clipperhouse/uax29/v2 v2.5.0 h1:x7T0T4eTHDONxFJsL94uKNKPHrclyFI0lm7+w94cO8U= +github.com/clipperhouse/uax29/v2 v2.5.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo= -github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= -github.com/containerd/containerd v1.7.29 h1:90fWABQsaN9mJhGkoVnuzEY+o1XDPbg9BTC9QTAHnuE= -github.com/containerd/containerd v1.7.29/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs= -github.com/containerd/containerd/api v1.9.0 h1:HZ/licowTRazus+wt9fM6r/9BQO7S0vD5lMcWspGIg0= -github.com/containerd/containerd/api v1.9.0/go.mod h1:GhghKFmTR3hNtyznBoQ0EMWr9ju5AqHjcZPsSpTKutI= +github.com/containerd/cgroups/v3 v3.1.2 h1:OSosXMtkhI6Qove637tg1XgK4q+DhR0mX8Wi8EhrHa4= +github.com/containerd/cgroups/v3 v3.1.2/go.mod h1:PKZ2AcWmSBsY/tJUVhtS/rluX0b1uq1GmPO1ElCmbOw= +github.com/containerd/containerd v1.7.30 h1:/2vezDpLDVGGmkUXmlNPLCCNKHJ5BbC5tJB5JNzQhqE= +github.com/containerd/containerd v1.7.30/go.mod h1:fek494vwJClULlTpExsmOyKCMUAbuVjlFsJQc4/j44M= +github.com/containerd/containerd/api v1.10.0 h1:5n0oHYVBwN4VhoX9fFykCV9dF1/BvAXeg2F8W6UYq1o= +github.com/containerd/containerd/api v1.10.0/go.mod h1:NBm1OAk8ZL+LG8R0ceObGxT5hbUYj7CzTmR3xh0DlMM= github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4= github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= @@ -75,28 +79,22 @@ github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRq github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= -github.com/containers/common v0.64.1 h1:E8vSiL+B84/UCsyVSb70GoxY9cu+0bseLujm4EKF6GE= -github.com/containers/common v0.64.1/go.mod h1:CtfQNHoCAZqWeXMwdShcsxmMJSeGRgKKMqAwRKmWrHE= -github.com/containers/image/v5 v5.36.1 h1:6zpXBqR59UcAzoKpa/By5XekeqFV+htWYfr65+Cgjqo= -github.com/containers/image/v5 v5.36.1/go.mod h1:b4GMKH2z/5t6/09utbse2ZiLK/c72GuGLFdp7K69eA4= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= 
github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM= github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ= -github.com/containers/storage v1.59.1 h1:11Zu68MXsEQGBBd+GadPrHPpWeqjKS8hJDGiAHgIqDs= -github.com/containers/storage v1.59.1/go.mod h1:KoAYHnAjP3/cTsRS+mmWZGkufSY2GACiKQ4V3ZLQnR0= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.7.0 h1:LAEzFkke61DFROc7zNLX/WA2i5J8gYqe0rSj9KI28KA= +github.com/coreos/go-systemd/v22 v22.7.0/go.mod h1:xNUYtjHu2EDXbsxz1i41wouACIwT7Ybq9o0BQhMwD0w= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q= github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= -github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= -github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= +github.com/cyphar/filepath-securejoin v0.6.0 h1:BtGB77njd6SVO6VztOHfPxKitJvd/VPT+OFBFMOi1Is= +github.com/cyphar/filepath-securejoin v0.6.0/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -109,24 +107,24 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= -github.com/docker/cli v29.0.3+incompatible h1:8J+PZIcF2xLd6h5sHPsp5pvvJA+Sr2wGQxHkRl53a1E= -github.com/docker/cli v29.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v29.2.1+incompatible h1:n3Jt0QVCN65eiVBoUTZQM9mcQICCJt3akW4pKAbKdJg= +github.com/docker/cli v29.2.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM= github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= -github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= 
-github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= -github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= -github.com/docker/go-events v0.0.0-20250114142523-c867878c5e32 h1:EHZfspsnLAz8Hzccd67D5abwLiqoqym2jz/jOS39mCk= -github.com/docker/go-events v0.0.0-20250114142523-c867878c5e32/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/docker-credential-helpers v0.9.5 h1:EFNN8DHvaiK8zVqFA2DT6BjXE0GzfLOZ38ggPTKePkY= +github.com/docker/docker-credential-helpers v0.9.5/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c= +github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= +github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= +github.com/docker/go-events v0.0.0-20250808211157-605354379745 h1:yOn6Ze6IbYI/KAw2lw/83ELYvZh6hvsygTVkD0dzMC4= +github.com/docker/go-events v0.0.0-20250808211157-605354379745/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= -github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes= +github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -150,8 +148,8 @@ github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7z github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU= -github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-air/gini v1.0.4 h1:lteMAxHKNOAjIqazL/klOJJmxq6YxxSuJ17MnMXny+s= @@ -160,10 +158,10 @@ github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxI github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= -github.com/go-git/go-billy/v5 v5.6.2 
h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM= -github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU= -github.com/go-git/go-git/v5 v5.16.2 h1:fT6ZIOjE5iEnkzKyxTHK1W4HGAsPhqEqiSAssSO77hM= -github.com/go-git/go-git/v5 v5.16.2/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8= +github.com/go-git/go-billy/v5 v5.7.0 h1:83lBUJhGWhYp0ngzCMSgllhUSuoHP1iEWYjsPl9nwqM= +github.com/go-git/go-billy/v5 v5.7.0/go.mod h1:/1IUejTKH8xipsAcdfcSAlUlo2J7lkYV8GTKxAT/L3E= +github.com/go-git/go-git/v5 v5.16.4 h1:7ajIEZHZJULcyJebDLo99bGgS0jRrOxzZG4uCk2Yb2Y= +github.com/go-git/go-git/v5 v5.16.4/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8= github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= @@ -176,12 +174,40 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= -github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= -github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= -github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= -github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= -github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= -github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= +github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4= +github.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80= +github.com/go-openapi/jsonreference v0.21.4 h1:24qaE2y9bx/q3uRK/qN+TDwbok1NhbSmGjjySRCHtC8= +github.com/go-openapi/jsonreference v0.21.4/go.mod h1:rIENPTjDbLpzQmQWCj5kKj3ZlmEh+EFVbz3RTUh30/4= +github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU= +github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ= +github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4= +github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= +github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4= +github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU= +github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y= +github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk= +github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI= +github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag= +github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA= +github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod 
h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM= +github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s= +github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE= +github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48= +github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg= +github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0= +github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg= +github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8= +github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0= +github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw= +github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE= +github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw= +github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg= +github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls= +github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-sql-driver/mysql v1.9.2 h1:4cNKDYQ1I84SXslGddlsrMhc8k4LeDVj6Ad6WRjiHuU= github.com/go-sql-driver/mysql v1.9.2/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= @@ -198,8 +224,8 @@ github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-migrate/migrate/v4 v4.18.3 h1:EYGkoOsvgHHfm5U/naS1RP/6PL/Xv3S4B/swMiAmDLs= -github.com/golang-migrate/migrate/v4 v4.18.3/go.mod h1:99BKpIi6ruaaXRM1A77eqZ+FWPQ3cfRa+ZVy5bmWMaY= +github.com/golang-migrate/migrate/v4 v4.19.1 h1:OCyb44lFuQfYXYLx1SCxPZQGU7mcaZ7gH9yH4jSFbBA= +github.com/golang-migrate/migrate/v4 v4.19.1/go.mod h1:CTcgfjxhaUtsLipnLoQRWCrjYXycRz/g5+RWDuYgPrE= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= @@ -220,10 +246,10 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI= -github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM= -github.com/google/gnostic-models v0.6.9 
h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= -github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= +github.com/google/cel-go v0.27.0 h1:e7ih85+4qVrBuqQWTW4FKSqZYokVuc3HnhH5keboFTo= +github.com/google/cel-go v0.27.0/go.mod h1:tTJ11FWqnhw5KKpnWpvW9CJC3Y9GK4EIS0WXnBbebzw= +github.com/google/gnostic-models v0.7.1 h1:SisTfuFKJSKM5CPZkffwi6coztzzeYUhc3v4yxLWH8c= +github.com/google/gnostic-models v0.7.1/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -239,10 +265,8 @@ github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 h1:EEHtgt9IwisQ2AZ4pIsMjahcegHh6rmhqxzIRQIyepY= -github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef h1:xpF9fUHpoIrrjX24DURVKiwHcFpw19ndIs+FwTSMbno= +github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -258,8 +282,8 @@ github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJr github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 h1:JYghRBlGCZyCF2wNUJ8W0cwaQdtpcssJ4CgC406g+WU= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99/go.mod h1:3bDW6wMZJB7tiONtC/1Xpicra6Wp5GgbTbQWCbI5fkc= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4 h1:kEISI/Gx67NzH3nJxAmY/dGac80kKZgZt134u7Y/k1s= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4/go.mod h1:6Nz966r3vQYCqIzWsuEl9d7cf7mRhtDmm++sOxlnfxI= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod h1:lW34nIZuQ8UDPdkon5fmfp2l3+ZkQ2me/+oecHYLOII= github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg= github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c h1:fEE5/5VNnYUoBOj2I9TP8Jc+a7lge3QWn9DKE7NCwfc= @@ -269,6 +293,7 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod 
h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru/arc/v2 v2.0.7 h1:QxkVTxwColcduO+LP7eJO56r2hFiG8zEbfAAzRv52KQ= github.com/hashicorp/golang-lru/arc/v2 v2.0.7/go.mod h1:Pe7gBlGdc8clY5LJ0LpJXMt5AmgmWNH1g+oFFVUHOEc= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= @@ -279,10 +304,10 @@ github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSAS github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/itchyny/gojq v0.12.17 h1:8av8eGduDb5+rvEdaOO+zQUjA04MS0m3Ps8HiD+fceg= -github.com/itchyny/gojq v0.12.17/go.mod h1:WBrEMkgAfAGO1LUcGOckBl5O726KPp+OlkKug0I/FEY= -github.com/itchyny/timefmt-go v0.1.6 h1:ia3s54iciXDdzWzwaVKXZPbiXzxxnv1SPGFfM/myJ5Q= -github.com/itchyny/timefmt-go v0.1.6/go.mod h1:RRDZYC5s9ErkjQvTvvU7keJjxUYzIISJGxm9/mAERQg= +github.com/itchyny/gojq v0.12.18 h1:gFGHyt/MLbG9n6dqnvlliiya2TaMMh6FFaR2b1H6Drc= +github.com/itchyny/gojq v0.12.18/go.mod h1:4hPoZ/3lN9fDL1D+aK7DY1f39XZpY9+1Xpjz8atrEkg= +github.com/itchyny/timefmt-go v0.1.7 h1:xyftit9Tbw+Dc/huSSPJaEmX1TVL8lw5vxjJLK4GMMA= +github.com/itchyny/timefmt-go v0.1.7/go.mod h1:5E46Q+zj7vbTgWY8o5YkMeYb4I6GeWLFnetPy5oBrAI= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= @@ -291,14 +316,12 @@ github.com/joelanford/ignore v0.1.1 h1:vKky5RDoPT+WbONrbQBgOn95VV/UPh4ejlyAbbzgn github.com/joelanford/ignore v0.1.1/go.mod h1:8eho/D8fwQ3rIXrLwE23AaeaGDNXqLE9QJ3zJ4LIPCw= github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= -github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= +github.com/klauspost/compress v1.18.3 h1:9PJRvfbmTabkOX8moIpXPbMMbYN60bWImDDU7L+/6zw= +github.com/klauspost/compress v1.18.3/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -315,23 +338,21 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter 
v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= -github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/markbates/inflect v1.0.4 h1:5fh1gzTFhfae06u3hzHYO9xe3l3v3nW5Pwt3naLTP5g= github.com/markbates/inflect v1.0.4/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= -github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw= +github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= -github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs= -github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.33 h1:A5blZ5ulQo2AtayQ9/limgHEkFreKj1Dv226a1K73s0= +github.com/mattn/go-sqlite3 v1.14.33/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= -github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= -github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/miekg/pkcs11 v1.1.2 h1:/VxmeAX5qU6Q3EwafypogwWbYryHFmF2RpkJmw3m4MQ= +github.com/miekg/pkcs11 v1.1.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -361,8 +382,9 @@ github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFL github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore 
v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= @@ -373,38 +395,40 @@ github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.25.2 h1:hepmgwx1D+llZleKQDMEvy8vIlCxMGt7W5ZxDjIEhsw= -github.com/onsi/ginkgo/v2 v2.25.2/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE= -github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= -github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= +github.com/onsi/ginkgo/v2 v2.28.1 h1:S4hj+HbZp40fNKuLUQOYLDgZLwNUVn19N3Atb98NCyI= +github.com/onsi/ginkgo/v2 v2.28.1/go.mod h1:CLtbVInNckU3/+gC8LzkGUb9oF+e8W8TdUsxPwvdOgE= +github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28= +github.com/onsi/gomega v1.39.1/go.mod h1:hL6yVALoTOxeWudERyfppUcZXjMwIMLnuSfruD2lcfg= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= -github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww= -github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.3.0 h1:YZupQUdctfhpZy3TM39nN9Ika5CBWT5diQ8ibYCRkxg= +github.com/opencontainers/runtime-spec v1.3.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/openshift/api v0.0.0-20221021112143-4226c2167e40 h1:PxjGCA72RtsdHWToZLkjjeWm7WXXx4cuv0u4gtvLbrk= github.com/openshift/api v0.0.0-20221021112143-4226c2167e40/go.mod h1:aQ6LDasvHMvHZXqLHnX2GRmnfTWCF/iIwz8EMTTIE9A= github.com/openshift/client-go v0.0.0-20221019143426-16aed247da5c h1:CV76yFOTXmq9VciBR3Bve5ZWzSxdft7gaMVB3kS0rwg= github.com/openshift/client-go v0.0.0-20221019143426-16aed247da5c/go.mod h1:lFMO8mLHXWFzSdYvGNo8ivF9SfF6zInA8ZGw4phRnUE= -github.com/operator-framework/ansible-operator-plugins v1.39.0 h1:JLlbdGdnGnF8q8WInq24Upde/jfWwRzIJ4gK4xjRLHc= -github.com/operator-framework/ansible-operator-plugins v1.39.0/go.mod h1:XLMYrKfowmX5leL8V4trkgtxfsXYdLynzyHamOR5xJc= -github.com/operator-framework/api v0.34.0 h1:REiEaYhG1CWmDoajdcAdZqtgoljWG+ixMY59vUX5pFI= -github.com/operator-framework/api v0.34.0/go.mod h1:eGncUNIYvWtfGCCKmLzGXvoi3P0TDf3Yd/Z0Sn9E6SQ= -github.com/operator-framework/operator-lifecycle-manager v0.30.0 h1:tt98f0lx2EfxfE/5PbpUi5fTnDnqZ17zGpftUA6oRm0= -github.com/operator-framework/operator-lifecycle-manager v0.30.0/go.mod h1:nPbzJVqxAwoUz5q7QovYZcHN/v4in5sucPT2UpF+ikQ= +github.com/openshift/library-go v0.0.0-20260204111611-b7d4fa0e292a h1:YLnZtVfqGUfTbQ+M06QAslEmP4WrnRoPrk4AtoBJdm8= +github.com/openshift/library-go v0.0.0-20260204111611-b7d4fa0e292a/go.mod h1:DCRz1EgdayEmr9b6KXKDL+DWBN0rGHu/VYADeHzPoOk= +github.com/operator-framework/ansible-operator-plugins v1.42.0 h1:ahupKUXl7sYKILEUp1tiQNW9WiFxpGGyN1UQ/EfsNGY= +github.com/operator-framework/ansible-operator-plugins v1.42.0/go.mod 
h1:gGyNgCrNU1opGioTWbYdnbRTcJkJrFPS8Ysu/hKybnE= +github.com/operator-framework/api v0.39.0 h1:9h7aVufeQ+l2ACXJE51hkMFcqrQwJOLM6/vwgGu6tgI= +github.com/operator-framework/api v0.39.0/go.mod h1:tcYIwuznZzfo4HKUTu0dbquIHqxiewnKW/ZmhHKzMH8= +github.com/operator-framework/operator-lifecycle-manager v0.40.0 h1:IDR+NNdrghAxVaSy1uEoMLRObylXPdjV1392ZEO3OZI= +github.com/operator-framework/operator-lifecycle-manager v0.40.0/go.mod h1:GkRZehCNOiOAdFrByUIU/W7nG+KWSs77ceHY839bFfg= github.com/operator-framework/operator-manifest-tools v0.10.0 h1:+vtIElvGQ5e43gCD6fF65a0HNH3AD3LGnukUhpl9kjc= github.com/operator-framework/operator-manifest-tools v0.10.0/go.mod h1:eB/wnr0BOhMLNXPeceE+0p3vudP16zDNWP60Hvn3KaM= -github.com/operator-framework/operator-registry v1.57.0 h1:mQ4c8A8VUxZPJ0QCFRNio+7JEsLX6eKxlDSl6ORCRdk= -github.com/operator-framework/operator-registry v1.57.0/go.mod h1:9rAZH/LZ/ttEuTvL1D4KApGqOtRDE6fJzzOrJNcBu7g= -github.com/operator-framework/operator-sdk v1.41.1 h1:dO+YeKerID4e4fjwi2LmDmaE2JzObF5pGTJm0dgVcjw= -github.com/operator-framework/operator-sdk v1.41.1/go.mod h1:7CSt3iO8Df2xPMtAcXi84a/K/bMnalx+m3DTELZ5lU8= +github.com/operator-framework/operator-registry v1.63.0 h1:UIahnpjkH7y98A8AgPw3DUXVsM1yQr36JajRaJ/00nQ= +github.com/operator-framework/operator-registry v1.63.0/go.mod h1:A1w3zzvxx1h5rvkuQG2FgHq6lTMHASLb/YPelq7AmxQ= +github.com/operator-framework/operator-sdk v1.42.0 h1:ng0eWo1GInKQ12ycwzMVK0Eq/T3m0N1c6f1h1exskRk= +github.com/operator-framework/operator-sdk v1.42.0/go.mod h1:6XuqltQbJb7H0QRBzJjaU6a77TUsodvLOf3ArHkxUGQ= github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8= github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I= github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs= github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= -github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= -github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= @@ -416,8 +440,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= -github.com/proglottis/gpgme v0.1.4 h1:3nE7YNA70o2aLjcg63tXMOhPD7bplfE5CBdV+hLAm2M= -github.com/proglottis/gpgme v0.1.4/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM= +github.com/proglottis/gpgme v0.1.6 h1:8WpQ8VWggLdxkuTnW+sZ1r1t92XBNd8GZNDhQ4Rz+98= +github.com/proglottis/gpgme v0.1.6/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM= github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model 
v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -425,29 +449,28 @@ github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNw github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4= github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw= -github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= -github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= -github.com/redis/go-redis/extra/rediscmd/v9 v9.10.0 h1:uTiEyEyfLhkw678n6EulHVto8AkcXVr8zUcBJNZ0ark= -github.com/redis/go-redis/extra/rediscmd/v9 v9.10.0/go.mod h1:eFYL/99JvdLP4T9/3FZ5t2pClnv7mMskc+WstTcyVr4= -github.com/redis/go-redis/extra/redisotel/v9 v9.10.0 h1:4z7/hCJ9Jft8EBb2tDmK38p2WjyIEJ1ShhhwAhjOCps= -github.com/redis/go-redis/extra/redisotel/v9 v9.10.0/go.mod h1:B0thqLh4hB8MvvcUKSwyP5YiIcCCp8UrQ0cA9gEqyjk= -github.com/redis/go-redis/v9 v9.10.0 h1:FxwK3eV8p/CQa0Ch276C7u2d0eNC9kCmAYQ7mCXCzVs= -github.com/redis/go-redis/v9 v9.10.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= -github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= -github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/prometheus/otlptranslator v1.0.0 h1:s0LJW/iN9dkIH+EnhiD3BlkkP5QVIUVEoIwkU+A6qos= +github.com/prometheus/otlptranslator v1.0.0/go.mod h1:vRYWnXvI6aWGpsdY/mOT/cbeVRBlPWtBNDb7kGR3uKM= +github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= +github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= +github.com/redis/go-redis/extra/rediscmd/v9 v9.17.3 h1:v9RNP5ynWkruvzscrIoDyyv20c9YeyVn12L9nYnaexw= +github.com/redis/go-redis/extra/rediscmd/v9 v9.17.3/go.mod h1:gdthSemCkR3WxTmzV2XxYIxClunkUJZAhL0zPHaB0Ww= +github.com/redis/go-redis/extra/redisotel/v9 v9.17.3 h1:bF0e3fV7PL0knd1UHDtMud8wA7CZt3RSWtyTMhpnWd8= +github.com/redis/go-redis/extra/redisotel/v9 v9.17.3/go.mod h1:gR39sPK/dJZlqgIA9Nm4JFHcQJPyhsISBLj708nrD4w= +github.com/redis/go-redis/v9 v9.17.3 h1:fN29NdNrE17KttK5Ndf20buqfDZwGNgoUr9qjl1DQx4= +github.com/redis/go-redis/v9 v9.17.3/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2Ns0o= github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= -github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= +github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= +github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ= github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod 
h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= -github.com/secure-systems-lab/go-securesystemslib v0.9.1 h1:nZZaNz4DiERIQguNy0cL5qTdn9lR8XKHf4RUyG1Sx3g= -github.com/secure-systems-lab/go-securesystemslib v0.9.1/go.mod h1:np53YzT0zXGMv6x4iEWc9Z59uR+x+ndLwCLqPYpLXVU= +github.com/secure-systems-lab/go-securesystemslib v0.10.0 h1:l+H5ErcW0PAehBNrBxoGv1jjNpGYdZ9RcheFkB2WI14= +github.com/secure-systems-lab/go-securesystemslib v0.10.0/go.mod h1:MRKONWmRoFzPNQ9USRF9i1mc7MvAVvF1LlW8X5VWDvk= github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw= github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= @@ -458,27 +481,25 @@ github.com/sigstore/protobuf-specs v0.5.0 h1:F8YTI65xOHw70NrvPwJ5PhAzsvTnuJMGLkA github.com/sigstore/protobuf-specs v0.5.0/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= github.com/sigstore/sigstore v1.10.4 h1:ytOmxMgLdcUed3w1SbbZOgcxqwMG61lh1TmZLN+WeZE= github.com/sigstore/sigstore v1.10.4/go.mod h1:tDiyrdOref3q6qJxm2G+JHghqfmvifB7hw+EReAfnbI= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w= +github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g= github.com/smallstep/pkcs7 v0.2.1 h1:6Kfzr/QizdIuB6LSv8y1LJdZ3aPSfTNhTLqAx9CTLfA= github.com/smallstep/pkcs7 v0.2.1/go.mod h1:RcXHsMfL+BzH8tRhmrF1NkkpebKpq3JEM66cOFxanf0= -github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= -github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= -github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= -github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= -github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= -github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= -github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 
h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw= github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M= -github.com/stoewer/go-strcase v1.3.1 h1:iS0MdW+kVTxgMoE1LAZyMiYJFKlOzLooE4MxjirtkAs= -github.com/stoewer/go-strcase v1.3.1/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -498,84 +519,87 @@ github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8 github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/thoas/go-funk v0.9.3 h1:7+nAEx3kn5ZJcnDm2Bh23N2yOtweO14bi//dvRtgLpw= github.com/thoas/go-funk v0.9.3/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q= -github.com/ulikunitz/xz v0.5.14 h1:uv/0Bq533iFdnMHZdRBTOlaNMdb1+ZxXIlHDZHIHcvg= -github.com/ulikunitz/xz v0.5.14/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY= +github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= -github.com/vbauerster/mpb/v8 v8.10.2 h1:2uBykSHAYHekE11YvJhKxYmLATKHAGorZwFlyNw4hHM= -github.com/vbauerster/mpb/v8 v8.10.2/go.mod h1:+Ja4P92E3/CorSZgfDtK46D7AVbDqmBQRTmyTqPElo0= +github.com/vbauerster/mpb/v8 v8.11.3 h1:iniBmO4ySXCl4gVdmJpgrtormH5uvjpxcx/dMyVU9Jw= +github.com/vbauerster/mpb/v8 v8.11.3/go.mod h1:n9M7WbP0NFjpgKS5XdEC3tMRgZTNM/xtC8zWGkiMuy0= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.4.3 h1:dEadXpI6G79deX5prL3QRNP6JB8UxVkqo4UPnHaNXJo= go.etcd.io/bbolt v1.4.3/go.mod h1:tKQlpPaYCVFctUIgFKFnAlvbmB3tpy1vkTnDWohtc0E= -go.etcd.io/etcd/api/v3 v3.6.0 h1:vdbkcUBGLf1vfopoGE/uS3Nv0KPyIpUV/HM6w9yx2kM= -go.etcd.io/etcd/api/v3 v3.6.0/go.mod h1:Wt5yZqEmxgTNJGHob7mTVBJDZNXiHPtXTcPab37iFOw= -go.etcd.io/etcd/client/pkg/v3 v3.6.0 h1:nchnPqpuxvv3UuGGHaz0DQKYi5EIW5wOYsgUNRc365k= -go.etcd.io/etcd/client/pkg/v3 v3.6.0/go.mod h1:Jv5SFWMnGvIBn8o3OaBq/PnT0jjsX8iNokAUessNjoA= -go.etcd.io/etcd/client/v3 v3.6.0 h1:/yjKzD+HW5v/3DVj9tpwFxzNbu8hjcKID183ug9duWk= -go.etcd.io/etcd/client/v3 v3.6.0/go.mod h1:Jzk/Knqe06pkOZPHXsQ0+vNDvMQrgIqJ0W8DwPdMJMg= +go.etcd.io/etcd/api/v3 v3.6.7 h1:7BNJ2gQmc3DNM+9cRkv7KkGQDayElg8x3X+tFDYS+E0= +go.etcd.io/etcd/api/v3 v3.6.7/go.mod h1:xJ81TLj9hxrYYEDmXTeKURMeY3qEDN24hqe+q7KhbnI= +go.etcd.io/etcd/client/pkg/v3 v3.6.7 h1:vvzgyozz46q+TyeGBuFzVuI53/yd133CHceNb/AhBVs= +go.etcd.io/etcd/client/pkg/v3 v3.6.7/go.mod h1:2IVulJ3FZ/czIGl9T4lMF1uxzrhRahLqe+hSgy+Kh7Q= 
+go.etcd.io/etcd/client/v3 v3.6.7 h1:9WqA5RpIBtdMxAy1ukXLAdtg2pAxNqW5NUoO2wQrE6U= +go.etcd.io/etcd/client/v3 v3.6.7/go.mod h1:2XfROY56AXnUqGsvl+6k29wrwsSbEh1lAouQB1vHpeE= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/bridges/prometheus v0.61.0 h1:RyrtJzu5MAmIcbRrwg75b+w3RlZCP0vJByDVzcpAe3M= -go.opentelemetry.io/contrib/bridges/prometheus v0.61.0/go.mod h1:tirr4p9NXbzjlbruiRGp53IzlYrDk5CO2fdHj0sSSaY= -go.opentelemetry.io/contrib/exporters/autoexport v0.61.0 h1:XfzKtKSrbtYk9TNCF8dkO0Y9M7IOfb4idCwBOTwGBiI= -go.opentelemetry.io/contrib/exporters/autoexport v0.61.0/go.mod h1:N6otC+qXTD5bAnbK2O1f/1SXq3cX+3KYSWrkBUqG0cw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= -go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= -go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2 h1:06ZeJRe5BnYXceSM9Vya83XXVaNGe3H1QqsvqRANQq8= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2/go.mod h1:DvPtKE63knkDVP88qpatBj81JxN+w1bqfVbsbCbj1WY= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2 h1:tPLwQlXbJ8NSOfZc4OkgU5h2A38M4c9kfHSVc4PFQGs= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2/go.mod h1:QTnxBwT/1rBIgAG1goq6xMydfYOBKU6KTiYF4fp5zL8= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0 h1:zwdo1gS2eH26Rg+CoqVQpEK1h8gvt5qyU5Kk5Bixvow= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0/go.mod h1:rUKCPscaRWWcqGT6HnEmYrK+YNe5+Sw64xgQTOJ5b30= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0 h1:gAU726w9J8fwr4qRDqu1GYMNNs4gXrU+Pv20/N1UpB4= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0/go.mod h1:RboSDkp7N292rgu+T0MgVt2qgFGu6qa1RpZDOtpL76w= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ= -go.opentelemetry.io/otel/exporters/prometheus v0.58.0 h1:CJAxWKFIqdBennqxJyOgnt5LqkeFRT+Mz3Yjz3hL+h8= -go.opentelemetry.io/otel/exporters/prometheus v0.58.0/go.mod h1:7qo/4CLI+zYSNbv0GMNquzuss2FVZo3OYrGh96n4HNc= 
-go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2 h1:12vMqzLLNZtXuXbJhSENRg+Vvx+ynNilV8twBLBsXMY= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.12.2/go.mod h1:ZccPZoPOoq8x3Trik/fCsba7DEYDUnN6yX79pgp2BUQ= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0 h1:G8Xec/SgZQricwWBJF/mHZc7A02YHedfFDENwJEdRA0= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0/go.mod h1:PD57idA/AiFD5aqoxGxCvT/ILJPeHy3MjqU/NS7KogY= -go.opentelemetry.io/otel/log v0.12.2 h1:yob9JVHn2ZY24byZeaXpTVoPS6l+UrrxmxmPKohXTwc= -go.opentelemetry.io/otel/log v0.12.2/go.mod h1:ShIItIxSYxufUMt+1H5a2wbckGli3/iCfuEbVZi/98E= -go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= -go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= -go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= -go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= -go.opentelemetry.io/otel/sdk/log v0.12.2 h1:yNoETvTByVKi7wHvYS6HMcZrN5hFLD7I++1xIZ/k6W0= -go.opentelemetry.io/otel/sdk/log v0.12.2/go.mod h1:DcpdmUXHJgSqN/dh+XMWa7Vf89u9ap0/AAk/XGLnEzY= -go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= -go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= -go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= -go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= -go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= -go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= -go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= +go.opentelemetry.io/contrib/bridges/prometheus v0.65.0 h1:I/7S/yWobR3QHFLqHsJ8QOndoiFsj1VgHpQiq43KlUI= +go.opentelemetry.io/contrib/bridges/prometheus v0.65.0/go.mod h1:jPF6gn3y1E+nozCAEQj3c6NZ8KY+tvAgSVfvoOJUFac= +go.opentelemetry.io/contrib/exporters/autoexport v0.65.0 h1:2gApdml7SznX9szEKFjKjM4qGcGSvAybYLBY319XG3g= +go.opentelemetry.io/contrib/exporters/autoexport v0.65.0/go.mod h1:0QqAGlbHXhmPYACG3n5hNzO5DnEqqtg4VcK5pr22RI0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.65.0 h1:XmiuHzgJt067+a6kwyAzkhXooYVv3/TOw9cM2VfJgUM= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.65.0/go.mod h1:KDgtbWKTQs4bM+VPUr6WlL9m/WXcmkCcBlIzqxPGzmI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.16.0 h1:ZVg+kCXxd9LtAaQNKBxAvJ5NpMf7LpvEr4MIZqb0TMQ= 
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.16.0/go.mod h1:hh0tMeZ75CCXrHd9OXRYxTlCAdxcXioWHFIpYw2rZu8= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.16.0 h1:djrxvDxAe44mJUrKataUbOhCKhR3F8QCyWucO16hTQs= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.16.0/go.mod h1:dt3nxpQEiSoKvfTVxp3TUg5fHPLhKtbcnN3Z1I1ePD0= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0 h1:NOyNnS19BF2SUDApbOKbDtWZ0IK7b8FJ2uAGdIWOGb0= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.40.0/go.mod h1:VL6EgVikRLcJa9ftukrHu/ZkkhFBSo1lzvdBC9CF1ss= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.40.0 h1:9y5sHvAxWzft1WQ4BwqcvA+IFVUJ1Ya75mSAUnFEVwE= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.40.0/go.mod h1:eQqT90eR3X5Dbs1g9YSM30RavwLF725Ris5/XSXWvqE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0/go.mod h1:bTdK1nhqF76qiPoCCdyFIV+N/sRHYXYCTQc+3VCi3MI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0 h1:DvJDOPmSWQHWywQS6lKL+pb8s3gBLOZUtw4N+mavW1I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.40.0/go.mod h1:EtekO9DEJb4/jRyN4v4Qjc2yA7AtfCBuz2FynRUWTXs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0 h1:wVZXIWjQSeSmMoxF74LzAnpVQOAFDo3pPji9Y4SOFKc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.40.0/go.mod h1:khvBS2IggMFNwZK/6lEeHg/W57h/IX6J4URh57fuI40= +go.opentelemetry.io/otel/exporters/prometheus v0.62.0 h1:krvC4JMfIOVdEuNPTtQ0ZjCiXrybhv+uOHMfHRmnvVo= +go.opentelemetry.io/otel/exporters/prometheus v0.62.0/go.mod h1:fgOE6FM/swEnsVQCqCnbOfRV4tOnWPg7bVeo4izBuhQ= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.16.0 h1:ivlbaajBWJqhcCPniDqDJmRwj4lc6sRT+dCAVKNmxlQ= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.16.0/go.mod h1:u/G56dEKDDwXNCVLsbSrllB2o8pbtFLUC4HpR66r2dc= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.40.0 h1:ZrPRak/kS4xI3AVXy8F7pipuDXmDsrO8Lg+yQjBLjw0= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.40.0/go.mod h1:3y6kQCWztq6hyW8Z9YxQDDm0Je9AJoFar2G0yDcmhRk= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0 h1:MzfofMZN8ulNqobCmCAVbqVL5syHw+eB2qPRkCMA/fQ= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.40.0/go.mod h1:E73G9UFtKRXrxhBsHtG00TB5WxX57lpsQzogDkqBTz8= +go.opentelemetry.io/otel/log v0.16.0 h1:DeuBPqCi6pQwtCK0pO4fvMB5eBq6sNxEnuTs88pjsN4= +go.opentelemetry.io/otel/log v0.16.0/go.mod h1:rWsmqNVTLIA8UnwYVOItjyEZDbKIkMxdQunsIhpUMes= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/log v0.16.0 h1:e/b4bdlQwC5fnGtG3dlXUrNOnP7c8YLVSpSfEBIkTnI= +go.opentelemetry.io/otel/sdk/log v0.16.0/go.mod h1:JKfP3T6ycy7QEuv3Hj8oKDy7KItrEkus8XJE6EoSzw4= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod 
h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= +go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= +go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4= +go.podman.io/common v0.66.1 h1:zDyd4HhVgQAN8LupBHCnhtM3FEOJ9DwmThjulXZq2qA= +go.podman.io/common v0.66.1/go.mod h1:aNd2a0S7pY+fx1X5kpQYuF4hbwLU8ZOccuVrhu7h1Xc= +go.podman.io/image/v5 v5.38.0 h1:aUKrCANkPvze1bnhLJsaubcfz0d9v/bSDLnwsXJm6G4= +go.podman.io/image/v5 v5.38.0/go.mod h1:hSIoIUzgBnmc4DjoIdzk63aloqVbD7QXDMkSE/cvG90= +go.podman.io/storage v1.61.0 h1:5hD/oyRYt1f1gxgvect+8syZBQhGhV28dCw2+CZpx0Q= +go.podman.io/storage v1.61.0/go.mod h1:A3UBK0XypjNZ6pghRhuxg62+2NIm5lcUGv/7XyMhMUI= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= +go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= @@ -592,24 +616,23 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= -golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= -golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8= +golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= -golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= +golang.org/x/exp v0.0.0-20260112195511-716be5621a96 h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU= +golang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHiYkrJyT+2uy9YZJB7H1k68CXZU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= -golang.org/x/mod 
v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= +golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= +golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -620,15 +643,14 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= -golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o= +golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= @@ -637,7 +659,6 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= @@ -651,12 +672,9 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -666,8 +684,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= -golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -677,11 +695,10 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= -golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= -golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= +golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY= +golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= @@ -689,8 +706,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= -golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= -golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= +golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= 
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -701,13 +718,12 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= -golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= +golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= +golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= @@ -716,8 +732,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= -gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0= +gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -725,12 +741,12 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 h1:GvESR9BIyHUahIb0NcTum6itIWtdoglGX+rnGxm2934= -google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:yJ2HH4EHEDTd3JiLmhds6NkJ17ITVYOdV3m3VKOnws0= -google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b 
h1:uA40e2M6fYRBf0+8uN5mLlqUtV192iiksiICIBkYJ1E= -google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:Xa7le7qx2vmqB/SzWUBa7KdMjpdpAHlh5QCSnjessQk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b h1:Mv8VFug0MP9e5vUxfBcE3vUkV6CImK3cMNMIDFjmzxU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= +google.golang.org/genproto v0.0.0-20260203192932-546029d2fa20 h1:/CU1zrxTpGylJJbe3Ru94yy6sZRbzALq2/oxl3pGB3U= +google.golang.org/genproto v0.0.0-20260203192932-546029d2fa20/go.mod h1:Tt+08/KdKEt3l8x3Pby3HLQxMB3uk/MzaQ4ZIv0ORTs= +google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20 h1:7ei4lp52gK1uSejlA8AZl5AJjeLUOHBQscRQZUgAcu0= +google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20/go.mod h1:ZdbssH/1SOVnjnDlXzxDHK2MCidiqXtbYccJNzNYPEE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20 h1:Jr5R2J6F6qWyzINc+4AM8t5pfUz6beZpHp678GNrMbE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -752,8 +768,8 @@ google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= -gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= +gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= @@ -772,53 +788,55 @@ helm.sh/helm/v3 v3.18.6 h1:S/2CqcYnNfLckkHLI0VgQbxgcDaU3N4A/46E3n9wSNY= helm.sh/helm/v3 v3.18.6/go.mod h1:L/dXDR2r539oPlFP1PJqKAC1CUgqHJDLkxKpDGrWnyg= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.33.4 h1:oTzrFVNPXBjMu0IlpA2eDDIU49jsuEorGHB4cvKupkk= -k8s.io/api v0.33.4/go.mod h1:VHQZ4cuxQ9sCUMESJV5+Fe8bGnqAARZ08tSTdHWfeAc= -k8s.io/apiextensions-apiserver v0.33.4 h1:rtq5SeXiDbXmSwxsF0MLe2Mtv3SwprA6wp+5qh/CrOU= -k8s.io/apiextensions-apiserver v0.33.4/go.mod h1:mWXcZQkQV1GQyxeIjYApuqsn/081hhXPZwZ2URuJeSs= -k8s.io/apimachinery v0.33.4 h1:SOf/JW33TP0eppJMkIgQ+L6atlDiP/090oaX0y9pd9s= -k8s.io/apimachinery v0.33.4/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= -k8s.io/apiserver v0.33.4 h1:6N0TEVA6kASUS3owYDIFJjUH6lgN8ogQmzZvaFFj1/Y= -k8s.io/apiserver v0.33.4/go.mod h1:8ODgXMnOoSPLMUg1aAzMFx+7wTJM+URil+INjbTZCok= -k8s.io/cli-runtime v0.33.3 
h1:Dgy4vPjNIu8LMJBSvs8W0LcdV0PX/8aGG1DA1W8lklA= -k8s.io/cli-runtime v0.33.3/go.mod h1:yklhLklD4vLS8HNGgC9wGiuHWze4g7x6XQZ+8edsKEo= -k8s.io/client-go v0.33.4 h1:TNH+CSu8EmXfitntjUPwaKVPN0AYMbc9F1bBS8/ABpw= -k8s.io/client-go v0.33.4/go.mod h1:LsA0+hBG2DPwovjd931L/AoaezMPX9CmBgyVyBZmbCY= -k8s.io/component-base v0.33.4 h1:Jvb/aw/tl3pfgnJ0E0qPuYLT0NwdYs1VXXYQmSuxJGY= -k8s.io/component-base v0.33.4/go.mod h1:567TeSdixWW2Xb1yYUQ7qk5Docp2kNznKL87eygY8Rc= +k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY= +k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA= +k8s.io/apiextensions-apiserver v0.35.0 h1:3xHk2rTOdWXXJM+RDQZJvdx0yEOgC0FgQ1PlJatA5T4= +k8s.io/apiextensions-apiserver v0.35.0/go.mod h1:E1Ahk9SADaLQ4qtzYFkwUqusXTcaV2uw3l14aqpL2LU= +k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8= +k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= +k8s.io/apiserver v0.35.0 h1:CUGo5o+7hW9GcAEF3x3usT3fX4f9r8xmgQeCBDaOgX4= +k8s.io/apiserver v0.35.0/go.mod h1:QUy1U4+PrzbJaM3XGu2tQ7U9A4udRRo5cyxkFX0GEds= +k8s.io/cli-runtime v0.35.0 h1:PEJtYS/Zr4p20PfZSLCbY6YvaoLrfByd6THQzPworUE= +k8s.io/cli-runtime v0.35.0/go.mod h1:VBRvHzosVAoVdP3XwUQn1Oqkvaa8facnokNkD7jOTMY= +k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE= +k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o= +k8s.io/component-base v0.35.0 h1:+yBrOhzri2S1BVqyVSvcM3PtPyx5GUxCK2tinZz1G94= +k8s.io/component-base v0.35.0/go.mod h1:85SCX4UCa6SCFt6p3IKAPej7jSnF3L8EbfSyMZayJR0= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-aggregator v0.33.4 h1:TdIJKHb0/bLpby7FblXIaVEzyA1jGEjzt/n9cRvwq8U= -k8s.io/kube-aggregator v0.33.4/go.mod h1:wZuctdRvGde5bwzxkZRs0GYj2KOpCNgx8rRGVoNb62k= -k8s.io/kube-openapi v0.0.0-20250610211856-8b98d1ed966a h1:ZV3Zr+/7s7aVbjNGICQt+ppKWsF1tehxggNfbM7XnG8= -k8s.io/kube-openapi v0.0.0-20250610211856-8b98d1ed966a/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= -k8s.io/kubectl v0.33.3 h1:r/phHvH1iU7gO/l7tTjQk2K01ER7/OAJi8uFHHyWSac= -k8s.io/kubectl v0.33.3/go.mod h1:euj2bG56L6kUGOE/ckZbCoudPwuj4Kud7BR0GzyNiT0= -k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0= -k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/kube-aggregator v0.35.0 h1:FBtbuRFA7Ohe2QKirFZcJf8rgimC8oSaNiCi4pdU5xw= +k8s.io/kube-aggregator v0.35.0/go.mod h1:vKBRpQUfDryb7udwUwF3eCSvv3AJNgHtL4PGl6PqAg8= +k8s.io/kube-openapi v0.0.0-20260127142750-a19766b6e2d4 h1:HhDfevmPS+OalTjQRKbTHppRIz01AWi8s45TMXStgYY= +k8s.io/kube-openapi v0.0.0-20260127142750-a19766b6e2d4/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= +k8s.io/kubectl v0.35.0 h1:cL/wJKHDe8E8+rP3G7avnymcMg6bH6JEcR5w5uo06wc= +k8s.io/kubectl v0.35.0/go.mod h1:VR5/TSkYyxZwrRwY5I5dDq6l5KXmiCb+9w8IKplk3Qo= +k8s.io/utils v0.0.0-20260108192941-914a6e750570 h1:JT4W8lsdrGENg9W+YwwdLJxklIuKWdRm+BC+xt33FOY= +k8s.io/utils v0.0.0-20260108192941-914a6e750570/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk= oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client 
v0.33.0 h1:qPrZsv1cwQiFeieFlRqT627fVZ+tyfou/+S5S0H5ua0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= -sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= -sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= -sigs.k8s.io/controller-tools v0.18.0 h1:rGxGZCZTV2wJreeRgqVoWab/mfcumTMmSwKzoM9xrsE= -sigs.k8s.io/controller-tools v0.18.0/go.mod h1:gLKoiGBriyNh+x1rWtUQnakUYEujErjXs9pf+x/8n1U= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.34.0 h1:hSfpvjjTQXQY2Fol2CS0QHMNs/WI1MOSGzCm1KhM5ec= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.34.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/controller-runtime v0.23.1 h1:TjJSM80Nf43Mg21+RCy3J70aj/W6KyvDtOlpKf+PupE= +sigs.k8s.io/controller-runtime v0.23.1/go.mod h1:B6COOxKptp+YaUT5q4l6LqUJTRpizbgf9KSRNdQGns0= +sigs.k8s.io/controller-tools v0.20.0 h1:VWZF71pwSQ2lZZCt7hFGJsOfDc5dVG28/IysjjMWXL8= +sigs.k8s.io/controller-tools v0.20.0/go.mod h1:b4qPmjGU3iZwqn34alUU5tILhNa9+VXK+J3QV0fT/uU= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kubebuilder/v4 v4.6.0 h1:SBc37jghs3L2UaEL91A1t5K5dANrEviUDuNic9hMQSw= sigs.k8s.io/kubebuilder/v4 v4.6.0/go.mod h1:zlXrnLiJPDPpK4hKCUrlgzzLOusfA8Sd8tpYGIrvD00= -sigs.k8s.io/kustomize/api v0.19.0 h1:F+2HB2mU1MSiR9Hp1NEgoU2q9ItNOaBJl0I4Dlus5SQ= -sigs.k8s.io/kustomize/api v0.19.0/go.mod h1:/BbwnivGVcBh1r+8m3tH1VNxJmHSk1PzP5fkP6lbL1o= -sigs.k8s.io/kustomize/kyaml v0.19.0 h1:RFge5qsO1uHhwJsu3ipV7RNolC7Uozc0jUBC/61XSlA= -sigs.k8s.io/kustomize/kyaml v0.19.0/go.mod h1:FeKD5jEOH+FbZPpqUghBP8mrLjJ3+zD3/rf9NNu1cwY= +sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I= +sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM= +sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78= +sigs.k8s.io/kustomize/kyaml v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po= sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI= sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= +sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 h1:2WOzJpHUBVrrkDjU4KBT8n5LDcj824eX0I5UKcgeRUs= +sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= From 54a2b0e67a375be6da0122d5aa48afb84e325e48 Mon Sep 17 00:00:00 2001 From: Stephan Hesselmann Date: Tue, 10 Feb 2026 11:29:32 +0100 Subject: [PATCH 146/232] ROX-32817: refactor resource 
assertions (#18865) --- tests/compliance_operator_v2_test.go | 116 ++++++++++----------------- 1 file changed, 43 insertions(+), 73 deletions(-) diff --git a/tests/compliance_operator_v2_test.go b/tests/compliance_operator_v2_test.go index 066fbee1f609f..4e23fd4b76e66 100644 --- a/tests/compliance_operator_v2_test.go +++ b/tests/compliance_operator_v2_test.go @@ -39,8 +39,8 @@ import ( const ( coNamespaceV2 = "openshift-compliance" stackroxNamespace = "stackrox" - defaultTimeout = 90 * time.Second - eventuallyTimeout = 120 * time.Second + defaultTimeout = 120 * time.Second + defaultInterval = 5 * time.Second waitForDoneTimeout = 5 * time.Minute waitForDoneInterval = 30 * time.Second ) @@ -168,65 +168,45 @@ func cleanUpResources(ctx context.Context, t *testing.T, resourceName string, na } } -func assertResourceDoesExist(ctx context.Context, t testutils.T, resourceName string, namespace string, obj dynclient.Object) dynclient.Object { +func assertResourceDoesNotExist[T any, PT interface { + dynclient.Object + *T +}](ctx context.Context, t testutils.T, name, namespace string) { client := createDynamicClient(t) require.Eventually(t, func() bool { - return client.Get(ctx, types.NamespacedName{Name: resourceName, Namespace: namespace}, obj) == nil - }, defaultTimeout, 10*time.Millisecond) - return obj -} - -func assertResourceWasUpdated(ctx context.Context, t testutils.T, resourceName string, namespace string, obj dynclient.Object) dynclient.Object { - client := createDynamicClient(t) - oldResourceVersion := obj.GetResourceVersion() - timeout := time.NewTimer(defaultTimeout) - ticker := time.NewTicker(10 * time.Millisecond) - for { - select { - case <-ticker.C: - if client.Get(ctx, types.NamespacedName{Name: resourceName, Namespace: namespace}, obj) == nil && obj.GetResourceVersion() != oldResourceVersion { - return obj - } - case <-timeout.C: - // Timing-out in here does not necessarily indicate that the - // resource was not updated as the retrieval and assertion of the - // resource can race. 
- t.Logf("Timeout before we got a new resource version for %s %s (this might be ok)", obj.GetObjectKind().GroupVersionKind().String(), resourceName) - return obj - } - } - return obj -} - -func assertResourceDoesNotExist(ctx context.Context, t testutils.T, resourceName string, namespace string, obj dynclient.Object) { - client := createDynamicClient(t) - require.Eventually(t, func() bool { - err := client.Get(ctx, types.NamespacedName{Name: resourceName, Namespace: namespace}, obj) + var obj T + err := client.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, PT(&obj)) return errors2.IsNotFound(err) - }, defaultTimeout, 10*time.Millisecond) + }, defaultTimeout, defaultInterval) } -func assertScanSetting(t testutils.T, scanConfig v2.ComplianceScanConfiguration, scanSetting *complianceoperatorv1.ScanSetting) { - require.NotNil(t, scanSetting) +func assertScanSetting(ctx context.Context, t testutils.T, client dynclient.Client, name, namespace string, scanConfig *v2.ComplianceScanConfiguration) { + scanSetting := &complianceoperatorv1.ScanSetting{} + err := client.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, scanSetting) + require.NoError(t, err, "ScanSetting %s/%s does not exist", namespace, name) + cron, err := schedule.ConvertToCronTab(service.ConvertV2ScheduleToProto(scanConfig.GetScanConfig().GetScanSchedule())) require.NoError(t, err) assert.Equal(t, scanConfig.GetScanName(), scanSetting.GetName()) assert.Equal(t, cron, scanSetting.ComplianceSuiteSettings.Schedule) - assert.Contains(t, scanSetting.Labels, "app.kubernetes.io/name") - assert.Equal(t, scanSetting.Labels["app.kubernetes.io/name"], "stackrox") - assert.Contains(t, scanSetting.Annotations, "owner") - assert.Equal(t, scanSetting.Annotations["owner"], "stackrox") + require.Contains(t, scanSetting.GetLabels(), "app.kubernetes.io/name") + assert.Equal(t, scanSetting.GetLabels()["app.kubernetes.io/name"], "stackrox") + require.Contains(t, scanSetting.GetAnnotations(), "owner") + assert.Equal(t, scanSetting.GetAnnotations()["owner"], "stackrox") } -func assertScanSettingBinding(t testutils.T, scanConfig v2.ComplianceScanConfiguration, scanSettingBinding *complianceoperatorv1.ScanSettingBinding) { - require.NotNil(t, scanSettingBinding) +func assertScanSettingBinding(ctx context.Context, t testutils.T, client dynclient.Client, name, namespace string, scanConfig *v2.ComplianceScanConfiguration) { + scanSettingBinding := &complianceoperatorv1.ScanSettingBinding{} + err := client.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, scanSettingBinding) + require.NoError(t, err, "ScanSettingBinding %s/%s does not exist", namespace, name) + assert.Equal(t, scanConfig.GetScanName(), scanSettingBinding.GetName()) for _, profile := range scanSettingBinding.Profiles { assert.Contains(t, scanConfig.GetScanConfig().GetProfiles(), profile.Name) } - assert.Contains(t, scanSettingBinding.Labels, "app.kubernetes.io/name") + require.Contains(t, scanSettingBinding.Labels, "app.kubernetes.io/name") assert.Equal(t, scanSettingBinding.Labels["app.kubernetes.io/name"], "stackrox") - assert.Contains(t, scanSettingBinding.Annotations, "owner") + require.Contains(t, scanSettingBinding.Annotations, "owner") assert.Equal(t, scanSettingBinding.Annotations["owner"], "stackrox") } @@ -235,7 +215,7 @@ func waitForDeploymentReady(ctx context.Context, t *testing.T, name string, name require.Eventually(t, func() bool { deployment := &appsv1.Deployment{} return client.Get(ctx, types.NamespacedName{Name: name, Namespace: 
namespace}, deployment) == nil && deployment.Status.ReadyReplicas == numReplicas - }, defaultTimeout, 10*time.Millisecond) + }, defaultTimeout, defaultInterval) } func TestComplianceV2CentralSendsScanConfiguration(t *testing.T) { @@ -278,14 +258,11 @@ func TestComplianceV2CentralSendsScanConfiguration(t *testing.T) { waitForDeploymentReady(ctx, t, "sensor", stackroxNamespace, 1) // Assert the ScanSetting and the ScanSettingBinding are created - scanSetting := &complianceoperatorv1.ScanSetting{} - scanSettingBinding := &complianceoperatorv1.ScanSettingBinding{} + client := createDynamicClient(t) assert.EventuallyWithT(t, func(c *assert.CollectT) { - assertResourceDoesExist(ctx, wrapCollectT(t, c), scanName, coNamespaceV2, scanSetting) - assertResourceDoesExist(ctx, wrapCollectT(t, c), scanName, coNamespaceV2, scanSettingBinding) - assertScanSetting(wrapCollectT(t, c), scanConfig, scanSetting) - assertScanSettingBinding(wrapCollectT(t, c), scanConfig, scanSettingBinding) - }, eventuallyTimeout, 2*time.Second) + assertScanSetting(ctx, wrapCollectT(t, c), client, scanName, coNamespaceV2, &scanConfig) + assertScanSettingBinding(ctx, wrapCollectT(t, c), client, scanName, coNamespaceV2, &scanConfig) + }, defaultTimeout, defaultInterval) // Scale down Sensor assert.NoError(t, scaleToN(ctx, k8sClient, "sensor", stackroxNamespace, 0)) @@ -304,11 +281,9 @@ func TestComplianceV2CentralSendsScanConfiguration(t *testing.T) { // Assert the ScanSetting and the ScanSettingBinding are updated assert.EventuallyWithT(t, func(c *assert.CollectT) { - assertResourceWasUpdated(ctx, wrapCollectT(t, c), scanName, coNamespaceV2, scanSetting) - assertResourceWasUpdated(ctx, wrapCollectT(t, c), scanName, coNamespaceV2, scanSettingBinding) - assertScanSetting(wrapCollectT(t, c), scanConfig, scanSetting) - assertScanSettingBinding(wrapCollectT(t, c), scanConfig, scanSettingBinding) - }, eventuallyTimeout, 2*time.Second) + assertScanSetting(ctx, wrapCollectT(t, c), client, scanName, coNamespaceV2, &scanConfig) + assertScanSettingBinding(ctx, wrapCollectT(t, c), client, scanName, coNamespaceV2, &scanConfig) + }, defaultTimeout, defaultInterval) // Scale down Sensor assert.NoError(t, scaleToN(ctx, k8sClient, "sensor", stackroxNamespace, 0)) @@ -325,8 +300,8 @@ func TestComplianceV2CentralSendsScanConfiguration(t *testing.T) { waitForDeploymentReady(ctx, t, "sensor", stackroxNamespace, 1) // Assert the ScanSetting and the ScanSettingBinding are deleted - assertResourceDoesNotExist(ctx, t, scanName, coNamespaceV2, scanSetting) - assertResourceDoesNotExist(ctx, t, scanName, coNamespaceV2, scanSettingBinding) + assertResourceDoesNotExist[complianceoperatorv1.ScanSetting](ctx, t, scanName, coNamespaceV2) + assertResourceDoesNotExist[complianceoperatorv1.ScanSettingBinding](ctx, t, scanName, coNamespaceV2) } // ACS API test suite for integration testing for the Compliance Operator. 
@@ -568,14 +543,11 @@ func TestComplianceV2UpdateScanConfigurations(t *testing.T) { assert.GreaterOrEqual(t, scanConfigs.TotalCount, int32(1)) // Assert the ScanSetting and the ScanSettingBinding are created - scanSetting := &complianceoperatorv1.ScanSetting{} - scanSettingBinding := &complianceoperatorv1.ScanSettingBinding{} + client := createDynamicClient(t) assert.EventuallyWithT(t, func(c *assert.CollectT) { - assertResourceDoesExist(ctx, wrapCollectT(t, c), scanName, coNamespaceV2, scanSetting) - assertResourceDoesExist(ctx, wrapCollectT(t, c), scanName, coNamespaceV2, scanSettingBinding) - assertScanSetting(wrapCollectT(t, c), *req, scanSetting) - assertScanSettingBinding(wrapCollectT(t, c), *req, scanSettingBinding) - }, eventuallyTimeout, 2*time.Second) + assertScanSetting(ctx, wrapCollectT(t, c), client, scanName, coNamespaceV2, req) + assertScanSettingBinding(ctx, wrapCollectT(t, c), client, scanName, coNamespaceV2, req) + }, defaultTimeout, defaultInterval) // Update the scan configuration updateReq := req.CloneVT() @@ -602,11 +574,9 @@ func TestComplianceV2UpdateScanConfigurations(t *testing.T) { // Assert the ScanSetting and the ScanSettingBinding are updated assert.EventuallyWithT(t, func(c *assert.CollectT) { - assertResourceWasUpdated(ctx, wrapCollectT(t, c), scanName, coNamespaceV2, scanSetting) - assertResourceWasUpdated(ctx, wrapCollectT(t, c), scanName, coNamespaceV2, scanSettingBinding) - assertScanSetting(wrapCollectT(t, c), *updateReq, scanSetting) - assertScanSettingBinding(wrapCollectT(t, c), *updateReq, scanSettingBinding) - }, eventuallyTimeout, 2*time.Second) + assertScanSetting(ctx, wrapCollectT(t, c), client, scanName, coNamespaceV2, updateReq) + assertScanSettingBinding(ctx, wrapCollectT(t, c), client, scanName, coNamespaceV2, updateReq) + }, defaultTimeout, defaultInterval) } func TestComplianceV2DeleteComplianceScanConfigurations(t *testing.T) { @@ -714,7 +684,7 @@ func TestComplianceV2ComplianceObjectMetadata(t *testing.T) { &scanSetting, ) require.NoError(c, err, "failed to get ScanSetting %s", testName) - }, defaultTimeout, 5*time.Second) + }, defaultTimeout, defaultInterval) assert.Contains(t, scanSetting.Labels, "app.kubernetes.io/name") assert.Equal(t, scanSetting.Labels["app.kubernetes.io/name"], "stackrox") From 19c0569a11fc43d00155e23d13905703a23b5ffb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Luis=20Valero=20Mart=C3=ADn?= Date: Tue, 10 Feb 2026 13:26:34 +0100 Subject: [PATCH 147/232] ROX-33043: Fix Profile informer startup panic (#18921) Co-authored-by: Claude Sonnet 4.5 --- sensor/kubernetes/listener/resource_event_handler.go | 9 ++++----- sensor/kubernetes/listener/resources/dispatcher.go | 12 ++++++++---- .../listener/resources/mocks/dispatcher.go | 9 +++++---- 3 files changed, 17 insertions(+), 13 deletions(-) diff --git a/sensor/kubernetes/listener/resource_event_handler.go b/sensor/kubernetes/listener/resource_event_handler.go index 6ad0bafe5a5b3..5e178b2e3c2af 100644 --- a/sensor/kubernetes/listener/resource_event_handler.go +++ b/sensor/kubernetes/listener/resource_event_handler.go @@ -167,8 +167,6 @@ func (k *listenerImpl) handleAllEvents() { } if coAvailable { log.Info("Initializing compliance operator informers") - profileLister = crdSharedInformerFactory.ForResource(complianceoperator.Profile.GroupVersionResource()).Lister() - complianceResultInformer = crdSharedInformerFactory.ForResource(complianceoperator.ComplianceCheckResult.GroupVersionResource()).Informer() complianceScanSettingBindingsInformer = 
crdSharedInformerFactory.ForResource(complianceoperator.ScanSettingBinding.GroupVersionResource()).Informer() complianceRuleInformer = crdSharedInformerFactory.ForResource(complianceoperator.Rule.GroupVersionResource()).Informer() @@ -233,7 +231,6 @@ func (k *listenerImpl) handleAllEvents() { dispatchers := resources.NewDispatcherRegistry( clusterID, podInformer.Lister(), - profileLister, processfilter.Singleton(), k.configHandler, k.credentialsManager, @@ -391,10 +388,12 @@ func (k *listenerImpl) handleAllEvents() { // Compliance operator profiles are handled AFTER results, rules, and scan setting bindings have been synced if coAvailable { - complianceProfileInformer := crdSharedInformerFactory.ForResource(complianceoperator.Profile.GroupVersionResource()).Informer() + profileGenericInformer := crdSharedInformerFactory.ForResource(complianceoperator.Profile.GroupVersionResource()) + complianceProfileInformer := profileGenericInformer.Informer() + profileLister = profileGenericInformer.Lister() complianceTailoredProfileInformer := crdSharedInformerFactory.ForResource(complianceoperator.TailoredProfile.GroupVersionResource()).Informer() handle(k.context, complianceProfileInformer, dispatchers.ForComplianceOperatorProfiles(), k.pubSubDispatcher, k.outputQueue, &syncingResources, preTopLevelDeploymentWaitGroup, stopSignal, &eventLock) - handle(k.context, complianceTailoredProfileInformer, dispatchers.ForComplianceOperatorTailoredProfiles(), k.pubSubDispatcher, k.outputQueue, &syncingResources, preTopLevelDeploymentWaitGroup, stopSignal, &eventLock) + handle(k.context, complianceTailoredProfileInformer, dispatchers.ForComplianceOperatorTailoredProfiles(profileLister), k.pubSubDispatcher, k.outputQueue, &syncingResources, preTopLevelDeploymentWaitGroup, stopSignal, &eventLock) } if !startAndWait(stopSignal, preTopLevelDeploymentWaitGroup, sif, crdSharedInformerFactory, osRouteFactory) { diff --git a/sensor/kubernetes/listener/resources/dispatcher.go b/sensor/kubernetes/listener/resources/dispatcher.go index 101e1eb587890..416be0b856967 100644 --- a/sensor/kubernetes/listener/resources/dispatcher.go +++ b/sensor/kubernetes/listener/resources/dispatcher.go @@ -56,7 +56,7 @@ type DispatcherRegistry interface { ForComplianceOperatorScanSettingBindings() Dispatcher ForComplianceOperatorScans() Dispatcher ForComplianceOperatorSuites() Dispatcher - ForComplianceOperatorTailoredProfiles() Dispatcher + ForComplianceOperatorTailoredProfiles(profileLister cache.GenericLister) Dispatcher ForComplianceOperatorRemediations() Dispatcher } @@ -64,7 +64,6 @@ type DispatcherRegistry interface { func NewDispatcherRegistry( clusterID string, podLister v1Listers.PodLister, - profileLister cache.GenericLister, processFilter filter.Filter, configHandler config.Handler, credentialsManager awscredentials.RegistryCredentialsManager, @@ -106,7 +105,6 @@ func NewDispatcherRegistry( complianceOperatorProfileDispatcher: dispatchers.NewProfileDispatcher(), complianceOperatorScanSettingBindingsDispatcher: dispatchers.NewScanSettingBindingsDispatcher(), complianceOperatorScanDispatcher: dispatchers.NewScanDispatcher(), - complianceOperatorTailoredProfileDispatcher: dispatchers.NewTailoredProfileDispatcher(profileLister), complianceOperatorSuiteDispatcher: dispatchers.NewSuitesDispatcher(), complianceOperatorRemediationDispatcher: dispatchers.NewRemediationDispatcher(), @@ -301,7 +299,13 @@ func (d *registryImpl) ForComplianceOperatorProfiles() Dispatcher { return wrapDispatcher(d.complianceOperatorProfileDispatcher, 
d.traceWriter) } -func (d *registryImpl) ForComplianceOperatorTailoredProfiles() Dispatcher { +func (d *registryImpl) ForComplianceOperatorTailoredProfiles(profileLister cache.GenericLister) Dispatcher { + // Lazy initialization: create the dispatcher on first call. + // This allows the profileLister to be provided after the registry is created, + // which is necessary to avoid creating the Profile informer too early in the startup sequence. + if d.complianceOperatorTailoredProfileDispatcher == nil { + d.complianceOperatorTailoredProfileDispatcher = dispatchers.NewTailoredProfileDispatcher(profileLister) + } return wrapDispatcher(d.complianceOperatorTailoredProfileDispatcher, d.traceWriter) } diff --git a/sensor/kubernetes/listener/resources/mocks/dispatcher.go b/sensor/kubernetes/listener/resources/mocks/dispatcher.go index 055c7a41357e6..38696d505a83d 100644 --- a/sensor/kubernetes/listener/resources/mocks/dispatcher.go +++ b/sensor/kubernetes/listener/resources/mocks/dispatcher.go @@ -16,6 +16,7 @@ import ( component "github.com/stackrox/rox/sensor/kubernetes/eventpipeline/component" resources "github.com/stackrox/rox/sensor/kubernetes/listener/resources" gomock "go.uber.org/mock/gomock" + cache "k8s.io/client-go/tools/cache" ) // MockDispatcher is a mock of Dispatcher interface. @@ -193,17 +194,17 @@ func (mr *MockDispatcherRegistryMockRecorder) ForComplianceOperatorSuites() *gom } // ForComplianceOperatorTailoredProfiles mocks base method. -func (m *MockDispatcherRegistry) ForComplianceOperatorTailoredProfiles() resources.Dispatcher { +func (m *MockDispatcherRegistry) ForComplianceOperatorTailoredProfiles(profileLister cache.GenericLister) resources.Dispatcher { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ForComplianceOperatorTailoredProfiles") + ret := m.ctrl.Call(m, "ForComplianceOperatorTailoredProfiles", profileLister) ret0, _ := ret[0].(resources.Dispatcher) return ret0 } // ForComplianceOperatorTailoredProfiles indicates an expected call of ForComplianceOperatorTailoredProfiles. -func (mr *MockDispatcherRegistryMockRecorder) ForComplianceOperatorTailoredProfiles() *gomock.Call { +func (mr *MockDispatcherRegistryMockRecorder) ForComplianceOperatorTailoredProfiles(profileLister any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForComplianceOperatorTailoredProfiles", reflect.TypeOf((*MockDispatcherRegistry)(nil).ForComplianceOperatorTailoredProfiles)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForComplianceOperatorTailoredProfiles", reflect.TypeOf((*MockDispatcherRegistry)(nil).ForComplianceOperatorTailoredProfiles), profileLister) } // ForDeployments mocks base method. From 2837f3d337948a5d079b520af69215ceac3e30b8 Mon Sep 17 00:00:00 2001 From: Misha Sugakov <537715+msugakov@users.noreply.github.com> Date: Tue, 10 Feb 2026 14:04:12 +0100 Subject: [PATCH 148/232] chore: Turn off rpm-lockfile-prototype post-upgrade task (#18940) --- .github/renovate.json5 | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 9985276aa8626..d7c295b602c5e 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -58,12 +58,6 @@ // Override Konflux custom schedule for this manager to our intended one. "after 3am and before 7am", ], - "postUpgradeTasks": { - "commands": [ - // Refresh the rpm lockfile after updating image references in the dockerfile. 
- "rpm-lockfile-prototype rpms.in.yaml", - ], - }, }, "rpm-lockfile": { "schedule": [ From a7848e280239470bbf902731c0681d85b466713e Mon Sep 17 00:00:00 2001 From: Tomasz Janiszewski Date: Tue, 10 Feb 2026 13:10:20 +0000 Subject: [PATCH 149/232] ROX-32976: Replace Docker Hub images with quay.io to avoid rate limits (#18867) Co-authored-by: Claude Sonnet 4.5 --- .github/workflows/unit-tests.yaml | 18 +++++++++++++ scripts/ci/lib.sh | 13 ++++++++++ .../yaml/co-deployment.yaml | 2 +- .../tests/connection/alerts/yaml/nginx.yaml | 2 +- .../k8sreconciliation/yaml/nginx.yaml | 2 +- .../k8sreconciliation/yaml/nginx2.yaml | 2 +- .../k8sreconciliation/yaml/nginx3.yaml | 2 +- .../k8sreconciliation/yaml/nginx4.yaml | 2 +- .../tests/connection/runtime/runtime_test.go | 8 +++--- .../tests/connection/runtime/yaml/nginx.yaml | 2 +- .../tests/connection/runtime/yaml/talk.yaml | 4 +-- sensor/tests/connection/yaml/nginx.yaml | 2 +- sensor/tests/connection/yaml/nginx2.yaml | 2 +- sensor/tests/data/runtime-policies.json | 2 +- sensor/tests/images-to-prefetch.txt | 26 +++++++++++++++++++ sensor/tests/resource/imagescan/yaml/pod.yaml | 2 +- .../resource/networkpolicy/yaml/nginx.yaml | 2 +- sensor/tests/resource/pod/pod_test.go | 8 +++--- sensor/tests/resource/pod/yaml/nginx-pod.yaml | 2 +- sensor/tests/resource/pod/yaml/nginx.yaml | 2 +- .../tests/resource/role/yaml/nginx-pod.yaml | 2 +- sensor/tests/resource/role/yaml/nginx.yaml | 2 +- .../resource/service/yaml/nginx-pod.yaml | 2 +- sensor/tests/resource/service/yaml/nginx.yaml | 2 +- tests/e2e/sensor.sh | 9 +++++++ 25 files changed, 94 insertions(+), 28 deletions(-) create mode 100644 sensor/tests/images-to-prefetch.txt diff --git a/.github/workflows/unit-tests.yaml b/.github/workflows/unit-tests.yaml index b529981e09341..f048282df2838 100644 --- a/.github/workflows/unit-tests.yaml +++ b/.github/workflows/unit-tests.yaml @@ -403,11 +403,29 @@ jobs: - name: Cache Go dependencies uses: ./.github/actions/cache-go-dependencies + - name: Login to Quay.io + uses: docker/login-action@v3 + with: + registry: quay.io + username: ${{ secrets.QUAY_RHACS_ENG_RO_USERNAME }} + password: ${{ secrets.QUAY_RHACS_ENG_RO_PASSWORD }} + - name: Create k8s Kind Cluster uses: helm/kind-action@v1 with: kubeconfig: "${{ env.KUBECONFIG }}" + - name: Load test images into Kind + run: | + # Read images from prefetch list (single source of truth) + while IFS= read -r image; do + # Skip empty lines and comments + [[ -z "$image" || "$image" =~ ^# ]] && continue + echo "Loading $image into Kind..." + docker pull "$image" + kind load docker-image "$image" --name chart-testing + done < sensor/tests/images-to-prefetch.txt + - name: Run sensor integration tests run: make sensor-integration-test diff --git a/scripts/ci/lib.sh b/scripts/ci/lib.sh index 7a2ece08a8c33..a6e41e259e581 100755 --- a/scripts/ci/lib.sh +++ b/scripts/ci/lib.sh @@ -640,6 +640,13 @@ _image_prefetcher_prebuilt_start() { # prefect list stays up to date with additions. ci_export "IMAGE_PULL_POLICY_FOR_QUAY_IO" "Never" ;; + *sensor-integration-tests) + image_prefetcher_start_set sensor-integration + # Override the default image pull policy for containers with quay.io + # images to rely on prefetched images. This helps ensure that the static + # prefect list stays up to date with additions. + ci_export "IMAGE_PULL_POLICY_FOR_QUAY_IO" "Never" + ;; *-operator-e2e-tests) image_prefetcher_start_set operator-e2e # TODO(ROX-20508): pre-fetch images of the release from which operator upgrade test starts as well. 
@@ -763,6 +770,9 @@ _image_prefetcher_prebuilt_await() { *nongroovy-e2e-tests) image_prefetcher_await_set qa-nongroovy-e2e ;; + *sensor-integration-tests) + image_prefetcher_await_set sensor-integration + ;; *-operator-e2e-tests) image_prefetcher_await_set operator-e2e # TODO(ROX-20508): pre-fetch images of the release from which operator upgrade test starts as well. @@ -920,6 +930,9 @@ populate_prefetcher_image_list() { qa-nongroovy-e2e) cp "$SCRIPTS_ROOT/tests/images-to-prefetch.txt" "$image_list" ;; + sensor-integration) + cp "$SCRIPTS_ROOT/sensor/tests/images-to-prefetch.txt" "$image_list" + ;; *) die "ERROR: An unsupported image prefetcher target was requested: $name" ;; diff --git a/sensor/tests/complianceoperator/yaml/co-deployment.yaml b/sensor/tests/complianceoperator/yaml/co-deployment.yaml index d7826a052f835..61c71837310b2 100644 --- a/sensor/tests/complianceoperator/yaml/co-deployment.yaml +++ b/sensor/tests/complianceoperator/yaml/co-deployment.yaml @@ -16,7 +16,7 @@ spec: spec: containers: - name: compliance-operator - image: busybox + image: quay.io/rhacs-eng/qa-multi-arch-busybox:1.30 command: - tail - -f diff --git a/sensor/tests/connection/alerts/yaml/nginx.yaml b/sensor/tests/connection/alerts/yaml/nginx.yaml index 597ddab02b3b6..47caa8517db8f 100644 --- a/sensor/tests/connection/alerts/yaml/nginx.yaml +++ b/sensor/tests/connection/alerts/yaml/nginx.yaml @@ -16,7 +16,7 @@ spec: spec: containers: - name: nginx - image: nginx:1.14.2 + image: quay.io/rhacs-eng/qa-multi-arch:nginx-1.21.1 ports: - containerPort: 80 diff --git a/sensor/tests/connection/k8sreconciliation/yaml/nginx.yaml b/sensor/tests/connection/k8sreconciliation/yaml/nginx.yaml index 597ddab02b3b6..47caa8517db8f 100644 --- a/sensor/tests/connection/k8sreconciliation/yaml/nginx.yaml +++ b/sensor/tests/connection/k8sreconciliation/yaml/nginx.yaml @@ -16,7 +16,7 @@ spec: spec: containers: - name: nginx - image: nginx:1.14.2 + image: quay.io/rhacs-eng/qa-multi-arch:nginx-1.21.1 ports: - containerPort: 80 diff --git a/sensor/tests/connection/k8sreconciliation/yaml/nginx2.yaml b/sensor/tests/connection/k8sreconciliation/yaml/nginx2.yaml index 1cc86c13e2385..bb796a002c048 100644 --- a/sensor/tests/connection/k8sreconciliation/yaml/nginx2.yaml +++ b/sensor/tests/connection/k8sreconciliation/yaml/nginx2.yaml @@ -16,7 +16,7 @@ spec: spec: containers: - name: nginx - image: nginx:1.14.2 + image: quay.io/rhacs-eng/qa-multi-arch:nginx-1.21.1 ports: - containerPort: 80 diff --git a/sensor/tests/connection/k8sreconciliation/yaml/nginx3.yaml b/sensor/tests/connection/k8sreconciliation/yaml/nginx3.yaml index ac71a4b773141..1fe673d514c0c 100644 --- a/sensor/tests/connection/k8sreconciliation/yaml/nginx3.yaml +++ b/sensor/tests/connection/k8sreconciliation/yaml/nginx3.yaml @@ -16,6 +16,6 @@ spec: spec: containers: - name: nginx - image: nginx:1.14.2 + image: quay.io/rhacs-eng/qa-multi-arch:nginx-1.21.1 ports: - containerPort: 80 diff --git a/sensor/tests/connection/k8sreconciliation/yaml/nginx4.yaml b/sensor/tests/connection/k8sreconciliation/yaml/nginx4.yaml index 610f0e820bd0c..6969b818b1eb7 100644 --- a/sensor/tests/connection/k8sreconciliation/yaml/nginx4.yaml +++ b/sensor/tests/connection/k8sreconciliation/yaml/nginx4.yaml @@ -16,6 +16,6 @@ spec: spec: containers: - name: nginx - image: nginx:1.14.2 + image: quay.io/rhacs-eng/qa-multi-arch:nginx-1.21.1 ports: - containerPort: 80 diff --git a/sensor/tests/connection/runtime/runtime_test.go b/sensor/tests/connection/runtime/runtime_test.go index 371102ed0412f..1d6e28ebc64c4 
100644 --- a/sensor/tests/connection/runtime/runtime_test.go +++ b/sensor/tests/connection/runtime/runtime_test.go @@ -23,7 +23,7 @@ var ( NginxService = helper.K8sResourceInfo{Kind: "Service", YamlFile: "nginx-service.yaml", Name: "nginx-service"} TalkPod = helper.K8sResourceInfo{Kind: "Pod", YamlFile: "talk.yaml", Name: "talk"} - processIndicatorPolicyName = "test-pi-curl" + processIndicatorPolicyName = "test-pi-wget" networkFlowPolicyName = "test-flow" ) @@ -95,7 +95,7 @@ func Test_SensorIntermediateRuntimeEvents(t *testing.T) { nginxIP := c.GetIPFromService(srvObj) require.NotEqual(t, "", nginxIP) - helper.SendSignalMessage(fakeCollector, talkContainerIds[0], "curl") + helper.SendSignalMessage(fakeCollector, talkContainerIds[0], "wget") helper.SendFlowMessage(fakeCollector, sensor.SocketFamily_SOCKET_FAMILY_UNKNOWN, storage.L4Protocol_L4_PROTOCOL_TCP, @@ -119,7 +119,7 @@ func Test_SensorIntermediateRuntimeEvents(t *testing.T) { } expectedSignals := []helper.ExpectedSignalMessageFn{ func(msg *sensor.SignalStreamMessage) bool { - return msg.GetSignal().GetProcessSignal().GetName() == "curl" && msg.GetSignal().GetProcessSignal().GetContainerId() == talkContainerIds[0] + return msg.GetSignal().GetProcessSignal().GetName() == "wget" && msg.GetSignal().GetProcessSignal().GetContainerId() == talkContainerIds[0] }, } go helper.WaitToReceiveMessagesFromCollector(ctx, &messagesReceivedSignal, @@ -143,7 +143,7 @@ func Test_SensorIntermediateRuntimeEvents(t *testing.T) { msg, err := testContext.WaitForMessageWithMatcher(func(event *central.MsgFromSensor) bool { return event.GetEvent().GetProcessIndicator().GetDeploymentId() == talkUID && - event.GetEvent().GetProcessIndicator().GetSignal().GetName() == "curl" + event.GetEvent().GetProcessIndicator().GetSignal().GetName() == "wget" }, time.Minute) assert.NoError(t, err) assert.NotNil(t, msg) diff --git a/sensor/tests/connection/runtime/yaml/nginx.yaml b/sensor/tests/connection/runtime/yaml/nginx.yaml index c295e5f91af27..9eb5b5270c387 100644 --- a/sensor/tests/connection/runtime/yaml/nginx.yaml +++ b/sensor/tests/connection/runtime/yaml/nginx.yaml @@ -16,6 +16,6 @@ spec: spec: containers: - name: nginx - image: nginx:1.14.2 + image: quay.io/rhacs-eng/qa-multi-arch:nginx-1.21.1 ports: - containerPort: 80 diff --git a/sensor/tests/connection/runtime/yaml/talk.yaml b/sensor/tests/connection/runtime/yaml/talk.yaml index 384d7647380f7..4ea02201c4107 100644 --- a/sensor/tests/connection/runtime/yaml/talk.yaml +++ b/sensor/tests/connection/runtime/yaml/talk.yaml @@ -7,7 +7,7 @@ metadata: spec: containers: - name: talk - image: alpine/curl + image: quay.io/rhacs-eng/qa:alpine-3.16.0 command: ["/bin/sh", "-c"] args: - - "while true ; do sleep 2; curl http://nginx-service.sensor-integration.svc.cluster.local:80; done" + - "while true ; do sleep 2; wget -O- http://nginx-service.sensor-integration.svc.cluster.local:80; done" diff --git a/sensor/tests/connection/yaml/nginx.yaml b/sensor/tests/connection/yaml/nginx.yaml index 685c17aa68e1d..d0fd9d3ca351e 100644 --- a/sensor/tests/connection/yaml/nginx.yaml +++ b/sensor/tests/connection/yaml/nginx.yaml @@ -16,6 +16,6 @@ spec: spec: containers: - name: nginx - image: nginx:1.14.2 + image: quay.io/rhacs-eng/qa-multi-arch:nginx-1.21.1 ports: - containerPort: 80 diff --git a/sensor/tests/connection/yaml/nginx2.yaml b/sensor/tests/connection/yaml/nginx2.yaml index 1945de4195fbb..0e9a49c5f7947 100644 --- a/sensor/tests/connection/yaml/nginx2.yaml +++ b/sensor/tests/connection/yaml/nginx2.yaml @@ -16,6 +16,6 @@ spec: 
spec: containers: - name: nginx - image: nginx:1.14.2 + image: quay.io/rhacs-eng/qa-multi-arch:nginx-1.21.1 ports: - containerPort: 80 diff --git a/sensor/tests/data/runtime-policies.json b/sensor/tests/data/runtime-policies.json index a4eaa2b504171..c9de71e258275 100644 --- a/sensor/tests/data/runtime-policies.json +++ b/sensor/tests/data/runtime-policies.json @@ -1,3 +1,3 @@ {"policies":[ -{"id":"cdd8ceab-784e-4726-b4de-86334b230b61","name":"test-pi-curl","description":"","rationale":"","remediation":"","disabled":false,"categories":["Anomalous Activity"],"lifecycleStages":["RUNTIME"],"eventSource":"DEPLOYMENT_EVENT","exclusions":[],"scope":[],"severity":"LOW_SEVERITY","enforcementActions":[],"notifiers":[],"lastUpdated":"2024-02-05T11:50:54.392130134Z","SORTName":"test-pi-curl","SORTLifecycleStage":"RUNTIME","SORTEnforcement":false,"policyVersion":"1.1","policySections":[{"sectionName":"Policy Section 1","policyGroups":[{"fieldName":"Process Name","booleanOperator":"OR","negate":false,"values":[{"value":"curl"}]}]}],"mitreAttackVectors":[],"criteriaLocked":false,"mitreVectorsLocked":false,"isDefault":false}, +{"id":"cdd8ceab-784e-4726-b4de-86334b230b61","name":"test-pi-wget","description":"","rationale":"","remediation":"","disabled":false,"categories":["Anomalous Activity"],"lifecycleStages":["RUNTIME"],"eventSource":"DEPLOYMENT_EVENT","exclusions":[],"scope":[],"severity":"LOW_SEVERITY","enforcementActions":[],"notifiers":[],"lastUpdated":"2024-02-05T11:50:54.392130134Z","SORTName":"test-pi-wget","SORTLifecycleStage":"RUNTIME","SORTEnforcement":false,"policyVersion":"1.1","policySections":[{"sectionName":"Policy Section 1","policyGroups":[{"fieldName":"Process Name","booleanOperator":"OR","negate":false,"values":[{"value":"wget"}]}]}],"mitreAttackVectors":[],"criteriaLocked":false,"mitreVectorsLocked":false,"isDefault":false}, {"id":"b626cb9d-0bd1-4c6c-b8d0-b9c69f07722c","name":"test-flow","description":"","rationale":"","remediation":"","disabled":false,"categories":["Anomalous Activity"],"lifecycleStages":["RUNTIME"],"eventSource":"DEPLOYMENT_EVENT","exclusions":[],"scope":[],"severity":"LOW_SEVERITY","enforcementActions":[],"notifiers":[],"lastUpdated":"2024-02-05T11:51:34.581615895Z","SORTName":"test-flow","SORTLifecycleStage":"RUNTIME","SORTEnforcement":false,"policyVersion":"1.1","policySections":[{"sectionName":"Policy Section 1","policyGroups":[{"fieldName":"Unexpected Network Flow Detected","booleanOperator":"OR","negate":false,"values":[{"value":"false"}]}]}],"mitreAttackVectors":[],"criteriaLocked":false,"mitreVectorsLocked":false,"isDefault":false}]} diff --git a/sensor/tests/images-to-prefetch.txt b/sensor/tests/images-to-prefetch.txt new file mode 100644 index 0000000000000..cd66706637910 --- /dev/null +++ b/sensor/tests/images-to-prefetch.txt @@ -0,0 +1,26 @@ +# Images used by sensor integration tests - prefetched to avoid Docker Hub rate limits +# +# This file is referenced by scripts/ci/lib.sh:populate_prefetcher_image_list() +# Pattern follows tests/images-to-prefetch.txt and qa-tests-backend/scripts/images-to-prefetch.txt + +# Used by sensor/tests/connection/runtime/yaml/nginx.yaml +# Used by sensor/tests/connection/alerts/yaml/nginx.yaml +# Used by sensor/tests/connection/k8sreconciliation/yaml/nginx*.yaml (4 files) +# Used by sensor/tests/connection/yaml/nginx*.yaml (2 files) +# Used by sensor/tests/resource/networkpolicy/yaml/nginx.yaml +# Used by sensor/tests/resource/pod/yaml/nginx.yaml +# Used by sensor/tests/resource/role/yaml/nginx.yaml +# Used by 
sensor/tests/resource/service/yaml/nginx.yaml +quay.io/rhacs-eng/qa-multi-arch:nginx-1.21.1 + +# Used by sensor/tests/resource/pod/yaml/nginx-pod.yaml +# Used by sensor/tests/resource/role/yaml/nginx-pod.yaml +# Used by sensor/tests/resource/service/yaml/nginx-pod.yaml +# Used by sensor/tests/resource/imagescan/yaml/pod.yaml +# (All nginx-pod.yaml files use the same nginx-1.21.1 image) + +# Used by sensor/tests/connection/runtime/yaml/talk.yaml +quay.io/rhacs-eng/qa:alpine-3.16.0 + +# Used by sensor/tests/complianceoperator/yaml/co-deployment.yaml +quay.io/rhacs-eng/qa-multi-arch-busybox:1.30 diff --git a/sensor/tests/resource/imagescan/yaml/pod.yaml b/sensor/tests/resource/imagescan/yaml/pod.yaml index ea761586eeabd..05e2e4a572481 100644 --- a/sensor/tests/resource/imagescan/yaml/pod.yaml +++ b/sensor/tests/resource/imagescan/yaml/pod.yaml @@ -7,6 +7,6 @@ metadata: spec: containers: - name: app - image: nginx:1.14.1 + image: quay.io/rhacs-eng/qa-multi-arch:nginx-1.21.1 ports: - containerPort: 80 diff --git a/sensor/tests/resource/networkpolicy/yaml/nginx.yaml b/sensor/tests/resource/networkpolicy/yaml/nginx.yaml index 685c17aa68e1d..d0fd9d3ca351e 100644 --- a/sensor/tests/resource/networkpolicy/yaml/nginx.yaml +++ b/sensor/tests/resource/networkpolicy/yaml/nginx.yaml @@ -16,6 +16,6 @@ spec: spec: containers: - name: nginx - image: nginx:1.14.2 + image: quay.io/rhacs-eng/qa-multi-arch:nginx-1.21.1 ports: - containerPort: 80 diff --git a/sensor/tests/resource/pod/pod_test.go b/sensor/tests/resource/pod/pod_test.go index 2dab0723f1bca..627b81d16a78c 100644 --- a/sensor/tests/resource/pod/pod_test.go +++ b/sensor/tests/resource/pod/pod_test.go @@ -96,8 +96,8 @@ func (s *PodHierarchySuite) Test_ContainerSpecOnDeployment() { s.Require().NoError(err) testC.LastDeploymentState(t, "nginx-deployment", - assertDeploymentContainerImages("docker.io/library/nginx:1.14.2"), - "nginx deployment should have a single container with nginx:1.14.2 image") + assertDeploymentContainerImages("quay.io/rhacs-eng/qa-multi-arch:nginx-1.21.1"), + "nginx deployment should have a single container with quay.io nginx image") messages := testC.GetFakeCentral().GetAllMessages() uniquePodNames := helper.GetUniquePodNamesFromPrefix(messages, "sensor-integration", "nginx-") @@ -122,8 +122,8 @@ func (s *PodHierarchySuite) Test_ParentlessPodsAreTreatedAsDeployments() { s.Require().NoError(err) testC.LastDeploymentState(t, "nginx-rogue", - assertDeploymentContainerImages("docker.io/library/nginx:1.14.1"), - "nginx standalone pod should have a single container with nginx:1.14.1 image") + assertDeploymentContainerImages("quay.io/rhacs-eng/qa-multi-arch:nginx-1.21.1"), + "nginx standalone pod should have a single container with quay.io nginx image") messages := testC.GetFakeCentral().GetAllMessages() uniqueDeployments := helper.GetUniqueDeploymentNames(messages, "sensor-integration") diff --git a/sensor/tests/resource/pod/yaml/nginx-pod.yaml b/sensor/tests/resource/pod/yaml/nginx-pod.yaml index 50abb906b6f8f..4b416838064d4 100644 --- a/sensor/tests/resource/pod/yaml/nginx-pod.yaml +++ b/sensor/tests/resource/pod/yaml/nginx-pod.yaml @@ -7,6 +7,6 @@ metadata: spec: containers: - name: nginx - image: nginx:1.14.1 + image: quay.io/rhacs-eng/qa-multi-arch:nginx-1.21.1 ports: - containerPort: 80 diff --git a/sensor/tests/resource/pod/yaml/nginx.yaml b/sensor/tests/resource/pod/yaml/nginx.yaml index 685c17aa68e1d..d0fd9d3ca351e 100644 --- a/sensor/tests/resource/pod/yaml/nginx.yaml +++ b/sensor/tests/resource/pod/yaml/nginx.yaml @@ -16,6 
+16,6 @@ spec: spec: containers: - name: nginx - image: nginx:1.14.2 + image: quay.io/rhacs-eng/qa-multi-arch:nginx-1.21.1 ports: - containerPort: 80 diff --git a/sensor/tests/resource/role/yaml/nginx-pod.yaml b/sensor/tests/resource/role/yaml/nginx-pod.yaml index 50abb906b6f8f..4b416838064d4 100644 --- a/sensor/tests/resource/role/yaml/nginx-pod.yaml +++ b/sensor/tests/resource/role/yaml/nginx-pod.yaml @@ -7,6 +7,6 @@ metadata: spec: containers: - name: nginx - image: nginx:1.14.1 + image: quay.io/rhacs-eng/qa-multi-arch:nginx-1.21.1 ports: - containerPort: 80 diff --git a/sensor/tests/resource/role/yaml/nginx.yaml b/sensor/tests/resource/role/yaml/nginx.yaml index 39b53bfbb496e..97b5b4a54fc3b 100644 --- a/sensor/tests/resource/role/yaml/nginx.yaml +++ b/sensor/tests/resource/role/yaml/nginx.yaml @@ -16,7 +16,7 @@ spec: spec: containers: - name: nginx - image: nginx:1.14.2 + image: quay.io/rhacs-eng/qa-multi-arch:nginx-1.21.1 ports: - containerPort: 80 serviceAccountName: nginx-sa diff --git a/sensor/tests/resource/service/yaml/nginx-pod.yaml b/sensor/tests/resource/service/yaml/nginx-pod.yaml index 2ef3123658487..420a2614319a5 100644 --- a/sensor/tests/resource/service/yaml/nginx-pod.yaml +++ b/sensor/tests/resource/service/yaml/nginx-pod.yaml @@ -7,6 +7,6 @@ metadata: spec: containers: - name: nginx - image: nginx:1.14.1 + image: quay.io/rhacs-eng/qa-multi-arch:nginx-1.21.1 ports: - containerPort: 80 diff --git a/sensor/tests/resource/service/yaml/nginx.yaml b/sensor/tests/resource/service/yaml/nginx.yaml index c295e5f91af27..9eb5b5270c387 100644 --- a/sensor/tests/resource/service/yaml/nginx.yaml +++ b/sensor/tests/resource/service/yaml/nginx.yaml @@ -16,6 +16,6 @@ spec: spec: containers: - name: nginx - image: nginx:1.14.2 + image: quay.io/rhacs-eng/qa-multi-arch:nginx-1.21.1 ports: - containerPort: 80 diff --git a/tests/e2e/sensor.sh b/tests/e2e/sensor.sh index 411a2980b607d..21c6611096586 100755 --- a/tests/e2e/sensor.sh +++ b/tests/e2e/sensor.sh @@ -31,6 +31,15 @@ test_sensor() { test_preamble setup_deployment_env false false + + # Prefetch images for OSCI (DaemonSet-based prefetcher) + # For Kind (GitHub Actions), images are loaded directly via kind load + if [[ "${IMAGE_PREFETCH_DISABLED:-false}" != "true" ]]; then + info "Starting image prefetching for sensor integration tests" + image_prefetcher_prebuilt_start + image_prefetcher_prebuilt_await + fi + # shellcheck disable=SC2119 remove_existing_stackrox_resources From 87cdb0967a687842378edcd8a6770ecb2282d00c Mon Sep 17 00:00:00 2001 From: Alex Vulaj Date: Tue, 10 Feb 2026 09:57:50 -0500 Subject: [PATCH 150/232] ROX-32152: Implement label matching in scopecomp with provider pattern (#18661) --- central/policy/matcher/cluster.go | 4 +- central/policy/matcher/deployment.go | 2 +- central/policy/matcher/namespace.go | 2 +- central/policy/service/validator.go | 2 +- pkg/detection/compiled_policy.go | 4 +- pkg/scopecomp/providers.go | 11 ++ pkg/scopecomp/scope.go | 126 ++++++++++++++--- pkg/scopecomp/scope_test.go | 202 ++++++++++++++++++++++++--- 8 files changed, 306 insertions(+), 47 deletions(-) create mode 100644 pkg/scopecomp/providers.go diff --git a/central/policy/matcher/cluster.go b/central/policy/matcher/cluster.go index d255a020be8c5..2c3f38457aea1 100644 --- a/central/policy/matcher/cluster.go +++ b/central/policy/matcher/cluster.go @@ -50,7 +50,7 @@ func (m *clusterMatcher) anyExclusionMatches(exclusions []*storage.Exclusion) bo } func (m *clusterMatcher) exclusionMatches(exclusion *storage.Exclusion) bool { - cs, err := 
scopecomp.CompileScope(exclusion.GetDeployment().GetScope()) + cs, err := scopecomp.CompileScope(exclusion.GetDeployment().GetScope(), nil, nil) if err != nil { utils.Should(errors.Wrap(err, "could not compile excluded scopes")) return false @@ -86,7 +86,7 @@ func (m *clusterMatcher) anyScopeMatches(scopes []*storage.Scope) bool { } func (m *clusterMatcher) scopeMatches(scope *storage.Scope) bool { - cs, err := scopecomp.CompileScope(scope) + cs, err := scopecomp.CompileScope(scope, nil, nil) if err != nil { utils.Should(errors.Wrap(err, "could not compile scope")) return false diff --git a/central/policy/matcher/deployment.go b/central/policy/matcher/deployment.go index ff4256112862c..42884bf0bffd2 100644 --- a/central/policy/matcher/deployment.go +++ b/central/policy/matcher/deployment.go @@ -74,7 +74,7 @@ func (m *deploymentMatcher) anyScopeMatches(scopes []*storage.Scope) bool { } func (m *deploymentMatcher) scopeMatches(scope *storage.Scope) bool { - cs, err := scopecomp.CompileScope(scope) + cs, err := scopecomp.CompileScope(scope, nil, nil) if err != nil { utils.Should(errors.Wrap(err, "could not compile scope")) return false diff --git a/central/policy/matcher/namespace.go b/central/policy/matcher/namespace.go index 9bf96711fae45..7e6c9c2dd50d7 100644 --- a/central/policy/matcher/namespace.go +++ b/central/policy/matcher/namespace.go @@ -65,7 +65,7 @@ func (m *namespaceMatcher) anyScopeMatches(scopes []*storage.Scope) bool { } func (m *namespaceMatcher) scopeMatches(scope *storage.Scope) bool { - cs, err := scopecomp.CompileScope(scope) + cs, err := scopecomp.CompileScope(scope, nil, nil) if err != nil { utils.Should(errors.Wrap(err, "could not compiled scope")) return false diff --git a/central/policy/service/validator.go b/central/policy/service/validator.go index 8c8a9e091fe0d..a60cb26367732 100644 --- a/central/policy/service/validator.go +++ b/central/policy/service/validator.go @@ -341,7 +341,7 @@ func (s *policyValidator) validateScope(scope *storage.Scope) error { if scope.GetNamespace() != "" && scope.GetNamespaceLabel() != nil { return errors.New("scope cannot have both 'namespace' and 'namespace_label' fields populated") } - if _, err := scopecomp.CompileScope(scope); err != nil { + if _, err := scopecomp.CompileScope(scope, nil, nil); err != nil { return errors.Wrap(err, "could not compile scope") } return nil diff --git a/pkg/detection/compiled_policy.go b/pkg/detection/compiled_policy.go index 448ddf49a3dad..0413edf32c733 100644 --- a/pkg/detection/compiled_policy.go +++ b/pkg/detection/compiled_policy.go @@ -47,7 +47,7 @@ func newCompiledPolicy(policy *storage.Policy) (CompiledPolicy, error) { scopes := make([]*scopecomp.CompiledScope, 0, len(policy.GetScope())) for _, s := range policy.GetScope() { - compiledScope, err := scopecomp.CompileScope(s) + compiledScope, err := scopecomp.CompileScope(s, nil, nil) if err != nil { return nil, errors.Wrapf(err, "compiling scope %+v for policy %q", s, policy.GetName()) } @@ -437,7 +437,7 @@ func newCompiledExclusion(exclusion *storage.Exclusion) (*compiledExclusion, err } } if scope := exclusion.GetDeployment().GetScope(); scope != nil { - cs, err := scopecomp.CompileScope(exclusion.GetDeployment().GetScope()) + cs, err := scopecomp.CompileScope(exclusion.GetDeployment().GetScope(), nil, nil) if err != nil { return nil, err } diff --git a/pkg/scopecomp/providers.go b/pkg/scopecomp/providers.go new file mode 100644 index 0000000000000..2a705a9da0c1b --- /dev/null +++ b/pkg/scopecomp/providers.go @@ -0,0 +1,11 @@ +package 
scopecomp + +// ClusterLabelProvider provides cluster labels for a given cluster ID. +type ClusterLabelProvider interface { + GetClusterLabels(clusterID string) (map[string]string, error) +} + +// NamespaceLabelProvider provides namespace labels for a given namespace ID. +type NamespaceLabelProvider interface { + GetNamespaceLabels(namespaceID string) (map[string]string, error) +} diff --git a/pkg/scopecomp/scope.go b/pkg/scopecomp/scope.go index caef0fa616720..d93dc2579d3cb 100644 --- a/pkg/scopecomp/scope.go +++ b/pkg/scopecomp/scope.go @@ -3,47 +3,126 @@ package scopecomp import ( "github.com/pkg/errors" "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/features" + "github.com/stackrox/rox/pkg/logging" "github.com/stackrox/rox/pkg/regexutils" ) +var ( + log = logging.LoggerForModule() +) + +const ( + clusterLabelType = "cluster label" + namespaceLabelType = "namespace label" + deploymentLabelType = "deployment label" +) + // CompiledScope a transformed scope into the relevant regexes type CompiledScope struct { ClusterID string + + ClusterLabelKey regexutils.StringMatcher + ClusterLabelValue regexutils.StringMatcher + Namespace regexutils.StringMatcher + NamespaceLabelKey regexutils.StringMatcher + NamespaceLabelValue regexutils.StringMatcher + LabelKey regexutils.StringMatcher LabelValue regexutils.StringMatcher + + clusterLabelProvider ClusterLabelProvider + namespaceLabelProvider NamespaceLabelProvider +} + +// compileLabelMatchers compiles key and value regex matchers for a label. +func compileLabelMatchers(label *storage.Scope_Label, labelType string) (keyMatcher, valueMatcher regexutils.StringMatcher, err error) { + if label == nil { + return nil, nil, nil + } + + keyMatcher, err = regexutils.CompileWholeStringMatcher(label.GetKey(), regexutils.Flags{CaseInsensitive: true}) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to compile %s key regex", labelType) + } + + valueMatcher, err = regexutils.CompileWholeStringMatcher(label.GetValue(), regexutils.Flags{CaseInsensitive: true}) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to compile %s value regex", labelType) + } + + return keyMatcher, valueMatcher, nil } // CompileScope takes in a scope and compiles it into regexes unless the regexes are invalid -func CompileScope(scope *storage.Scope) (*CompiledScope, error) { +func CompileScope(scope *storage.Scope, clusterLabelProvider ClusterLabelProvider, namespaceLabelProvider NamespaceLabelProvider) (*CompiledScope, error) { namespaceReg, err := regexutils.CompileWholeStringMatcher(scope.GetNamespace(), regexutils.Flags{CaseInsensitive: true}) if err != nil { return nil, errors.Errorf("namespace regex %q could not be compiled", err) } cs := &CompiledScope{ - ClusterID: scope.GetCluster(), - Namespace: namespaceReg, + ClusterID: scope.GetCluster(), + Namespace: namespaceReg, + clusterLabelProvider: clusterLabelProvider, + namespaceLabelProvider: namespaceLabelProvider, } - if scope.GetLabel() != nil { - cs.LabelKey, err = regexutils.CompileWholeStringMatcher(scope.GetLabel().GetKey(), regexutils.Flags{CaseInsensitive: true}) + if features.LabelBasedPolicyScoping.Enabled() { + cs.ClusterLabelKey, cs.ClusterLabelValue, err = compileLabelMatchers(scope.GetClusterLabel(), clusterLabelType) if err != nil { - return nil, errors.Errorf("label key regex %q could not be compiled", err) - } - if cs.LabelKey == nil { - return nil, errors.Errorf("label %q=%q is invalid", scope.GetLabel().GetKey(), scope.GetLabel().GetValue()) + return nil, err 
} - cs.LabelValue, err = regexutils.CompileWholeStringMatcher(scope.GetLabel().GetValue(), regexutils.Flags{CaseInsensitive: true}) + cs.NamespaceLabelKey, cs.NamespaceLabelValue, err = compileLabelMatchers(scope.GetNamespaceLabel(), namespaceLabelType) if err != nil { - return nil, errors.Errorf("label value regex %q could not be compiled", err) + return nil, err } } + + cs.LabelKey, cs.LabelValue, err = compileLabelMatchers(scope.GetLabel(), deploymentLabelType) + if err != nil { + return nil, err + } return cs, nil } +// MatchesClusterLabels evaluates cluster label matchers against a deployment's cluster +func (c *CompiledScope) MatchesClusterLabels(deployment *storage.Deployment) bool { + if !features.LabelBasedPolicyScoping.Enabled() || c.ClusterLabelKey == nil { + return true + } + if c.clusterLabelProvider == nil { + log.Error("Cluster label matcher defined but provider is nil - failing closed") + return false + } + clusterLabels, err := c.clusterLabelProvider.GetClusterLabels(deployment.GetClusterId()) + if err != nil { + log.Errorf("Failed to fetch cluster labels for cluster %s: %v", deployment.GetClusterId(), err) + return false + } + return c.MatchesLabels(c.ClusterLabelKey, c.ClusterLabelValue, clusterLabels) +} + +// MatchesNamespaceLabels evaluates namespace label matchers against a deployment's namespace +func (c *CompiledScope) MatchesNamespaceLabels(deployment *storage.Deployment) bool { + if !features.LabelBasedPolicyScoping.Enabled() || c.NamespaceLabelKey == nil { + return true + } + if c.namespaceLabelProvider == nil { + log.Error("Namespace label matcher defined but provider is nil - failing closed") + return false + } + namespaceLabels, err := c.namespaceLabelProvider.GetNamespaceLabels(deployment.GetNamespaceId()) + if err != nil { + log.Errorf("Failed to fetch namespace labels for namespace %s: %v", deployment.GetNamespaceId(), err) + return false + } + return c.MatchesLabels(c.NamespaceLabelKey, c.NamespaceLabelValue, namespaceLabels) +} + // MatchesDeployment evaluates a compiled scope against a deployment func (c *CompiledScope) MatchesDeployment(deployment *storage.Deployment) bool { if c == nil { @@ -52,22 +131,31 @@ func (c *CompiledScope) MatchesDeployment(deployment *storage.Deployment) bool { if !c.MatchesCluster(deployment.GetClusterId()) { return false } + if !c.MatchesClusterLabels(deployment) { + return false + } if !c.MatchesNamespace(deployment.GetNamespace()) { return false } + if !c.MatchesNamespaceLabels(deployment) { + return false + } + if !c.MatchesLabels(c.LabelKey, c.LabelValue, deployment.GetLabels()) { + return false + } + return true +} - if c.LabelKey == nil { +func (c *CompiledScope) MatchesLabels(keyMatcher regexutils.StringMatcher, valueMatcher regexutils.StringMatcher, labels map[string]string) bool { + if keyMatcher == nil { return true } - - var matched bool - for key, value := range deployment.GetLabels() { - if c.LabelKey.MatchString(key) && c.LabelValue.MatchString(value) { - matched = true - break + for key, value := range labels { + if keyMatcher.MatchString(key) && valueMatcher.MatchString(value) { + return true } } - return matched + return false } // MatchesNamespace evaluates a compiled scope against a namespace diff --git a/pkg/scopecomp/scope_test.go b/pkg/scopecomp/scope_test.go index 1e44f7904d694..cc8356a0e4d07 100644 --- a/pkg/scopecomp/scope_test.go +++ b/pkg/scopecomp/scope_test.go @@ -4,22 +4,45 @@ import ( "testing" "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/features" + 
"github.com/stackrox/rox/pkg/testutils" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +// Mock providers for testing +type mockClusterLabelProvider struct { + labels map[string]string +} + +func (m *mockClusterLabelProvider) GetClusterLabels(clusterID string) (map[string]string, error) { + return m.labels, nil +} + +type mockNamespaceLabelProvider struct { + labels map[string]string +} + +func (m *mockNamespaceLabelProvider) GetNamespaceLabels(namespaceID string) (map[string]string, error) { + return m.labels, nil +} + func TestWithinScope(t *testing.T) { subtests := []struct { - name string - scope *storage.Scope - deployment *storage.Deployment - result bool + name string + scope *storage.Scope + deployment *storage.Deployment + clusterLabels map[string]string + namespaceLabels map[string]string + featureFlagEnabled bool + result bool }{ { - name: "empty scope", - scope: &storage.Scope{}, - deployment: &storage.Deployment{}, - result: true, + name: "empty scope", + scope: &storage.Scope{}, + deployment: &storage.Deployment{}, + featureFlagEnabled: false, + result: true, }, { name: "matching cluster", @@ -29,7 +52,8 @@ func TestWithinScope(t *testing.T) { deployment: &storage.Deployment{ ClusterId: "cluster", }, - result: true, + featureFlagEnabled: false, + result: true, }, { name: "not matching cluster", @@ -39,7 +63,8 @@ func TestWithinScope(t *testing.T) { deployment: &storage.Deployment{ ClusterId: "cluster", }, - result: false, + featureFlagEnabled: false, + result: false, }, { name: "matching namespace", @@ -49,7 +74,8 @@ func TestWithinScope(t *testing.T) { deployment: &storage.Deployment{ Namespace: "namespace", }, - result: true, + featureFlagEnabled: false, + result: true, }, { name: "not matching namespace", @@ -59,7 +85,8 @@ func TestWithinScope(t *testing.T) { deployment: &storage.Deployment{ Namespace: "namespace", }, - result: false, + featureFlagEnabled: false, + result: false, }, { name: "matching cluster with no namespace scope", @@ -70,7 +97,8 @@ func TestWithinScope(t *testing.T) { ClusterId: "cluster", Namespace: "namespace", }, - result: true, + featureFlagEnabled: false, + result: true, }, { name: "matching label", @@ -86,7 +114,8 @@ func TestWithinScope(t *testing.T) { "key2": "value2", }, }, - result: true, + featureFlagEnabled: false, + result: true, }, { name: "not matching label value", @@ -102,7 +131,8 @@ func TestWithinScope(t *testing.T) { "key2": "value2", }, }, - result: false, + featureFlagEnabled: false, + result: false, }, { name: "not matching key value", @@ -118,7 +148,8 @@ func TestWithinScope(t *testing.T) { "key2": "value2", }, }, - result: false, + featureFlagEnabled: false, + result: false, }, { name: "match all", @@ -138,7 +169,8 @@ func TestWithinScope(t *testing.T) { "key2": "value2", }, }, - result: true, + featureFlagEnabled: false, + result: true, }, { name: "scope with cluster_label", @@ -151,7 +183,9 @@ func TestWithinScope(t *testing.T) { deployment: &storage.Deployment{ ClusterId: "cluster", }, - result: true, + clusterLabels: map[string]string{"env": "prod"}, + featureFlagEnabled: true, + result: true, }, { name: "scope with namespace_label", @@ -164,7 +198,9 @@ func TestWithinScope(t *testing.T) { deployment: &storage.Deployment{ Namespace: "default", }, - result: true, + namespaceLabels: map[string]string{"team": "backend"}, + featureFlagEnabled: true, + result: true, }, { name: "scope with cluster_label and namespace_label", @@ -182,11 +218,135 @@ func TestWithinScope(t *testing.T) { ClusterId: 
"cluster", Namespace: "default", }, - result: true, + clusterLabels: map[string]string{"env": "prod"}, + namespaceLabels: map[string]string{"team": "backend"}, + featureFlagEnabled: true, + result: true, + }, + // Test cases verifying feature flag behavior + { + name: "cluster_label mismatch with flag OFF is ignored", + scope: &storage.Scope{ + ClusterLabel: &storage.Scope_Label{ + Key: "env", + Value: "prod", + }, + }, + deployment: &storage.Deployment{ + ClusterId: "cluster", + }, + clusterLabels: map[string]string{"env": "dev"}, + featureFlagEnabled: false, + result: true, + }, + { + name: "cluster_label mismatch with flag ON fails", + scope: &storage.Scope{ + ClusterLabel: &storage.Scope_Label{ + Key: "env", + Value: "prod", + }, + }, + deployment: &storage.Deployment{ + ClusterId: "cluster", + }, + clusterLabels: map[string]string{"env": "dev"}, + featureFlagEnabled: true, + result: false, + }, + { + name: "namespace_label mismatch with flag OFF is ignored", + scope: &storage.Scope{ + NamespaceLabel: &storage.Scope_Label{ + Key: "team", + Value: "backend", + }, + }, + deployment: &storage.Deployment{ + Namespace: "default", + }, + namespaceLabels: map[string]string{"team": "frontend"}, + featureFlagEnabled: false, + result: true, + }, + { + name: "namespace_label mismatch with flag ON fails", + scope: &storage.Scope{ + NamespaceLabel: &storage.Scope_Label{ + Key: "team", + Value: "backend", + }, + }, + deployment: &storage.Deployment{ + Namespace: "default", + }, + namespaceLabels: map[string]string{"team": "frontend"}, + featureFlagEnabled: true, + result: false, + }, + // Test cases for nil provider handling + { + name: "nil providers with no label matchers should pass", + scope: &storage.Scope{ + Cluster: "cluster", + Namespace: "namespace", + }, + deployment: &storage.Deployment{ + ClusterId: "cluster", + Namespace: "namespace", + }, + clusterLabels: nil, + namespaceLabels: nil, + featureFlagEnabled: true, + result: true, + }, + { + name: "nil cluster provider with cluster_label matcher should fail", + scope: &storage.Scope{ + ClusterLabel: &storage.Scope_Label{ + Key: "env", + Value: "prod", + }, + }, + deployment: &storage.Deployment{ + ClusterId: "cluster", + }, + clusterLabels: nil, + namespaceLabels: nil, + featureFlagEnabled: true, + result: false, + }, + { + name: "nil namespace provider with namespace_label matcher should fail", + scope: &storage.Scope{ + NamespaceLabel: &storage.Scope_Label{ + Key: "team", + Value: "backend", + }, + }, + deployment: &storage.Deployment{ + Namespace: "default", + }, + clusterLabels: nil, + namespaceLabels: nil, + featureFlagEnabled: true, + result: false, }, } for _, test := range subtests { - cs, err := CompileScope(test.scope) + testutils.MustUpdateFeature(t, features.LabelBasedPolicyScoping, test.featureFlagEnabled) + + // Create mock providers that return test data + var clusterProvider ClusterLabelProvider + var namespaceProvider NamespaceLabelProvider + if test.clusterLabels != nil { + clusterProvider = &mockClusterLabelProvider{labels: test.clusterLabels} + } + if test.namespaceLabels != nil { + namespaceProvider = &mockNamespaceLabelProvider{labels: test.namespaceLabels} + } + + cs, err := CompileScope(test.scope, clusterProvider, namespaceProvider) require.NoError(t, err) assert.Equalf(t, test.result, cs.MatchesDeployment(test.deployment), "Failed test '%s'", test.name) } From 6f0efeb0e344cce5a3dd8eb2cab361642c17389a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Luis=20Valero=20Mart=C3=ADn?= Date: Tue, 10 Feb 2026 16:38:35 +0100 
Subject: [PATCH 151/232] ROX-32262: Buffered consumer implementation (#18798) Co-authored-by: Claude Sonnet 4.5 --- sensor/common/pubsub/consumer/buffered.go | 175 ++++++++ .../common/pubsub/consumer/buffered_test.go | 382 ++++++++++++++++++ 2 files changed, 557 insertions(+) create mode 100644 sensor/common/pubsub/consumer/buffered.go create mode 100644 sensor/common/pubsub/consumer/buffered_test.go diff --git a/sensor/common/pubsub/consumer/buffered.go b/sensor/common/pubsub/consumer/buffered.go new file mode 100644 index 0000000000000..5cf449e0199fd --- /dev/null +++ b/sensor/common/pubsub/consumer/buffered.go @@ -0,0 +1,175 @@ +package consumer + +import ( + "time" + + "github.com/pkg/errors" + "github.com/stackrox/rox/pkg/concurrency" + "github.com/stackrox/rox/pkg/safe" + "github.com/stackrox/rox/sensor/common/pubsub" + pubsubErrors "github.com/stackrox/rox/sensor/common/pubsub/errors" + "github.com/stackrox/rox/sensor/common/pubsub/metrics" +) + +const ( + defaultBufferSize = 1000 +) + +// bufferedEvent wraps an event with its error channel to pipe callback errors back to the caller +type bufferedEvent struct { + event pubsub.Event + errC chan<- error + startTime time.Time +} + +func WithBufferedConsumerSize(size int) pubsub.ConsumerOption { + return func(consumer pubsub.Consumer) { + impl, ok := consumer.(*BufferedConsumer) + if !ok { + return + } + if size < 0 { + return + } + impl.size = size + } +} + +func NewBufferedConsumer(laneID pubsub.LaneID, topic pubsub.Topic, consumerID pubsub.ConsumerID, callback pubsub.EventCallback, opts ...pubsub.ConsumerOption) (pubsub.Consumer, error) { + if callback == nil { + return nil, errors.Wrap(pubsubErrors.UndefinedEventCallbackErr, "") + } + ret := &BufferedConsumer{ + laneID: laneID, + topic: topic, + consumerID: consumerID, + callback: callback, + stopper: concurrency.NewStopper(), + size: defaultBufferSize, + } + for _, opt := range opts { + opt(ret) + } + ret.buffer = safe.NewChannel[*bufferedEvent](ret.size, ret.stopper.LowLevel().GetStopRequestSignal()) + go ret.run() + return ret, nil +} + +type BufferedConsumer struct { + laneID pubsub.LaneID + topic pubsub.Topic + consumerID pubsub.ConsumerID + callback pubsub.EventCallback + size int + stopper concurrency.Stopper + buffer *safe.Channel[*bufferedEvent] +} + +func (c *BufferedConsumer) Consume(waitable concurrency.Waitable, event pubsub.Event) <-chan error { + errC := make(chan error, 1) + // No goroutine needed: all operations in consume are non-blocking. + // The select statements use default cases, TryWrite is non-blocking by design, + // and errC has size 1 so the single send on error won't block. + c.consume(waitable, event, errC) + return errC +} + +func (c *BufferedConsumer) consume(waitable concurrency.Waitable, event pubsub.Event, errC chan<- error) { + // IMPORTANT: All operations must remain non-blocking. + start := time.Now() + operation := metrics.ConsumerError + + // Priority 1: Check if already cancelled + select { + case <-waitable.Done(): + c.closeAndRecord(errC, operation, start) + return + case <-c.stopper.Flow().StopRequested(): + c.closeAndRecord(errC, operation, start) + return + default: + } + + // Wrap event with its errC to pipe callback errors back to caller + wrappedEvent := &bufferedEvent{ + event: event, + errC: errC, + startTime: start, + } + + // SafeChannel.TryWrite is non-blocking by design, so it's safe to call directly + writeErr := c.buffer.TryWrite(wrappedEvent) + + // Priority 2: If write failed, send error and close. Otherwise keep errC open. 
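For context, the caller-facing contract that this function implements can be seen in a short sketch assembled from the tests in buffered_test.go further below; the package name, the newConsumer and publishOne helpers, and the buffer size of 100 are illustrative and not part of this change. The channel returned by Consume is closed without a value on success, receives the callback error and is then closed on failure, and receives safe.ErrChannelFull when the buffer is full at enqueue time.

package example

import (
	"context"
	"errors"

	"github.com/stackrox/rox/pkg/safe"
	"github.com/stackrox/rox/sensor/common/pubsub"
	"github.com/stackrox/rox/sensor/common/pubsub/consumer"
)

// newConsumer builds a buffered consumer the same way the tests do; the
// callback's non-nil return values are piped back to producers via errC.
// The caller owns the consumer and is expected to call Stop() when done.
func newConsumer() (pubsub.Consumer, error) {
	return consumer.NewBufferedConsumer(
		pubsub.DefaultLane, pubsub.DefaultTopic, pubsub.DefaultConsumer,
		func(ev pubsub.Event) error {
			// Process the event here.
			return nil
		},
		consumer.WithBufferedConsumerSize(100), // illustrative; the default is 1000
	)
}

// publishOne sends a single event and interprets the returned error channel.
func publishOne(ctx context.Context, c pubsub.Consumer, ev pubsub.Event) error {
	errC := c.Consume(ctx, ev)
	if err, ok := <-errC; ok {
		// A value arrived before close: either the callback's error or, when
		// the buffer was full at enqueue time, safe.ErrChannelFull.
		if errors.Is(err, safe.ErrChannelFull) {
			// The event was not enqueued; an illustrative caller could retry here.
		}
		return err
	}
	// errC was closed without a value: the callback completed successfully.
	return nil
}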
+ if writeErr != nil { + errC <- writeErr // Won't block - buffered channel of size 1 + c.closeAndRecord(errC, operation, start) + } + // If writeErr is nil, errC stays open and will be closed later when callback completes +} + +func (c *BufferedConsumer) Stop() { + c.stopper.Client().Stop() + <-c.stopper.Client().Stopped().Done() + c.buffer.Close() + // Drain events and close their errC + for ev := range c.buffer.Chan() { + close(ev.errC) + } +} + +func (c *BufferedConsumer) run() { + defer c.stopper.Flow().ReportStopped() + for { + // Priority 1: Check if stop requested + select { + case <-c.stopper.Flow().StopRequested(): + return + default: + } + // Priority 2: Read event, but respect stop during blocking read + select { + case <-c.stopper.Flow().StopRequested(): + return + case wrappedEv, ok := <-c.buffer.Chan(): + if !ok { + return + } + c.handleEvent(wrappedEv) + } + } +} + +func (c *BufferedConsumer) handleEvent(wrappedEv *bufferedEvent) { + // Execute callback in separate goroutine to prevent blocking the consumer + callbackDone := make(chan error, 1) + go func() { + callbackDone <- c.callback(wrappedEv.event) + close(callbackDone) + }() + // Wait for callback or stopper, allowing clean exit if callback blocks + operation := metrics.Processed + select { + case <-c.stopper.Flow().StopRequested(): + // Consumer is stopping - close the errC without waiting for callback + operation = metrics.ConsumerError + case err := <-callbackDone: + // Callback completed - send error if present, otherwise just close errC + if err != nil { + operation = metrics.ConsumerError + wrappedEv.errC <- err + } + // On success (err == nil), defer close handles it without sending + } + c.closeAndRecord(wrappedEv.errC, operation, wrappedEv.startTime) +} + +func (c *BufferedConsumer) closeAndRecord(errC chan<- error, op metrics.Operation, start time.Time) { + close(errC) + c.recordMetrics(op, start) +} + +func (c *BufferedConsumer) recordMetrics(op metrics.Operation, start time.Time) { + metrics.ObserveProcessingDuration(c.laneID, c.topic, c.consumerID, time.Since(start), op) + metrics.RecordConsumerOperation(c.laneID, c.topic, c.consumerID, op) +} diff --git a/sensor/common/pubsub/consumer/buffered_test.go b/sensor/common/pubsub/consumer/buffered_test.go new file mode 100644 index 0000000000000..afa258e35d50d --- /dev/null +++ b/sensor/common/pubsub/consumer/buffered_test.go @@ -0,0 +1,382 @@ +package consumer + +import ( + "context" + "sync/atomic" + "testing" + "testing/synctest" + + "github.com/pkg/errors" + "github.com/stackrox/rox/pkg/concurrency" + "github.com/stackrox/rox/pkg/safe" + "github.com/stackrox/rox/pkg/sync" + "github.com/stackrox/rox/pkg/testutils/goleak" + "github.com/stackrox/rox/sensor/common/pubsub" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewBufferedConsumer_NilCallback(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + c, err := NewBufferedConsumer(pubsub.DefaultLane, pubsub.DefaultTopic, pubsub.DefaultConsumer, nil) + assert.Error(t, err) + assert.Nil(t, c) +} + +func TestBufferedConsumer_ConsumeSuccess(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + synctest.Test(t, func(t *testing.T) { + callbackCalled := false + eventData := "test-data" + + c, err := NewBufferedConsumer(pubsub.DefaultLane, pubsub.DefaultTopic, pubsub.DefaultConsumer, func(event pubsub.Event) error { + callbackCalled = true + te, ok := event.(*testEvent) + require.True(t, ok) + assert.Equal(t, eventData, te.data) + return nil + }) + 
require.NoError(t, err) + defer c.Stop() + + ctx := context.Background() + errC := c.Consume(ctx, &testEvent{data: eventData}) + + // Wait for callback to complete + synctest.Wait() + assert.True(t, callbackCalled) + + // Verify errC closes without sending anything (success case) + err, ok := <-errC + assert.False(t, ok, "errC should be closed on success without sending nil") + assert.Nil(t, err) + }) +} + +func TestBufferedConsumer_CallbackError(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + synctest.Test(t, func(t *testing.T) { + expectedErr := errors.New("callback error") + + c, err := NewBufferedConsumer(pubsub.DefaultLane, pubsub.DefaultTopic, pubsub.DefaultConsumer, func(event pubsub.Event) error { + return expectedErr + }) + require.NoError(t, err) + defer c.Stop() + + ctx := context.Background() + errC := c.Consume(ctx, &testEvent{data: "test"}) + + // Wait for callback to complete + synctest.Wait() + + // Verify errC receives the callback error and closes + err, ok := <-errC + assert.True(t, ok, "errC should be open for error") + assert.Equal(t, expectedErr, err) + + err, ok = <-errC + assert.False(t, ok, "errC should be closed after error") + assert.Nil(t, err) + }) +} + +func TestBufferedConsumer_BufferFull(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + synctest.Test(t, func(t *testing.T) { + blockCallback := concurrency.NewSignal() + + c, err := NewBufferedConsumer( + pubsub.DefaultLane, + pubsub.DefaultTopic, + pubsub.DefaultConsumer, + func(event pubsub.Event) error { + <-blockCallback.Done() + return nil + }, + WithBufferedConsumerSize(1), + ) + require.NoError(t, err) + defer c.Stop() + + ctx := context.Background() + + // Block the buffer + errC1 := c.Consume(ctx, &testEvent{data: "1"}) + synctest.Wait() + + // This event should be buffered + errC2 := c.Consume(ctx, &testEvent{data: "2"}) + synctest.Wait() + + // Buffer should be full - this should fail with ErrChannelFull + errC3 := c.Consume(ctx, &testEvent{data: "3"}) + synctest.Wait() + + // Second consume should get buffer full error + err, ok := <-errC3 + assert.True(t, ok) + assert.Equal(t, safe.ErrChannelFull, err) + + err, ok = <-errC3 + assert.False(t, ok, "errC2 should be closed") + assert.Nil(t, err) + + // First errC should still be open (callback blocked) + select { + case <-errC1: + t.Fatal("errC1 should still be open, callback is blocked") + default: + } + // Second errC should still be open (callback blocked) + select { + case <-errC2: + t.Fatal("errC2 should still be open, callback is blocked") + default: + } + + // Unblock callback and wait for completion + blockCallback.Signal() + synctest.Wait() + + // Now errC1 should complete (close without sending on success) + err, ok = <-errC1 + assert.False(t, ok, "errC1 should be closed on success without sending nil") + assert.Nil(t, err) + + // Now errC2 should complete (close without sending on success) + err, ok = <-errC2 + assert.False(t, ok, "errC2 should be closed on success without sending nil") + assert.Nil(t, err) + }) +} + +func TestBufferedConsumer_WaitableCancellation(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + synctest.Test(t, func(t *testing.T) { + c, err := NewBufferedConsumer(pubsub.DefaultLane, pubsub.DefaultTopic, pubsub.DefaultConsumer, func(event pubsub.Event) error { + return nil + }) + require.NoError(t, err) + defer c.Stop() + + ctx, cancel := context.WithCancel(context.Background()) + cancel() // Cancel before Consume + + errC := c.Consume(ctx, &testEvent{data: "test"}) + synctest.Wait() + + 
// errC should close immediately without processing + err, ok := <-errC + assert.False(t, ok, "errC should be closed when waitable already cancelled") + assert.Nil(t, err) + }) +} + +func TestBufferedConsumer_StopDuringConsume(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + synctest.Test(t, func(t *testing.T) { + blockCallback := make(chan struct{}) + + c, err := NewBufferedConsumer(pubsub.DefaultLane, pubsub.DefaultTopic, pubsub.DefaultConsumer, func(event pubsub.Event) error { + <-blockCallback + // We should not receive an error since we are Stopping before reading + return errors.New("some error") + }) + require.NoError(t, err) + + ctx := context.Background() + errC := c.Consume(ctx, &testEvent{data: "test"}) + + // Wait for event to be buffered and callback to start + synctest.Wait() + + // Stop the consumer (callback is still blocked) + c.Stop() + + // errC should close without sending error + err, ok := <-errC + assert.False(t, ok, "errC should be closed after Stop") + assert.Nil(t, err) + // close blockCallback to not leak goroutines + close(blockCallback) + }) +} + +func TestBufferedConsumer_ConcurrentConsume(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + synctest.Test(t, func(t *testing.T) { + const numEvents = 10 + var callbackCount atomic.Int32 + + c, err := NewBufferedConsumer(pubsub.DefaultLane, pubsub.DefaultTopic, pubsub.DefaultConsumer, func(event pubsub.Event) error { + callbackCount.Add(1) + return nil + }) + require.NoError(t, err) + defer c.Stop() + + ctx := context.Background() + var errChannelsLock sync.Mutex + var errChannels []<-chan error + + // Consume multiple events concurrently + for range numEvents { + go func() { + errC := c.Consume(ctx, &testEvent{data: "test"}) + concurrency.WithLock(&errChannelsLock, func() { + errChannels = append(errChannels, errC) + }) + }() + } + + // Wait for all to complete + synctest.Wait() + + // All callbacks should have been called + assert.Equal(t, int32(numEvents), callbackCount.Load()) + + // All errC channels should complete successfully (close without sending) + for i, errC := range errChannels { + err, ok := <-errC + assert.False(t, ok, "errC %d should be closed on success without sending nil", i) + assert.Nil(t, err, "errC %d should have no error", i) + } + }) +} + +func TestBufferedConsumer_WithBufferedConsumerSize(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + c, err := NewBufferedConsumer( + pubsub.DefaultLane, + pubsub.DefaultTopic, + pubsub.DefaultConsumer, + func(event pubsub.Event) error { return nil }, + WithBufferedConsumerSize(5), + ) + require.NoError(t, err) + defer c.Stop() + + impl, ok := c.(*BufferedConsumer) + require.True(t, ok) + assert.Equal(t, 5, impl.size) + assert.Equal(t, 5, impl.buffer.Cap()) +} + +func TestBufferedConsumer_WithBufferedConsumerSize_Negative(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + // Negative size should be ignored + c, err := NewBufferedConsumer( + pubsub.DefaultLane, + pubsub.DefaultTopic, + pubsub.DefaultConsumer, + func(event pubsub.Event) error { return nil }, + WithBufferedConsumerSize(-1), + ) + require.NoError(t, err) + defer c.Stop() + + impl, ok := c.(*BufferedConsumer) + require.True(t, ok) + assert.Equal(t, 1000, impl.size, "should use default size") +} + +func TestBufferedConsumer_StopIdempotent(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + c, err := NewBufferedConsumer(pubsub.DefaultLane, pubsub.DefaultTopic, pubsub.DefaultConsumer, func(event pubsub.Event) error { + return nil + }) + 
require.NoError(t, err) + + // Multiple Stop() calls should not panic + c.Stop() + c.Stop() + c.Stop() +} + +func TestBufferedConsumer_ConsumeAfterStop(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + synctest.Test(t, func(t *testing.T) { + c, err := NewBufferedConsumer(pubsub.DefaultLane, pubsub.DefaultTopic, pubsub.DefaultConsumer, func(event pubsub.Event) error { + return nil + }) + require.NoError(t, err) + + c.Stop() + synctest.Wait() + + ctx := context.Background() + errC := c.Consume(ctx, &testEvent{data: "test"}) + synctest.Wait() + + // errC should be closed immediately without error since stopper is triggered + err, ok := <-errC + assert.False(t, ok, "errC should be closed when consumer is stopped") + assert.Nil(t, err) + }) +} + +func TestBufferedConsumer_StopDrainsBufferedEvents(t *testing.T) { + defer goleak.AssertNoGoroutineLeaks(t) + + synctest.Test(t, func(t *testing.T) { + blockCallback := make(chan struct{}) + + c, err := NewBufferedConsumer( + pubsub.DefaultLane, + pubsub.DefaultTopic, + pubsub.DefaultConsumer, + func(event pubsub.Event) error { + <-blockCallback + // We should not receive an error since we are Stopping before reading + return errors.New("some error") + }, + WithBufferedConsumerSize(3), + ) + require.NoError(t, err) + + ctx := context.Background() + + // Consume first event - it will be picked up by the consumer and block on callback + errC1 := c.Consume(ctx, &testEvent{data: "1"}) + synctest.Wait() + + // Consume second and third events - they will be buffered but not yet processed + errC2 := c.Consume(ctx, &testEvent{data: "2"}) + errC3 := c.Consume(ctx, &testEvent{data: "3"}) + synctest.Wait() + + // Stop the consumer while events 2 and 3 are still buffered + c.Stop() + + // All errC channels should be closed by Stop() + // errC1: closed because handleEvent detects stop + // errC2 and errC3: closed by the drain loop in Stop() + + err, ok := <-errC1 + assert.False(t, ok, "errC1 should be closed after Stop") + assert.Nil(t, err) + + err, ok = <-errC2 + assert.False(t, ok, "errC2 should be closed by drain loop in Stop") + assert.Nil(t, err) + + err, ok = <-errC3 + assert.False(t, ok, "errC3 should be closed by drain loop in Stop") + assert.Nil(t, err) + + // close blockCallback to not leak goroutines + close(blockCallback) + }) +} From f5f9db8e2f2d7f3e3d0e24e8c55cfa89f056813b Mon Sep 17 00:00:00 2001 From: David Caravello <119438707+dcaravel@users.noreply.github.com> Date: Tue, 10 Feb 2026 10:51:31 -0600 Subject: [PATCH 152/232] ROX-33051: Increase rhel-vex updater timeout (#18926) --- .../scanner-versioned-definitions-update.yaml | 3 ++ scanner/updater/export.go | 52 ++++++++++++++++++- 2 files changed, 53 insertions(+), 2 deletions(-) diff --git a/.github/workflows/scanner-versioned-definitions-update.yaml b/.github/workflows/scanner-versioned-definitions-update.yaml index 54622a9d776cf..12c7b57fe9f44 100644 --- a/.github/workflows/scanner-versioned-definitions-update.yaml +++ b/.github/workflows/scanner-versioned-definitions-update.yaml @@ -204,6 +204,7 @@ jobs: env: SCANNER_BUNDLE_VERSION: ${{ matrix.version }} ROX_GIT_REF: ${{ matrix.ref }} + STACKROX_RHEL_VEX_COMPRESSED_FILE_TIMEOUT: "10m" steps: # Checkout master to get the latest local actions - name: Checkout repository @@ -288,6 +289,8 @@ jobs: - /tmp:/tmp - /usr:/mnt/usr - /opt:/mnt/opt + env: + STACKROX_RHEL_VEX_COMPRESSED_FILE_TIMEOUT: "10m" steps: # Checkout master to get the latest local actions - name: Checkout repository diff --git a/scanner/updater/export.go 
b/scanner/updater/export.go index 401bb83582a55..5201c18cc8565 100644 --- a/scanner/updater/export.go +++ b/scanner/updater/export.go @@ -12,10 +12,12 @@ import ( "github.com/klauspost/compress/zstd" "github.com/pkg/errors" + "github.com/quay/claircore" "github.com/quay/claircore/enricher/epss" "github.com/quay/claircore/libvuln/driver" "github.com/quay/claircore/libvuln/jsonblob" "github.com/quay/claircore/libvuln/updates" + "github.com/quay/claircore/rhel/vex" "github.com/quay/zlog" "github.com/stackrox/rox/scanner/enricher/csaf" "github.com/stackrox/rox/scanner/enricher/nvd" @@ -26,6 +28,10 @@ import ( _ "github.com/quay/claircore/updater/defaults" ) +const ( + rhelVexUpdaterName = "rhel-vex" +) + var ( // ccUpdaterSets represents Claircore updater sets to initialize. ccUpdaterSets = []string{ @@ -35,7 +41,7 @@ var ( "oracle", "osv", "photon", - "rhel-vex", + rhelVexUpdaterName, "suse", "ubuntu", } @@ -66,8 +72,13 @@ func Export(ctx context.Context, outputDir string, opts *ExportOptions) error { bundles["epss"] = epssOpts() bundles["stackrox-rhel-csaf"] = redhatCSAFOpts() + // Claircore Updaters. for _, uSet := range ccUpdaterSets { - bundles[uSet] = []updates.ManagerOption{updates.WithEnabled([]string{uSet})} + managerOpts := []updates.ManagerOption{updates.WithEnabled([]string{uSet})} + if uSet == rhelVexUpdaterName { + managerOpts = rhelVexOpts() + } + bundles[uSet] = managerOpts } // Rate limit to ~16 requests/second by default. @@ -169,6 +180,43 @@ func epssOpts() []updates.ManagerOption { } } +func rhelVexOpts() []updates.ManagerOption { + return []updates.ManagerOption{ + updates.WithEnabled([]string{rhelVexUpdaterName}), + updates.WithConfigs(map[string]driver.ConfigUnmarshaler{ + rhelVexUpdaterName: func(i any) error { + ctx := zlog.ContextWithValues(context.Background(), "updater", rhelVexUpdaterName) + + // This function gets called for both the Factory and the Updater. + // We only need to configure the Factory (which has the CompressedFileTimeout field). + switch cfg := i.(type) { + case *vex.FactoryConfig: + // Configure the factory with custom timeout. + timeout := os.Getenv("STACKROX_RHEL_VEX_COMPRESSED_FILE_TIMEOUT") + if timeout != "" { + parsedTimeout, err := time.ParseDuration(timeout) + if err != nil { + zlog.Warn(ctx). + Err(err). + Msg("using default STACKROX_RHEL_VEX_COMPRESSED_FILE_TIMEOUT due to invalid duration") + } else { + cfg.CompressedFileTimeout = claircore.Duration(parsedTimeout) + zlog.Info(ctx). + Str("timeout", parsedTimeout.String()). + Msg("using compressed file timeout") + } + } + case *vex.UpdaterConfig: + // Updater config - nothing to configure here. + default: + return fmt.Errorf("rhel-vex: unexpected config type: %T", i) + } + return nil + }, + }), + } +} + // TODO(ROX-26672): remove this. 
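As a side note for anyone setting the override: STACKROX_RHEL_VEX_COMPRESSED_FILE_TIMEOUT is parsed with time.ParseDuration, so it accepts Go duration strings such as the "10m" used in the workflow above, and an invalid value only logs a warning and keeps the claircore default. A tiny stand-alone sketch of the accepted format (the sample values are illustrative):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Illustrative values; anything time.ParseDuration rejects would leave the
	// updater's CompressedFileTimeout at its default.
	for _, v := range []string{"10m", "90s", "1h30m", "not-a-duration"} {
		d, err := time.ParseDuration(v)
		if err != nil {
			fmt.Printf("%q rejected, default kept: %v\n", v, err)
			continue
		}
		fmt.Printf("%q -> %s\n", v, d)
	}
}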
func redhatCSAFOpts() []updates.ManagerOption { return []updates.ManagerOption{ From 322ff13347c63bfa668a5373c3d5cc6c69482a73 Mon Sep 17 00:00:00 2001 From: Stephan Hesselmann Date: Tue, 10 Feb 2026 18:39:20 +0100 Subject: [PATCH 153/232] ROX-32817: isolate & parallelize tests (#18866) --- tests/compliance_operator_v2_test.go | 294 ++++++++++++++------------- 1 file changed, 154 insertions(+), 140 deletions(-) diff --git a/tests/compliance_operator_v2_test.go b/tests/compliance_operator_v2_test.go index 4e23fd4b76e66..ef99027b89329 100644 --- a/tests/compliance_operator_v2_test.go +++ b/tests/compliance_operator_v2_test.go @@ -4,7 +4,6 @@ package tests import ( "context" - "errors" "fmt" "testing" "time" @@ -15,7 +14,6 @@ import ( v1 "github.com/stackrox/rox/generated/api/v1" v2 "github.com/stackrox/rox/generated/api/v2" "github.com/stackrox/rox/pkg/protoconv/schedule" - "github.com/stackrox/rox/pkg/retry" "github.com/stackrox/rox/pkg/testutils" "github.com/stackrox/rox/pkg/testutils/centralgrpc" "github.com/stackrox/rox/pkg/uuid" @@ -46,9 +44,6 @@ const ( ) var ( - scanName = "sync-test" - initialProfiles = []string{"ocp4-cis"} - updatedProfiles = []string{"ocp4-high", "ocp4-cis-node"} initialSchedule = &v2.Schedule{ Hour: 12, Minute: 0, @@ -69,18 +64,9 @@ var ( }, }, } - scanConfig = v2.ComplianceScanConfiguration{ - ScanName: scanName, - ScanConfig: &v2.BaseComplianceScanConfigurationSettings{ - Description: scanName, - OneTimeScan: false, - Profiles: initialProfiles, - ScanSchedule: initialSchedule, - }, - } ) -func scaleToN(ctx context.Context, client kubernetes.Interface, deploymentName string, namespace string, replicas int32) (err error) { +func scaleToN(ctx context.Context, t *testing.T, client kubernetes.Interface, deploymentName string, namespace string, replicas int32) { scaleRequest := &autoscalingV1.Scale{ Spec: autoscalingV1.ScaleSpec{ Replicas: replicas, @@ -91,11 +77,10 @@ func scaleToN(ctx context.Context, client kubernetes.Interface, deploymentName s }, } - _, err = client.AppsV1().Deployments(namespace).UpdateScale(ctx, deploymentName, scaleRequest, metav1.UpdateOptions{}) - if err != nil { - return retry.MakeRetryable(err) - } - return nil + require.EventuallyWithT(t, func(c *assert.CollectT) { + _, err := client.AppsV1().Deployments(namespace).UpdateScale(ctx, deploymentName, scaleRequest, metav1.UpdateOptions{}) + require.NoErrorf(c, err, "failed to scale %q to %q replicas", deploymentName, replicas) + }, defaultTimeout, defaultInterval) } func createDynamicClient(t testutils.T) dynclient.Client { @@ -131,8 +116,7 @@ func createDynamicClient(t testutils.T) dynclient.Client { return client } -func waitForComplianceSuiteToComplete(t *testing.T, suiteName string, interval, timeout time.Duration) { - client := createDynamicClient(t) +func waitForComplianceSuiteToComplete(t *testing.T, client dynclient.Client, suiteName string, interval, timeout time.Duration) { ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() @@ -154,29 +138,39 @@ func waitForComplianceSuiteToComplete(t *testing.T, suiteName string, interval, t.Logf("ComplianceSuite %s has reached DONE phase", suiteName) } -func cleanUpResources(ctx context.Context, t *testing.T, resourceName string, namespace string) { - client := createDynamicClient(t) - scanSetting := &complianceoperatorv1.ScanSetting{} - scanSettingBinding := &complianceoperatorv1.ScanSettingBinding{} - err := client.Get(ctx, types.NamespacedName{Name: resourceName, Namespace: namespace}, scanSetting) - if err == nil { 
- _ = client.Delete(ctx, scanSetting) - } - err = client.Get(ctx, types.NamespacedName{Name: resourceName, Namespace: namespace}, scanSettingBinding) - if err == nil { - _ = client.Delete(ctx, scanSettingBinding) - } +func deleteResource[T any, PT interface { + dynclient.Object + *T +}](ctx context.Context, t *testing.T, client dynclient.Client, name, namespace string) { + key := types.NamespacedName{Name: name, Namespace: namespace} + + assert.EventuallyWithT(t, func(c *assert.CollectT) { + var obj T + ptr := PT(&obj) + ptr.SetName(name) + ptr.SetNamespace(namespace) + err := client.Delete(ctx, ptr) + if err != nil && !errors2.IsNotFound(err) { + t.Logf("failed to delete %T %s/%s: %v", ptr, namespace, name, err) + } + err = client.Get(ctx, key, ptr) + require.True(c, errors2.IsNotFound(err), "%T %s/%s still exists", ptr, namespace, name) + }, defaultTimeout, defaultInterval) +} + +func cleanUpResources(ctx context.Context, t *testing.T, client dynclient.Client, resourceName string, namespace string) { + deleteResource[complianceoperatorv1.ScanSettingBinding](ctx, t, client, resourceName, namespace) + deleteResource[complianceoperatorv1.ScanSetting](ctx, t, client, resourceName, namespace) } func assertResourceDoesNotExist[T any, PT interface { dynclient.Object *T -}](ctx context.Context, t testutils.T, name, namespace string) { - client := createDynamicClient(t) - require.Eventually(t, func() bool { +}](ctx context.Context, t testutils.T, client dynclient.Client, name, namespace string) { + require.EventuallyWithT(t, func(c *assert.CollectT) { var obj T err := client.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, PT(&obj)) - return errors2.IsNotFound(err) + require.True(c, errors2.IsNotFound(err), "%T %s/%s still exists", obj, namespace, name) }, defaultTimeout, defaultInterval) } @@ -210,17 +204,20 @@ func assertScanSettingBinding(ctx context.Context, t testutils.T, client dynclie assert.Equal(t, scanSettingBinding.Annotations["owner"], "stackrox") } -func waitForDeploymentReady(ctx context.Context, t *testing.T, name string, namespace string, numReplicas int32) { - client := createDynamicClient(t) - require.Eventually(t, func() bool { +func waitForDeploymentReady(ctx context.Context, t *testing.T, client dynclient.Client, name string, namespace string, numReplicas int32) { + require.EventuallyWithT(t, func(c *assert.CollectT) { deployment := &appsv1.Deployment{} - return client.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, deployment) == nil && deployment.Status.ReadyReplicas == numReplicas + err := client.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, deployment) + require.NoErrorf(c, err, "failed to get deployment %q", name) + require.Equal(c, numReplicas, deployment.Status.ReadyReplicas) }, defaultTimeout, defaultInterval) } +// Run this test outside of other parllel tests because of the Sensor side effects. 
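As an aside on the helpers above: deleteResource and assertResourceDoesNotExist use the two-type-parameter idiom in which PT is constrained to both dynclient.Object and *T, so the helper can allocate a plain T and still call pointer-receiver methods on it, while callers only spell out T. A stripped-down sketch of the same idiom under stand-in types (Object and Widget below are illustrative, not the controller-runtime interfaces):

package example

// Object stands in for the small subset of dynclient.Object used above.
type Object interface {
	SetName(string)
	GetName() string
}

// Widget is a stand-in resource; note that only *Widget implements Object.
type Widget struct{ name string }

func (w *Widget) SetName(n string) { w.name = n }
func (w *Widget) GetName() string  { return w.name }

// newNamed allocates a T and configures it through its pointer type PT.
// Constraining PT to both Object and *T lets Go infer PT from T, so callers
// write newNamed[Widget](...) much like deleteResource[complianceoperatorv1.ScanSetting](...).
func newNamed[T any, PT interface {
	Object
	*T
}](name string) PT {
	var obj T
	ptr := PT(&obj)
	ptr.SetName(name)
	return ptr
}

func example() string {
	w := newNamed[Widget]("my-widget")
	return w.GetName() // "my-widget"
}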
func TestComplianceV2CentralSendsScanConfiguration(t *testing.T) { ctx := context.Background() k8sClient := createK8sClient(t) + dynClient := createDynamicClient(t) conn := centralgrpc.GRPCConnectionToCentral(t) // Create the ScanConfiguration service @@ -233,12 +230,22 @@ func TestComplianceV2CentralSendsScanConfiguration(t *testing.T) { require.Greater(t, len(clusters.GetClusters()), 0) clusterID := clusters.GetClusters()[0].GetId() - // Set the cluster ID - scanConfig.Clusters = []string{clusterID} + // Create local scan config with UUID-based name for test isolation + scanName := fmt.Sprintf("sync-test-%s", uuid.NewV4().String()) + scanConfig := v2.ComplianceScanConfiguration{ + ScanName: scanName, + Clusters: []string{clusterID}, + ScanConfig: &v2.BaseComplianceScanConfigurationSettings{ + Description: scanName, + OneTimeScan: false, + Profiles: []string{"ocp4-cis"}, + ScanSchedule: initialSchedule, + }, + } // Scale down Sensor - assert.NoError(t, scaleToN(ctx, k8sClient, "sensor", stackroxNamespace, 0)) - waitForDeploymentReady(ctx, t, "sensor", stackroxNamespace, 0) + scaleToN(ctx, t, k8sClient, "sensor", stackroxNamespace, 0) + waitForDeploymentReady(ctx, t, dynClient, "sensor", stackroxNamespace, 0) // Create ScanConfig in Central res, err := scanConfigService.CreateComplianceScanConfiguration(ctx, &scanConfig) @@ -250,44 +257,43 @@ func TestComplianceV2CentralSendsScanConfiguration(t *testing.T) { Id: res.GetId(), } _, _ = scanConfigService.DeleteComplianceScanConfiguration(ctx, reqDelete) - cleanUpResources(ctx, t, scanName, coNamespaceV2) + cleanUpResources(ctx, t, dynClient, scanName, coNamespaceV2) }) // Scale up Sensor - assert.NoError(t, scaleToN(ctx, k8sClient, "sensor", stackroxNamespace, 1)) - waitForDeploymentReady(ctx, t, "sensor", stackroxNamespace, 1) + scaleToN(ctx, t, k8sClient, "sensor", stackroxNamespace, 1) + waitForDeploymentReady(ctx, t, dynClient, "sensor", stackroxNamespace, 1) // Assert the ScanSetting and the ScanSettingBinding are created - client := createDynamicClient(t) assert.EventuallyWithT(t, func(c *assert.CollectT) { - assertScanSetting(ctx, wrapCollectT(t, c), client, scanName, coNamespaceV2, &scanConfig) - assertScanSettingBinding(ctx, wrapCollectT(t, c), client, scanName, coNamespaceV2, &scanConfig) + assertScanSetting(ctx, wrapCollectT(t, c), dynClient, scanName, coNamespaceV2, &scanConfig) + assertScanSettingBinding(ctx, wrapCollectT(t, c), dynClient, scanName, coNamespaceV2, &scanConfig) }, defaultTimeout, defaultInterval) // Scale down Sensor - assert.NoError(t, scaleToN(ctx, k8sClient, "sensor", stackroxNamespace, 0)) - waitForDeploymentReady(ctx, t, "sensor", stackroxNamespace, 0) + scaleToN(ctx, t, k8sClient, "sensor", stackroxNamespace, 0) + waitForDeploymentReady(ctx, t, dynClient, "sensor", stackroxNamespace, 0) // Update the ScanConfig in Central scanConfig.Id = res.GetId() - scanConfig.ScanConfig.Profiles = updatedProfiles + scanConfig.ScanConfig.Profiles = []string{"ocp4-pci-dss", "ocp4-pci-dss-node"} scanConfig.ScanConfig.ScanSchedule = updatedSchedule _, err = scanConfigService.UpdateComplianceScanConfiguration(ctx, &scanConfig) assert.NoError(t, err) // Scale up Sensor - assert.NoError(t, scaleToN(ctx, k8sClient, "sensor", stackroxNamespace, 1)) - waitForDeploymentReady(ctx, t, "sensor", stackroxNamespace, 1) + scaleToN(ctx, t, k8sClient, "sensor", stackroxNamespace, 1) + waitForDeploymentReady(ctx, t, dynClient, "sensor", stackroxNamespace, 1) // Assert the ScanSetting and the ScanSettingBinding are updated 
assert.EventuallyWithT(t, func(c *assert.CollectT) { - assertScanSetting(ctx, wrapCollectT(t, c), client, scanName, coNamespaceV2, &scanConfig) - assertScanSettingBinding(ctx, wrapCollectT(t, c), client, scanName, coNamespaceV2, &scanConfig) + assertScanSetting(ctx, wrapCollectT(t, c), dynClient, scanName, coNamespaceV2, &scanConfig) + assertScanSettingBinding(ctx, wrapCollectT(t, c), dynClient, scanName, coNamespaceV2, &scanConfig) }, defaultTimeout, defaultInterval) // Scale down Sensor - assert.NoError(t, scaleToN(ctx, k8sClient, "sensor", stackroxNamespace, 0)) - waitForDeploymentReady(ctx, t, "sensor", stackroxNamespace, 0) + scaleToN(ctx, t, k8sClient, "sensor", stackroxNamespace, 0) + waitForDeploymentReady(ctx, t, dynClient, "sensor", stackroxNamespace, 0) // Delete the ScanConfig in Central reqDelete := &v2.ResourceByID{ @@ -296,56 +302,50 @@ func TestComplianceV2CentralSendsScanConfiguration(t *testing.T) { _, err = scanConfigService.DeleteComplianceScanConfiguration(ctx, reqDelete) // Scale up Sensor - assert.NoError(t, scaleToN(ctx, k8sClient, "sensor", stackroxNamespace, 1)) - waitForDeploymentReady(ctx, t, "sensor", stackroxNamespace, 1) + scaleToN(ctx, t, k8sClient, "sensor", stackroxNamespace, 1) + waitForDeploymentReady(ctx, t, dynClient, "sensor", stackroxNamespace, 1) // Assert the ScanSetting and the ScanSettingBinding are deleted - assertResourceDoesNotExist[complianceoperatorv1.ScanSetting](ctx, t, scanName, coNamespaceV2) - assertResourceDoesNotExist[complianceoperatorv1.ScanSettingBinding](ctx, t, scanName, coNamespaceV2) + assertResourceDoesNotExist[complianceoperatorv1.ScanSetting](ctx, t, dynClient, scanName, coNamespaceV2) + assertResourceDoesNotExist[complianceoperatorv1.ScanSettingBinding](ctx, t, dynClient, scanName, coNamespaceV2) } // ACS API test suite for integration testing for the Compliance Operator. func TestComplianceV2Integration(t *testing.T) { + t.Parallel() resp := getIntegrations(t) - assert.Len(t, resp.Integrations, 1, "failed to assert there is only a single compliance integration") - assert.Equal(t, resp.Integrations[0].ClusterName, "remote", "failed to find integration for cluster called \"remote\"") - assert.Equal(t, resp.Integrations[0].Namespace, "openshift-compliance", "failed to find integration for \"openshift-compliance\" namespace") + assert.Equal(t, resp.GetIntegrations()[0].GetClusterName(), "remote", "failed to find integration for cluster called \"remote\"") + assert.Equal(t, resp.GetIntegrations()[0].GetNamespace(), "openshift-compliance", "failed to find integration for \"openshift-compliance\" namespace") } func TestComplianceV2ProfileGet(t *testing.T) { + t.Parallel() conn := centralgrpc.GRPCConnectionToCentral(t) client := v2.NewComplianceProfileServiceClient(conn) - // Get the clusters - resp := getIntegrations(t) - assert.Len(t, resp.Integrations, 1, "failed to assert there is only a single compliance integration") - // Get the profiles for the cluster - clusterID := resp.Integrations[0].ClusterId + clusterID := getIntegrations(t).GetIntegrations()[0].GetClusterId() profileList, err := client.ListComplianceProfiles(context.TODO(), &v2.ProfilesForClusterRequest{ClusterId: clusterID}) - assert.Greater(t, len(profileList.Profiles), 0, "failed to assert the cluster has profiles") + assert.Greater(t, len(profileList.GetProfiles()), 0, "failed to assert the cluster has profiles") // Now take the ID from one of the cluster profiles to get the specific profile. 
- profile, err := client.GetComplianceProfile(context.TODO(), &v2.ResourceByID{Id: profileList.Profiles[0].Id}) + profile, err := client.GetComplianceProfile(context.TODO(), &v2.ResourceByID{Id: profileList.GetProfiles()[0].GetId()}) if err != nil { t.Fatal(err) } - assert.Greater(t, len(profile.Rules), 0, "failed to verify the selected profile contains any rules") + assert.Greater(t, len(profile.GetRules()), 0, "failed to verify the selected profile contains any rules") } func TestComplianceV2ProfileGetSummaries(t *testing.T) { + t.Parallel() conn := centralgrpc.GRPCConnectionToCentral(t) client := v2.NewComplianceProfileServiceClient(conn) - // Get the clusters - resp := getIntegrations(t) - assert.Len(t, resp.Integrations, 1, "failed to assert there is only a single compliance integration") - // Get the profiles for the cluster - clusterID := resp.Integrations[0].ClusterId + clusterID := getIntegrations(t).GetIntegrations()[0].GetClusterId() profileSummaries, err := client.ListProfileSummaries(context.TODO(), &v2.ClustersProfileSummaryRequest{ClusterIds: []string{clusterID}}) assert.NoError(t, err) - assert.Greater(t, len(profileSummaries.Profiles), 0, "failed to assert the cluster has profiles") + assert.Greater(t, len(profileSummaries.GetProfiles()), 0, "failed to assert the cluster has profiles") } // Helper to get the integrations as the cluster id is needed in many API calls @@ -358,20 +358,22 @@ func getIntegrations(t *testing.T) *v2.ListComplianceIntegrationsResponse { if err != nil { t.Fatal(err) } - assert.Len(t, resp.Integrations, 1, "failed to assert there is only a single compliance integration") + require.Len(t, resp.GetIntegrations(), 1, "failed to assert there is only a single compliance integration") return resp } func TestComplianceV2CreateGetScanConfigurations(t *testing.T) { + t.Parallel() ctx := context.Background() + dynClient := createDynamicClient(t) conn := centralgrpc.GRPCConnectionToCentral(t) scanConfigService := v2.NewComplianceScanConfigurationServiceClient(conn) serviceCluster := v1.NewClustersServiceClient(conn) clusters, err := serviceCluster.GetClusters(ctx, &v1.GetClustersRequest{}) assert.NoError(t, err) clusterID := clusters.GetClusters()[0].GetId() - testName := fmt.Sprintf("test-%s", uuid.NewV4().String()) + testName := fmt.Sprintf("create-get-%s", uuid.NewV4().String()) req := &v2.ComplianceScanConfiguration{ ScanName: testName, Id: "", @@ -396,39 +398,36 @@ func TestComplianceV2CreateGetScanConfigurations(t *testing.T) { resp, err := scanConfigService.CreateComplianceScanConfiguration(ctx, req) assert.NoError(t, err) assert.Equal(t, req.GetScanName(), resp.GetScanName()) + t.Cleanup(func() { + _ = deleteScanConfig(ctx, resp.GetId(), scanConfigService) + cleanUpResources(ctx, t, dynClient, testName, coNamespaceV2) + }) query := &v2.RawQuery{Query: ""} scanConfigs, err := scanConfigService.ListComplianceScanConfigurations(ctx, query) assert.NoError(t, err) assert.GreaterOrEqual(t, len(scanConfigs.GetConfigurations()), 1) - assert.GreaterOrEqual(t, scanConfigs.TotalCount, int32(1)) - - configs := scanConfigs.GetConfigurations() - scanconfigID := getscanConfigID(testName, configs) - defer deleteScanConfig(ctx, scanconfigID, scanConfigService) + assert.GreaterOrEqual(t, scanConfigs.GetTotalCount(), int32(1)) serviceResult := v2.NewComplianceResultsServiceClient(conn) query = &v2.RawQuery{Query: ""} - err = retry.WithRetry(func() error { + require.EventuallyWithT(t, func(c *assert.CollectT) { results, err := serviceResult.GetComplianceScanResults(ctx, 
query) - if err != nil { - return err - } + require.NoError(c, err) resultsList := results.GetScanResults() - for i := 0; i < len(resultsList); i++ { - if resultsList[i].GetScanName() == testName { - return nil + var found bool + for _, result := range resultsList { + if result.GetScanName() == testName { + found = true + break } } - return errors.New("scan result not found") - }, retry.BetweenAttempts(func(previousAttemptNumber int) { - time.Sleep(60 * time.Second) - }), retry.Tries(10)) - assert.NoError(t, err) + require.True(c, found, "scan result not found for %s", testName) + }, 10*time.Minute, 30*time.Second) // Create a different scan configuration with the same profile - duplicateTestName := fmt.Sprintf("test-%s", uuid.NewV4().String()) + duplicateTestName := fmt.Sprintf("create-get-dup-%s", uuid.NewV4().String()) duplicateProfileReq := &v2.ComplianceScanConfiguration{ ScanName: duplicateTestName, Id: "", @@ -457,11 +456,13 @@ func TestComplianceV2CreateGetScanConfigurations(t *testing.T) { query = &v2.RawQuery{Query: ""} scanConfigs, err = scanConfigService.ListComplianceScanConfigurations(ctx, query) assert.NoError(t, err) - assert.Equal(t, len(scanConfigs.GetConfigurations()), 1) + // Verify the original config exists but duplicate was not created + assert.NotEmpty(t, getscanConfigID(testName, scanConfigs.GetConfigurations()), "expected original scan config %s to exist", testName) + assert.Empty(t, getscanConfigID(duplicateTestName, scanConfigs.GetConfigurations()), "expected duplicate scan config %s to not exist", duplicateTestName) // Create a scan configuration with profiles with different products (rhcos, cis-node). // This should be valid in version >= 4.9 - differentProductProfileTestName := fmt.Sprintf("test-%s", uuid.NewV4().String()) + differentProductProfileTestName := fmt.Sprintf("create-get-diffprod-%s", uuid.NewV4().String()) differentProductProfileReq := &v2.ComplianceScanConfiguration{ ScanName: differentProductProfileTestName, Id: "", @@ -485,18 +486,23 @@ func TestComplianceV2CreateGetScanConfigurations(t *testing.T) { res, err := scanConfigService.CreateComplianceScanConfiguration(ctx, differentProductProfileReq) require.NoError(t, err) + t.Cleanup(func() { + _ = deleteScanConfig(ctx, res.GetId(), scanConfigService) + cleanUpResources(ctx, t, dynClient, differentProductProfileTestName, coNamespaceV2) + }) query = &v2.RawQuery{Query: ""} scanConfigs, err = scanConfigService.ListComplianceScanConfigurations(ctx, query) assert.NoError(t, err) - assert.Equal(t, len(scanConfigs.GetConfigurations()), 2) - - _, err = scanConfigService.DeleteComplianceScanConfiguration(ctx, &v2.ResourceByID{Id: res.GetId()}) - require.NoError(t, err) + // Verify both scan configs exist + assert.NotEmpty(t, getscanConfigID(testName, scanConfigs.GetConfigurations()), "expected original scan config %s to exist", testName) + assert.NotEmpty(t, getscanConfigID(differentProductProfileTestName, scanConfigs.GetConfigurations()), "expected different product scan config %s to exist", differentProductProfileTestName) } func TestComplianceV2UpdateScanConfigurations(t *testing.T) { + t.Parallel() ctx := context.Background() + dynClient := createDynamicClient(t) conn := centralgrpc.GRPCConnectionToCentral(t) scanConfigService := v2.NewComplianceScanConfigurationServiceClient(conn) serviceCluster := v1.NewClustersServiceClient(conn) @@ -506,14 +512,14 @@ func TestComplianceV2UpdateScanConfigurations(t *testing.T) { clusterID := clusters.GetClusters()[0].GetId() // Create a scan configuration - 
scanName := fmt.Sprintf("test-%s", uuid.NewV4().String()) + scanName := fmt.Sprintf("update-%s", uuid.NewV4().String()) req := &v2.ComplianceScanConfiguration{ ScanName: scanName, Id: "", Clusters: []string{clusterID}, ScanConfig: &v2.BaseComplianceScanConfigurationSettings{ OneTimeScan: false, - Profiles: []string{"ocp4-cis"}, + Profiles: []string{"ocp4-moderate"}, Description: "test config", ScanSchedule: &v2.Schedule{ IntervalType: 1, @@ -533,20 +539,19 @@ func TestComplianceV2UpdateScanConfigurations(t *testing.T) { assert.Equal(t, req.GetScanName(), resp.GetScanName()) t.Cleanup(func() { _ = deleteScanConfig(ctx, resp.GetId(), scanConfigService) - cleanUpResources(ctx, t, req.GetScanName(), coNamespaceV2) + cleanUpResources(ctx, t, dynClient, req.GetScanName(), coNamespaceV2) }) query := &v2.RawQuery{Query: ""} scanConfigs, err := scanConfigService.ListComplianceScanConfigurations(ctx, query) assert.NoError(t, err) require.GreaterOrEqual(t, len(scanConfigs.GetConfigurations()), 1) - assert.GreaterOrEqual(t, scanConfigs.TotalCount, int32(1)) + assert.GreaterOrEqual(t, scanConfigs.GetTotalCount(), int32(1)) // Assert the ScanSetting and the ScanSettingBinding are created - client := createDynamicClient(t) assert.EventuallyWithT(t, func(c *assert.CollectT) { - assertScanSetting(ctx, wrapCollectT(t, c), client, scanName, coNamespaceV2, req) - assertScanSettingBinding(ctx, wrapCollectT(t, c), client, scanName, coNamespaceV2, req) + assertScanSetting(ctx, wrapCollectT(t, c), dynClient, scanName, coNamespaceV2, req) + assertScanSettingBinding(ctx, wrapCollectT(t, c), dynClient, scanName, coNamespaceV2, req) }, defaultTimeout, defaultInterval) // Update the scan configuration @@ -562,7 +567,7 @@ func TestComplianceV2UpdateScanConfigurations(t *testing.T) { }, }, } - updateReq.ScanConfig.Profiles = []string{"ocp4-high", "ocp4-high-node"} + updateReq.ScanConfig.Profiles = []string{"ocp4-moderate-node"} _, err = scanConfigService.UpdateComplianceScanConfiguration(ctx, updateReq) assert.NoError(t, err) @@ -570,17 +575,19 @@ func TestComplianceV2UpdateScanConfigurations(t *testing.T) { scanConfigs, err = scanConfigService.ListComplianceScanConfigurations(ctx, query) assert.NoError(t, err) assert.GreaterOrEqual(t, len(scanConfigs.GetConfigurations()), 1) - assert.GreaterOrEqual(t, scanConfigs.TotalCount, int32(1)) + assert.GreaterOrEqual(t, scanConfigs.GetTotalCount(), int32(1)) // Assert the ScanSetting and the ScanSettingBinding are updated assert.EventuallyWithT(t, func(c *assert.CollectT) { - assertScanSetting(ctx, wrapCollectT(t, c), client, scanName, coNamespaceV2, updateReq) - assertScanSettingBinding(ctx, wrapCollectT(t, c), client, scanName, coNamespaceV2, updateReq) + assertScanSetting(ctx, wrapCollectT(t, c), dynClient, scanName, coNamespaceV2, updateReq) + assertScanSettingBinding(ctx, wrapCollectT(t, c), dynClient, scanName, coNamespaceV2, updateReq) }, defaultTimeout, defaultInterval) } func TestComplianceV2DeleteComplianceScanConfigurations(t *testing.T) { + t.Parallel() ctx := context.Background() + dynClient := createDynamicClient(t) conn := centralgrpc.GRPCConnectionToCentral(t) scanConfigService := v2.NewComplianceScanConfigurationServiceClient(conn) // Retrieve the results from the scan configuration once the scan is complete @@ -589,14 +596,14 @@ func TestComplianceV2DeleteComplianceScanConfigurations(t *testing.T) { assert.NoError(t, err) clusterID := clusters.GetClusters()[0].GetId() - testName := fmt.Sprintf("test-%s", uuid.NewV4().String()) + testName := 
fmt.Sprintf("delete-%s", uuid.NewV4().String()) req := &v2.ComplianceScanConfiguration{ ScanName: testName, Id: "", Clusters: []string{clusterID}, ScanConfig: &v2.BaseComplianceScanConfigurationSettings{ OneTimeScan: false, - Profiles: []string{"rhcos4-e8"}, + Profiles: []string{"rhcos4-high"}, Description: "test config", ScanSchedule: &v2.Schedule{ IntervalType: 1, @@ -614,6 +621,10 @@ func TestComplianceV2DeleteComplianceScanConfigurations(t *testing.T) { resp, err := scanConfigService.CreateComplianceScanConfiguration(ctx, req) assert.NoError(t, err) assert.Equal(t, req.GetScanName(), resp.GetScanName()) + t.Cleanup(func() { + _ = deleteScanConfig(ctx, resp.GetId(), scanConfigService) + cleanUpResources(ctx, t, dynClient, testName, coNamespaceV2) + }) query := &v2.RawQuery{Query: ""} scanConfigs, err := scanConfigService.ListComplianceScanConfigurations(ctx, query) @@ -633,21 +644,23 @@ func TestComplianceV2DeleteComplianceScanConfigurations(t *testing.T) { } func TestComplianceV2ComplianceObjectMetadata(t *testing.T) { + t.Parallel() ctx := context.Background() + dynClient := createDynamicClient(t) conn := centralgrpc.GRPCConnectionToCentral(t) scanConfigService := v2.NewComplianceScanConfigurationServiceClient(conn) serviceCluster := v1.NewClustersServiceClient(conn) clusters, err := serviceCluster.GetClusters(ctx, &v1.GetClustersRequest{}) assert.NoError(t, err) clusterID := clusters.GetClusters()[0].GetId() - testName := fmt.Sprintf("test-%s", uuid.NewV4().String()) + testName := fmt.Sprintf("metadata-%s", uuid.NewV4().String()) req := &v2.ComplianceScanConfiguration{ ScanName: testName, Id: "", Clusters: []string{clusterID}, ScanConfig: &v2.BaseComplianceScanConfigurationSettings{ OneTimeScan: false, - Profiles: []string{"rhcos4-e8"}, + Profiles: []string{"rhcos4-nerc-cip"}, Description: "test config", ScanSchedule: &v2.Schedule{ IntervalType: 1, @@ -665,21 +678,23 @@ func TestComplianceV2ComplianceObjectMetadata(t *testing.T) { resp, err := scanConfigService.CreateComplianceScanConfiguration(ctx, req) assert.NoError(t, err) assert.Equal(t, req.GetScanName(), resp.GetScanName()) + t.Cleanup(func() { + _ = deleteScanConfig(ctx, resp.GetId(), scanConfigService) + cleanUpResources(ctx, t, dynClient, testName, coNamespaceV2) + }) query := &v2.RawQuery{Query: ""} scanConfigs, err := scanConfigService.ListComplianceScanConfigurations(ctx, query) configs := scanConfigs.GetConfigurations() - scanconfigID := getscanConfigID(testName, configs) - defer deleteScanConfig(ctx, scanconfigID, scanConfigService) + _ = getscanConfigID(testName, configs) // verify config exists // Ensure the ScanSetting and ScanSettingBinding have ACS metadata - client := createDynamicClient(t) var scanSetting complianceoperatorv1.ScanSetting require.EventuallyWithT(t, func(c *assert.CollectT) { callCtx, callCancel := context.WithTimeout(ctx, 10*time.Second) defer callCancel() - err := client.Get(callCtx, + err := dynClient.Get(callCtx, types.NamespacedName{Name: testName, Namespace: "openshift-compliance"}, &scanSetting, ) @@ -692,7 +707,7 @@ func TestComplianceV2ComplianceObjectMetadata(t *testing.T) { assert.Equal(t, scanSetting.Annotations["owner"], "stackrox") var scanSettingBinding complianceoperatorv1.ScanSetting - err = client.Get(context.TODO(), types.NamespacedName{Name: testName, Namespace: "openshift-compliance"}, &scanSettingBinding) + err = dynClient.Get(context.TODO(), types.NamespacedName{Name: testName, Namespace: "openshift-compliance"}, &scanSettingBinding) require.NoError(t, err, "failed to get 
ScanSettingBinding %s", testName) assert.Contains(t, scanSettingBinding.Labels, "app.kubernetes.io/name") assert.Equal(t, scanSettingBinding.Labels["app.kubernetes.io/name"], "stackrox") @@ -719,16 +734,13 @@ func getscanConfigID(configName string, scanConfigs []*v2.ComplianceScanConfigur } func TestComplianceV2ScheduleRescan(t *testing.T) { + t.Parallel() + dynClient := createDynamicClient(t) conn := centralgrpc.GRPCConnectionToCentral(t) client := v2.NewComplianceScanConfigurationServiceClient(conn) - integrationClient := v2.NewComplianceIntegrationServiceClient(conn) - resp, err := integrationClient.ListComplianceIntegrations(context.TODO(), &v2.RawQuery{Query: ""}) - if err != nil { - t.Fatal(err) - } - clusterId := resp.Integrations[0].ClusterId + clusterId := getIntegrations(t).GetIntegrations()[0].GetClusterId() - scanConfigName := "e8-scan-schedule" + scanConfigName := fmt.Sprintf("schedule-rescan-%s", uuid.NewV4().String()) sc := v2.ComplianceScanConfiguration{ ScanName: scanConfigName, ScanConfig: &v2.BaseComplianceScanConfigurationSettings{ @@ -739,7 +751,7 @@ func TestComplianceV2ScheduleRescan(t *testing.T) { Hour: 0, Minute: 0, }, - Description: "Scan schedule for the Austrailian Essential Eight profile to run daily.", + Description: "Scan schedule for the Australian Essential Eight profile to run daily.", }, Clusters: []string{clusterId}, } @@ -747,15 +759,17 @@ func TestComplianceV2ScheduleRescan(t *testing.T) { if err != nil { t.Fatal(err) } + t.Cleanup(func() { + _, _ = client.DeleteComplianceScanConfiguration(context.TODO(), &v2.ResourceByID{Id: scanConfig.GetId()}) + cleanUpResources(context.Background(), t, dynClient, scanConfigName, coNamespaceV2) + }) - defer client.DeleteComplianceScanConfiguration(context.TODO(), &v2.ResourceByID{Id: scanConfig.GetId()}) - - waitForComplianceSuiteToComplete(t, scanConfig.ScanName, waitForDoneInterval, waitForDoneTimeout) + waitForComplianceSuiteToComplete(t, dynClient, scanConfig.GetScanName(), waitForDoneInterval, waitForDoneTimeout) // Invoke a rescan _, err = client.RunComplianceScanConfiguration(context.TODO(), &v2.ResourceByID{Id: scanConfig.GetId()}) require.NoError(t, err, "failed to rerun scan schedule %s", scanConfigName) // Assert the scan is rerunning on the cluster using the Compliance Operator CRDs - waitForComplianceSuiteToComplete(t, scanConfig.ScanName, waitForDoneInterval, waitForDoneTimeout) + waitForComplianceSuiteToComplete(t, dynClient, scanConfig.GetScanName(), waitForDoneInterval, waitForDoneTimeout) } From ff8c99a168596a3b3f6553a4ee95dca713343e7d Mon Sep 17 00:00:00 2001 From: Stephan Hesselmann Date: Tue, 10 Feb 2026 19:33:40 +0100 Subject: [PATCH 154/232] ROX-33012: fix cli release pipeline (#18939) --- Makefile | 2 +- image/rhel/konflux.Dockerfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 16d764410c463..63c4ad12659c8 100644 --- a/Makefile +++ b/Makefile @@ -429,7 +429,7 @@ endif roxagent: roxagent-build .PHONY: cli-build -cli-build: roxctl-build +cli-build: roxctl-build roxagent-build .PHONY: cli-install cli-install: roxctl-install diff --git a/image/rhel/konflux.Dockerfile b/image/rhel/konflux.Dockerfile index 1893b3c3192ce..c4d973e41de54 100644 --- a/image/rhel/konflux.Dockerfile +++ b/image/rhel/konflux.Dockerfile @@ -20,7 +20,7 @@ ENV CI=1 # TODO(ROX-13200): make sure roxctl cli is built without running go mod tidy. 
# CLI builds are without strictfipsruntime (and CGO_ENABLED is set to 0) because these binaries are for user download and use outside the cluster. -RUN make cli-build +RUN make roxctl-build ENV CGO_ENABLED=1 # TODO(ROX-27054): Remove the redundant strictfipsruntime option if one is found to be so. From 6462651948bf49d922b132aa893a90453bc74ed1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Feb 2026 16:35:51 -0500 Subject: [PATCH 155/232] chore(deps): bump github.com/klauspost/compress from 1.18.3 to 1.18.4 (#18937) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index a982013e39344..272c94f58ee0c 100644 --- a/go.mod +++ b/go.mod @@ -79,7 +79,7 @@ require ( github.com/jackc/pgx/v5 v5.8.0 github.com/jeremywohl/flatten v1.0.1 github.com/joshdk/go-junit v1.0.0 - github.com/klauspost/compress v1.18.3 + github.com/klauspost/compress v1.18.4 github.com/lib/pq v1.11.1 github.com/machinebox/graphql v0.2.2 github.com/mailru/easyjson v0.9.1 diff --git a/go.sum b/go.sum index 87b57b89dd67b..244138cb3ce98 100644 --- a/go.sum +++ b/go.sum @@ -1066,8 +1066,8 @@ github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46 h1:veS9QfglfvqAw github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.18.3 h1:9PJRvfbmTabkOX8moIpXPbMMbYN60bWImDDU7L+/6zw= -github.com/klauspost/compress v1.18.3/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= +github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c= +github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= From 6cba9d87a1ce6e052440edc601a35efac0a79600 Mon Sep 17 00:00:00 2001 From: Brad Rogers <61400697+bradr5@users.noreply.github.com> Date: Tue, 10 Feb 2026 15:56:20 -0600 Subject: [PATCH 156/232] ROX-32018: Fix compliance coverage page crash (#18948) --- .../Containers/ComplianceEnhanced/Coverage/CoveragePage.tsx | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ui/apps/platform/src/Containers/ComplianceEnhanced/Coverage/CoveragePage.tsx b/ui/apps/platform/src/Containers/ComplianceEnhanced/Coverage/CoveragePage.tsx index a8af2f1e485ed..d6f687797252e 100644 --- a/ui/apps/platform/src/Containers/ComplianceEnhanced/Coverage/CoveragePage.tsx +++ b/ui/apps/platform/src/Containers/ComplianceEnhanced/Coverage/CoveragePage.tsx @@ -35,7 +35,7 @@ function CoverageContent() { ); } - if (!isLoading && scanConfigProfilesResponse.totalCount === 0) { + if (!isLoading && scanConfigProfilesResponse.profiles.length === 0) { return ; } @@ -66,6 +66,10 @@ function ProfilesRedirectHandler() { ); } + if (!firstProfile) { + return ; + } + return ; } From adfed4cd9a879b0e86be76f39086676e1e9021e4 Mon Sep 17 00:00:00 2001 From: David House <105243888+davdhacs@users.noreply.github.com> Date: Tue, 10 Feb 2026 23:46:02 -0700 
Subject: [PATCH 157/232] fix(ci): upgrade envtest (#18958) --- operator/tools/envtest/go.mod | 20 ++--- operator/tools/envtest/go.sum | 143 +++++++++++++--------------------- 2 files changed, 64 insertions(+), 99 deletions(-) diff --git a/operator/tools/envtest/go.mod b/operator/tools/envtest/go.mod index 6586fc3461a54..e72e7bf2b7868 100644 --- a/operator/tools/envtest/go.mod +++ b/operator/tools/envtest/go.mod @@ -1,15 +1,17 @@ module github.com/stackrox/rox/operator/tools/envtest -go 1.25 +go 1.25.0 -require sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240215124517-56159419231e +require sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20260209172322-2053ba3d414e require ( - github.com/go-logr/logr v1.2.4 // indirect - github.com/go-logr/zapr v1.2.4 // indirect - github.com/spf13/afero v1.6.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect - go.uber.org/multierr v1.10.0 // indirect - go.uber.org/zap v1.26.0 // indirect - golang.org/x/text v0.12.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/spf13/afero v1.15.0 // indirect + github.com/spf13/pflag v1.0.10 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.1 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + golang.org/x/text v0.34.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect ) diff --git a/operator/tools/envtest/go.sum b/operator/tools/envtest/go.sum index e5e52d5bea292..cbf375b7a075c 100644 --- a/operator/tools/envtest/go.sum +++ b/operator/tools/envtest/go.sum @@ -1,97 +1,60 @@ -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= -github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod 
h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/onsi/ginkgo/v2 v2.12.1 h1:uHNEO1RP2SpuZApSkel9nEh1/Mu+hmQe7Q+Pepg5OYA= -github.com/onsi/ginkgo/v2 v2.12.1/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= -github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= -github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= +github.com/onsi/ginkgo/v2 v2.27.4 h1:fcEcQW/A++6aZAZQNUmNjvA9PSOzefMJBerHJ4t8v8Y= +github.com/onsi/ginkgo/v2 v2.27.4/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= +github.com/onsi/gomega v1.39.0 h1:y2ROC3hKFmQZJNFeGAMeHZKkjBL65mIZcvrLQBF9k6Q= +github.com/onsi/gomega v1.39.0/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= 
-go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= -go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= -golang.org/x/tools v0.12.0/go.mod 
h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= +go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= +golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= +golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o= +golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= +golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= +golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= +golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240215124517-56159419231e h1:zlN3M47kIntFr5Z6qMOSMg8nO6lrywD94H29TPDZjZk= -sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240215124517-56159419231e/go.mod h1:TF/lVLWS+JNNaVqJuDDictY2hZSXSsIHCx4FClMvqFg= +k8s.io/apimachinery v0.36.0-alpha.1 h1:MrQLU+TD3A2/ywQiTHEJ5BEKKk3XHpy0RkT98V0XdKI= +k8s.io/apimachinery v0.36.0-alpha.1/go.mod h1:hQkG060WLAG1TIkYsu5lj3tb6YdNpKe5Zrr2UPGg+/k= +sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20260209172322-2053ba3d414e h1:DDMrrZMTI9XOXbEQ3FXH2UHlpCg8XbDD3YXJNb6TWn4= +sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20260209172322-2053ba3d414e/go.mod h1:IlbgWQkCYbpbkygXxnd/sLJWL9WQk9NvvVhkJCm5P7Q= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= From e07e4afd240f6a94c9540f50e17def099bfcfd7a Mon Sep 17 00:00:00 2001 From: "red-hat-konflux[bot]" <126015336+red-hat-konflux[bot]@users.noreply.github.com> Date: Wed, 11 Feb 2026 08:13:28 +0100 Subject: [PATCH 158/232] chore(deps): update konflux references (#18861) Signed-off-by: red-hat-konflux <126015336+red-hat-konflux[bot]@users.noreply.github.com> Co-authored-by: red-hat-konflux[bot] <126015336+red-hat-konflux[bot]@users.noreply.github.com> Co-authored-by: Tom Martensen --- .tekton/basic-component-pipeline.yaml | 69 ++++----------------------- .tekton/main-pipeline.yaml | 69 ++++----------------------- .tekton/operator-bundle-pipeline.yaml | 42 ++++------------ .tekton/scanner-v4-pipeline.yaml | 69 ++++----------------------- 4 files changed, 36 insertions(+), 213 deletions(-) diff --git a/.tekton/basic-component-pipeline.yaml b/.tekton/basic-component-pipeline.yaml index d7dc0dab2dde3..3d7dcc3bb1a47 100644 --- a/.tekton/basic-component-pipeline.yaml +++ b/.tekton/basic-component-pipeline.yaml @@ -77,10 +77,6 @@ spec: description: Path to the Dockerfile inside the context specified by parameter path-context name: dockerfile type: string - - default: "false" - description: Force rebuild image - name: rebuild - type: string - default: "false" description: Skip checks against built image name: skip-checks @@ -159,15 +155,6 @@ spec: - name: init params: - - name: image-url - # We can't provide a StackRox-style tag because it is not known at this time (requires cloning source, etc.) - # As a workaround, we still provide a unique tag that's based on a revision in order for this task to comply with - # its expected input. We later actually add this tag on a built image with the apply-index-image-tag task. 
- value: $(params.output-image-repo):konflux-$(params.revision) - - name: rebuild - value: $(params.rebuild) - - name: skip-checks - value: $(params.skip-checks) - name: enable-cache-proxy value: $(params.enable-cache-proxy) taskRef: @@ -175,7 +162,7 @@ spec: - name: name value: init - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:ebf06778aeacbbeb081f9231eafbdfdb8e380ad04e211d7ed80ae9101e37fd82 + value: quay.io/konflux-ci/tekton-catalog/task-init:0.3@sha256:aa6f8632cc23d605c5942505ff1d00280db16a6fda5c4c56c4ed9ae936b5fbc6 - name: kind value: task resolver: bundles @@ -203,10 +190,6 @@ spec: - name: kind value: task resolver: bundles - when: - - input: $(tasks.init.results.build) - operator: in - values: ["true"] workspaces: - name: basic-auth workspace: git-auth @@ -311,14 +294,10 @@ spec: - name: name value: buildah-remote-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.8@sha256:6de65de9e78397d4f3be81b0ed7cef4b7b2cb6dceee059bcc741c8ff9d6e128a + value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.8@sha256:f579f6d72db53da9e0b3f84e89d66d0f81c360a0785576308a93c3774b73be11 - name: kind value: task resolver: bundles - when: - - input: $(tasks.init.results.build) - operator: in - values: ["true"] - name: build-image-index params: @@ -338,39 +317,12 @@ spec: - name: name value: build-image-index - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.2@sha256:8c422a5380a3d877257003dee153190322af84fe6f4f25e9eee7d8bf61a62577 + value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.2@sha256:8fd2bd1329a833cf1c3c049c0044961d6f5d5bfc954ebf188c19a56852c42600 - name: kind value: task resolver: bundles - when: - - input: $(tasks.init.results.build) - operator: in - values: ["true"] runAfter: [build-images] - - name: apply-index-image-tag - params: - - name: IMAGE_URL - value: $(tasks.build-image-index.results.IMAGE_URL) - - name: IMAGE_DIGEST - value: $(tasks.build-image-index.results.IMAGE_DIGEST) - - name: ADDITIONAL_TAGS - value: - - konflux-$(params.revision) - taskRef: - params: - - name: name - value: apply-tags - - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-apply-tags:0.3@sha256:510b6d2a3b188adeb716e49566b57d611ab36bd69a2794b5ddfc11dbf014c2ca - - name: kind - value: task - resolver: bundles - when: - - input: $(tasks.init.results.build) - operator: in - values: ["true"] - - name: build-source-image params: - name: BINARY_IMAGE @@ -391,9 +343,6 @@ spec: value: task resolver: bundles when: - - input: $(tasks.init.results.build) - operator: in - values: ["true"] - input: $(params.build-source-image) operator: in values: ["true"] @@ -434,7 +383,7 @@ spec: - name: name value: clair-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:dadfea7633d82e4303ba73d5e9c7e2bc16834bde0fd7688880453b26452067eb + value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:b01d8e2c58eb407ac23fa07b8e44c4631f0cf7257e87507c829fa2486aff9804 - name: kind value: task resolver: bundles @@ -481,7 +430,7 @@ spec: - name: name value: sast-shell-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:1f67b661458c549ab299bcdddb5e2b799af8c89d3c594567eb654d870000b5ec + value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:f475b4b6b0c1687fa1aafa5ba38813e04f080b185af2975e12b457742d9dd857 - name: kind value: task resolver: bundles @@ -505,7 +454,7 @@ spec: - 
name: name value: sast-unicode-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.4@sha256:9d2ab1bcd65f56ce32fe366a955abc8ac76228734a3f3642ac9af8af86fbb4d1 + value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.4@sha256:b38140b2f0b2163def80e28a792b2702245d38a5610a504f2e56c198f3b8f70b - name: kind value: task resolver: bundles @@ -529,7 +478,7 @@ spec: - name: name value: sast-snyk-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:0fc62e57ab2c75adf5eaa5c3e5aaeb4845dbf029ddd159b688bc5804579b639f + value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:0c2ab8ce6d419400b63dd67d061052ac51de7b1ebe93f8ae86ed07ac638d756d - name: kind value: task resolver: bundles @@ -554,7 +503,7 @@ spec: - name: name value: clamav-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:4f5ccf2324ecef92aaad6e2adb46c0bb15be49b4869b5b407346c514b764404f + value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:5b5b31eae9063a00b91acc049b536e548d87c730068e439eefe33ab5238ee118 - name: kind value: task resolver: bundles @@ -574,7 +523,7 @@ spec: - name: name value: rpms-signature-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-rpms-signature-scan:0.2@sha256:ccd087c879899b4c7fe2e05c5a2fa5b9829f4826fa2bd60e0db5b1d4bf1a716e + value: quay.io/konflux-ci/tekton-catalog/task-rpms-signature-scan:0.2@sha256:a99d8fd4c9027356b18e5d2910cc44dbc2fcb53c384ba34696645d9e7faa9084 - name: kind value: task resolver: bundles diff --git a/.tekton/main-pipeline.yaml b/.tekton/main-pipeline.yaml index 780263d8ec5c8..b7e0ddf9d93ef 100644 --- a/.tekton/main-pipeline.yaml +++ b/.tekton/main-pipeline.yaml @@ -77,10 +77,6 @@ spec: description: Path to the Dockerfile inside the context specified by parameter path-context name: dockerfile type: string - - default: "false" - description: Force rebuild image - name: rebuild - type: string - default: "false" description: Skip checks against built image name: skip-checks @@ -160,15 +156,6 @@ spec: - name: init params: - - name: image-url - # We can't provide a StackRox-style tag because it is not known at this time (requires cloning source, etc.) - # As a workaround, we still provide a unique tag that's based on a revision in order for this task to comply with - # its expected input. We later actually add this tag on a built image with the apply-index-image-tag task. 
- value: $(params.output-image-repo):konflux-$(params.revision) - - name: rebuild - value: $(params.rebuild) - - name: skip-checks - value: $(params.skip-checks) - name: enable-cache-proxy value: $(params.enable-cache-proxy) taskRef: @@ -176,7 +163,7 @@ spec: - name: name value: init - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:ebf06778aeacbbeb081f9231eafbdfdb8e380ad04e211d7ed80ae9101e37fd82 + value: quay.io/konflux-ci/tekton-catalog/task-init:0.3@sha256:aa6f8632cc23d605c5942505ff1d00280db16a6fda5c4c56c4ed9ae936b5fbc6 - name: kind value: task resolver: bundles @@ -204,10 +191,6 @@ spec: - name: kind value: task resolver: bundles - when: - - input: $(tasks.init.results.build) - operator: in - values: ["true"] workspaces: - name: basic-auth workspace: git-auth @@ -337,14 +320,10 @@ spec: - name: name value: buildah-remote-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.8@sha256:6de65de9e78397d4f3be81b0ed7cef4b7b2cb6dceee059bcc741c8ff9d6e128a + value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.8@sha256:f579f6d72db53da9e0b3f84e89d66d0f81c360a0785576308a93c3774b73be11 - name: kind value: task resolver: bundles - when: - - input: $(tasks.init.results.build) - operator: in - values: ["true"] timeout: 2h0m0s - name: build-image-index @@ -365,39 +344,12 @@ spec: - name: name value: build-image-index - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.2@sha256:8c422a5380a3d877257003dee153190322af84fe6f4f25e9eee7d8bf61a62577 + value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.2@sha256:8fd2bd1329a833cf1c3c049c0044961d6f5d5bfc954ebf188c19a56852c42600 - name: kind value: task resolver: bundles - when: - - input: $(tasks.init.results.build) - operator: in - values: ["true"] runAfter: [build-images] - - name: apply-index-image-tag - params: - - name: IMAGE_URL - value: $(tasks.build-image-index.results.IMAGE_URL) - - name: IMAGE_DIGEST - value: $(tasks.build-image-index.results.IMAGE_DIGEST) - - name: ADDITIONAL_TAGS - value: - - konflux-$(params.revision) - taskRef: - params: - - name: name - value: apply-tags - - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-apply-tags:0.3@sha256:510b6d2a3b188adeb716e49566b57d611ab36bd69a2794b5ddfc11dbf014c2ca - - name: kind - value: task - resolver: bundles - when: - - input: $(tasks.init.results.build) - operator: in - values: ["true"] - - name: build-source-image params: - name: BINARY_IMAGE @@ -418,9 +370,6 @@ spec: value: task resolver: bundles when: - - input: $(tasks.init.results.build) - operator: in - values: ["true"] - input: $(params.build-source-image) operator: in values: ["true"] @@ -461,7 +410,7 @@ spec: - name: name value: clair-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:dadfea7633d82e4303ba73d5e9c7e2bc16834bde0fd7688880453b26452067eb + value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:b01d8e2c58eb407ac23fa07b8e44c4631f0cf7257e87507c829fa2486aff9804 - name: kind value: task resolver: bundles @@ -508,7 +457,7 @@ spec: - name: name value: sast-shell-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:1f67b661458c549ab299bcdddb5e2b799af8c89d3c594567eb654d870000b5ec + value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:f475b4b6b0c1687fa1aafa5ba38813e04f080b185af2975e12b457742d9dd857 - name: kind value: task resolver: bundles @@ -532,7 +481,7 @@ 
spec: - name: name value: sast-unicode-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.4@sha256:9d2ab1bcd65f56ce32fe366a955abc8ac76228734a3f3642ac9af8af86fbb4d1 + value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.4@sha256:b38140b2f0b2163def80e28a792b2702245d38a5610a504f2e56c198f3b8f70b - name: kind value: task resolver: bundles @@ -556,7 +505,7 @@ spec: - name: name value: sast-snyk-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:0fc62e57ab2c75adf5eaa5c3e5aaeb4845dbf029ddd159b688bc5804579b639f + value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:0c2ab8ce6d419400b63dd67d061052ac51de7b1ebe93f8ae86ed07ac638d756d - name: kind value: task resolver: bundles @@ -581,7 +530,7 @@ spec: - name: name value: clamav-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:4f5ccf2324ecef92aaad6e2adb46c0bb15be49b4869b5b407346c514b764404f + value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:5b5b31eae9063a00b91acc049b536e548d87c730068e439eefe33ab5238ee118 - name: kind value: task resolver: bundles @@ -601,7 +550,7 @@ spec: - name: name value: rpms-signature-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-rpms-signature-scan:0.2@sha256:ccd087c879899b4c7fe2e05c5a2fa5b9829f4826fa2bd60e0db5b1d4bf1a716e + value: quay.io/konflux-ci/tekton-catalog/task-rpms-signature-scan:0.2@sha256:a99d8fd4c9027356b18e5d2910cc44dbc2fcb53c384ba34696645d9e7faa9084 - name: kind value: task resolver: bundles diff --git a/.tekton/operator-bundle-pipeline.yaml b/.tekton/operator-bundle-pipeline.yaml index e7a1b0515a69d..44f67310d2f19 100644 --- a/.tekton/operator-bundle-pipeline.yaml +++ b/.tekton/operator-bundle-pipeline.yaml @@ -77,10 +77,6 @@ spec: description: Path to the Dockerfile inside the context specified by parameter path-context name: dockerfile type: string - - default: "false" - description: Force rebuild image - name: rebuild - type: string - default: "false" description: Skip checks against built image name: skip-checks @@ -256,15 +252,6 @@ spec: - name: init params: - - name: image-url - # We can't provide a StackRox-style tag because it is not known at this time (requires cloning source, etc.) - # As a workaround, we still provide a unique tag that's based on a revision to this task to comply with its - # expected input. We later actually add this tag on a built image with apply-tags task. 
- value: $(params.output-image-repo):konflux-$(params.revision) - - name: rebuild - value: $(params.rebuild) - - name: skip-checks - value: $(params.skip-checks) - name: enable-cache-proxy value: $(params.enable-cache-proxy) taskRef: @@ -272,7 +259,7 @@ spec: - name: name value: init - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:ebf06778aeacbbeb081f9231eafbdfdb8e380ad04e211d7ed80ae9101e37fd82 + value: quay.io/konflux-ci/tekton-catalog/task-init:0.3@sha256:aa6f8632cc23d605c5942505ff1d00280db16a6fda5c4c56c4ed9ae936b5fbc6 - name: kind value: task resolver: bundles @@ -300,10 +287,6 @@ spec: - name: kind value: task resolver: bundles - when: - - input: $(tasks.init.results.build) - operator: in - values: ["true"] workspaces: - name: basic-auth workspace: git-auth @@ -520,14 +503,10 @@ spec: - name: name value: buildah-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-buildah-oci-ta:0.8@sha256:90cee1e47e3221f2bf1090e25228179ab7ac37dde0ffc8791805e63be2d3b4ab + value: quay.io/konflux-ci/tekton-catalog/task-buildah-oci-ta:0.8@sha256:dfeaa48d9706d9b3c304fe2182d37d402f5ab7eb9faa97d3690cee1f870002ff - name: kind value: task resolver: bundles - when: - - input: $(tasks.init.results.build) - operator: in - values: ["true"] - name: apply-tags params: @@ -568,9 +547,6 @@ spec: value: task resolver: bundles when: - - input: $(tasks.init.results.build) - operator: in - values: ["true"] - input: $(params.build-source-image) operator: in values: ["true"] @@ -606,7 +582,7 @@ spec: - name: name value: clair-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:dadfea7633d82e4303ba73d5e9c7e2bc16834bde0fd7688880453b26452067eb + value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:b01d8e2c58eb407ac23fa07b8e44c4631f0cf7257e87507c829fa2486aff9804 - name: kind value: task resolver: bundles @@ -628,7 +604,7 @@ spec: - name: name value: fips-operator-bundle-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-fips-operator-bundle-check-oci-ta:0.1@sha256:056742bc951d5154ddce92accfe450360b7f3a19ec515dd7635a9f2824a76423 + value: quay.io/konflux-ci/tekton-catalog/task-fips-operator-bundle-check-oci-ta:0.1@sha256:2fde1d0d4b085468638df2a9ccfc22c3da0b507a6e4effb15f963d70e9b5eb2f - name: kind value: task resolver: bundles @@ -652,7 +628,7 @@ spec: - name: name value: sast-shell-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:1f67b661458c549ab299bcdddb5e2b799af8c89d3c594567eb654d870000b5ec + value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:f475b4b6b0c1687fa1aafa5ba38813e04f080b185af2975e12b457742d9dd857 - name: kind value: task resolver: bundles @@ -676,7 +652,7 @@ spec: - name: name value: sast-unicode-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.4@sha256:9d2ab1bcd65f56ce32fe366a955abc8ac76228734a3f3642ac9af8af86fbb4d1 + value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.4@sha256:b38140b2f0b2163def80e28a792b2702245d38a5610a504f2e56c198f3b8f70b - name: kind value: task resolver: bundles @@ -700,7 +676,7 @@ spec: - name: name value: sast-snyk-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:0fc62e57ab2c75adf5eaa5c3e5aaeb4845dbf029ddd159b688bc5804579b639f + value: 
quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:0c2ab8ce6d419400b63dd67d061052ac51de7b1ebe93f8ae86ed07ac638d756d - name: kind value: task resolver: bundles @@ -720,7 +696,7 @@ spec: - name: name value: clamav-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:4f5ccf2324ecef92aaad6e2adb46c0bb15be49b4869b5b407346c514b764404f + value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:5b5b31eae9063a00b91acc049b536e548d87c730068e439eefe33ab5238ee118 - name: kind value: task resolver: bundles @@ -740,7 +716,7 @@ spec: - name: name value: rpms-signature-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-rpms-signature-scan:0.2@sha256:ccd087c879899b4c7fe2e05c5a2fa5b9829f4826fa2bd60e0db5b1d4bf1a716e + value: quay.io/konflux-ci/tekton-catalog/task-rpms-signature-scan:0.2@sha256:a99d8fd4c9027356b18e5d2910cc44dbc2fcb53c384ba34696645d9e7faa9084 - name: kind value: task resolver: bundles diff --git a/.tekton/scanner-v4-pipeline.yaml b/.tekton/scanner-v4-pipeline.yaml index dd1f79054e652..36ecddec1ad21 100644 --- a/.tekton/scanner-v4-pipeline.yaml +++ b/.tekton/scanner-v4-pipeline.yaml @@ -77,10 +77,6 @@ spec: description: Path to the Dockerfile inside the context specified by parameter path-context name: dockerfile type: string - - default: "false" - description: Force rebuild image - name: rebuild - type: string - default: "false" description: Skip checks against built image name: skip-checks @@ -159,15 +155,6 @@ spec: - name: init params: - - name: image-url - # We can't provide a StackRox-style tag because it is not known at this time (requires cloning source, etc.) - # As a workaround, we still provide a unique tag that's based on a revision in order for this task to comply with - # its expected input. We later actually add this tag on a built image with the apply-index-image-tag task. 
- value: $(params.output-image-repo):konflux-$(params.revision) - - name: rebuild - value: $(params.rebuild) - - name: skip-checks - value: $(params.skip-checks) - name: enable-cache-proxy value: $(params.enable-cache-proxy) taskRef: @@ -175,7 +162,7 @@ spec: - name: name value: init - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:ebf06778aeacbbeb081f9231eafbdfdb8e380ad04e211d7ed80ae9101e37fd82 + value: quay.io/konflux-ci/tekton-catalog/task-init:0.3@sha256:aa6f8632cc23d605c5942505ff1d00280db16a6fda5c4c56c4ed9ae936b5fbc6 - name: kind value: task resolver: bundles @@ -203,10 +190,6 @@ spec: - name: kind value: task resolver: bundles - when: - - input: $(tasks.init.results.build) - operator: in - values: ["true"] workspaces: - name: basic-auth workspace: git-auth @@ -331,14 +314,10 @@ spec: - name: name value: buildah-remote-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.8@sha256:6de65de9e78397d4f3be81b0ed7cef4b7b2cb6dceee059bcc741c8ff9d6e128a + value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.8@sha256:f579f6d72db53da9e0b3f84e89d66d0f81c360a0785576308a93c3774b73be11 - name: kind value: task resolver: bundles - when: - - input: $(tasks.init.results.build) - operator: in - values: ["true"] - name: build-image-index params: @@ -358,39 +337,12 @@ spec: - name: name value: build-image-index - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.2@sha256:8c422a5380a3d877257003dee153190322af84fe6f4f25e9eee7d8bf61a62577 + value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.2@sha256:8fd2bd1329a833cf1c3c049c0044961d6f5d5bfc954ebf188c19a56852c42600 - name: kind value: task resolver: bundles - when: - - input: $(tasks.init.results.build) - operator: in - values: ["true"] runAfter: [build-images] - - name: apply-index-image-tag - params: - - name: IMAGE_URL - value: $(tasks.build-image-index.results.IMAGE_URL) - - name: IMAGE_DIGEST - value: $(tasks.build-image-index.results.IMAGE_DIGEST) - - name: ADDITIONAL_TAGS - value: - - konflux-$(params.revision) - taskRef: - params: - - name: name - value: apply-tags - - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-apply-tags:0.3@sha256:510b6d2a3b188adeb716e49566b57d611ab36bd69a2794b5ddfc11dbf014c2ca - - name: kind - value: task - resolver: bundles - when: - - input: $(tasks.init.results.build) - operator: in - values: ["true"] - - name: build-source-image params: - name: BINARY_IMAGE @@ -411,9 +363,6 @@ spec: value: task resolver: bundles when: - - input: $(tasks.init.results.build) - operator: in - values: ["true"] - input: $(params.build-source-image) operator: in values: ["true"] @@ -454,7 +403,7 @@ spec: - name: name value: clair-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:dadfea7633d82e4303ba73d5e9c7e2bc16834bde0fd7688880453b26452067eb + value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:b01d8e2c58eb407ac23fa07b8e44c4631f0cf7257e87507c829fa2486aff9804 - name: kind value: task resolver: bundles @@ -501,7 +450,7 @@ spec: - name: name value: sast-shell-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:1f67b661458c549ab299bcdddb5e2b799af8c89d3c594567eb654d870000b5ec + value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:f475b4b6b0c1687fa1aafa5ba38813e04f080b185af2975e12b457742d9dd857 - name: kind value: task resolver: bundles @@ -525,7 +474,7 @@ spec: - 
name: name value: sast-unicode-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.4@sha256:9d2ab1bcd65f56ce32fe366a955abc8ac76228734a3f3642ac9af8af86fbb4d1 + value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.4@sha256:b38140b2f0b2163def80e28a792b2702245d38a5610a504f2e56c198f3b8f70b - name: kind value: task resolver: bundles @@ -549,7 +498,7 @@ spec: - name: name value: sast-snyk-check-oci-ta - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:0fc62e57ab2c75adf5eaa5c3e5aaeb4845dbf029ddd159b688bc5804579b639f + value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:0c2ab8ce6d419400b63dd67d061052ac51de7b1ebe93f8ae86ed07ac638d756d - name: kind value: task resolver: bundles @@ -574,7 +523,7 @@ spec: - name: name value: clamav-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:4f5ccf2324ecef92aaad6e2adb46c0bb15be49b4869b5b407346c514b764404f + value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:5b5b31eae9063a00b91acc049b536e548d87c730068e439eefe33ab5238ee118 - name: kind value: task resolver: bundles @@ -594,7 +543,7 @@ spec: - name: name value: rpms-signature-scan - name: bundle - value: quay.io/konflux-ci/tekton-catalog/task-rpms-signature-scan:0.2@sha256:ccd087c879899b4c7fe2e05c5a2fa5b9829f4826fa2bd60e0db5b1d4bf1a716e + value: quay.io/konflux-ci/tekton-catalog/task-rpms-signature-scan:0.2@sha256:a99d8fd4c9027356b18e5d2910cc44dbc2fcb53c384ba34696645d9e7faa9084 - name: kind value: task resolver: bundles From 09177e414985ca990463f20bb3edfc4c6a9f2d4e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Feb 2026 06:13:10 -0500 Subject: [PATCH 159/232] chore(deps): bump github.com/Azure/azure-sdk-for-go-extensions from 0.5.0 to 0.5.1 (#18914) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 14 +++++++------- go.sum | 28 ++++++++++++++-------------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/go.mod b/go.mod index 272c94f58ee0c..441baf54f9655 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( cloud.google.com/go/securitycenter v1.38.1 cloud.google.com/go/storage v1.59.2 dario.cat/mergo v1.0.2 - github.com/Azure/azure-sdk-for-go-extensions v0.5.0 + github.com/Azure/azure-sdk-for-go-extensions v0.5.1 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 github.com/Azure/azure-sdk-for-go/sdk/containers/azcontainerregistry v0.2.3 @@ -490,15 +490,15 @@ require ( go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/contrib/detectors/gcp v1.38.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 // indirect - go.opentelemetry.io/otel v1.39.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect - go.opentelemetry.io/otel/metric v1.39.0 // indirect - go.opentelemetry.io/otel/sdk v1.39.0 // indirect - 
go.opentelemetry.io/otel/sdk/metric v1.39.0 // indirect - go.opentelemetry.io/otel/trace v1.39.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/sdk v1.40.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect go.opentelemetry.io/proto/otlp v1.7.1 // indirect go.uber.org/multierr v1.11.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect diff --git a/go.sum b/go.sum index 244138cb3ce98..70b70faac6b6d 100644 --- a/go.sum +++ b/go.sum @@ -90,8 +90,8 @@ github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0/go.mod h1:tlqp9mUGbsP+0z3Q+c0Q5MgSdq/OMwQhm5bffR3Q3ss= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go-extensions v0.5.0 h1:Hvzr/oLQ1XsrD2AylWy0ii99Hz4Te4O/kZEfgC4B6mo= -github.com/Azure/azure-sdk-for-go-extensions v0.5.0/go.mod h1:kUQAe8kR4uL8fWbBUsrlb5WudbgkmLxuqNkzKUo+0DI= +github.com/Azure/azure-sdk-for-go-extensions v0.5.1 h1:kV3u4tAWoFd+0wipN7QKSWckDkAHR06mZ3LglDuYSVM= +github.com/Azure/azure-sdk-for-go-extensions v0.5.1/go.mod h1:adhNwBpL1vnUS6yvTCbu0tVB/b6SdmmQhU9SpwYtjjY= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0 h1:fou+2+WFTib47nS+nz/ozhEBnvU96bKHy6LjRsY4E28= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0/go.mod h1:t76Ruy8AHvUAC8GfMWJMa0ElSbuIcO03NLpynfbgsPA= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4= @@ -1681,11 +1681,11 @@ go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQ go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 h1:ssfIgGNANqpVFCndZvcuyKbl0g+UAVcbBcqGkG28H0Y= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0/go.mod h1:GQ/474YrbE4Jx8gZ4q5I4hrhUzM6UPzyrqJYV2AqPoQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= -go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= -go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8= @@ -1710,17 +1710,17 @@ 
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsu go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s= go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= -go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= -go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= -go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= -go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs= go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo= -go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8= -go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= -go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= -go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= From c3b7b3ca4f681494a3464f391f791b2b910ff7ff Mon Sep 17 00:00:00 2001 From: Mauro Ezequiel Moltrasio Date: Wed, 11 Feb 2026 12:25:30 +0100 Subject: [PATCH 160/232] fix(fact): push fact manifest alongside collector and scanner (#18920) --- .github/workflows/build.yaml | 2 +- scripts/ci/lib.sh | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 750e389193dac..b83806c1e8957 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -65,7 +65,7 @@ jobs: matrix="$(jq '.push_operator_multiarch_manifests.archs = [.build_and_push_operator.arch | join(",")]' <<< "$matrix")" matrix="$(jq '.scan_images_with_roxctl.name += ["RHACS_BRANDING", "STACKROX_BRANDING"]' <<< "$matrix")" - matrix="$(jq '.scan_images_with_roxctl.image += ["central-db", "collector", "main", "roxctl", "scanner", "scanner-db", "scanner-db-slim", "scanner-slim", "stackrox-operator"]' <<< "$matrix")" + matrix="$(jq '.scan_images_with_roxctl.image += ["central-db", "collector", "fact", "main", "roxctl", "scanner", "scanner-db", "scanner-db-slim", "scanner-slim", 
"stackrox-operator"]' <<< "$matrix")" # TODO(ROX-27191): remove the exclusion once there's a community operator. matrix="$(jq '.scan_images_with_roxctl.exclude += [{ "name": "STACKROX_BRANDING", "image": "stackrox-operator" }]' <<< "$matrix")" diff --git a/scripts/ci/lib.sh b/scripts/ci/lib.sh index a6e41e259e581..8bdbf9f376d60 100755 --- a/scripts/ci/lib.sh +++ b/scripts/ci/lib.sh @@ -254,8 +254,8 @@ push_image_manifest_lists() { fi done - # Push manifest lists for scanner and collector for amd64 only - local amd64_image_set=("scanner" "scanner-db" "scanner-slim" "scanner-db-slim" "collector") + # Push manifest lists for scanner, fact and collector for amd64 only + local amd64_image_set=("scanner" "scanner-db" "scanner-slim" "scanner-db-slim" "collector" "fact") for image in "${amd64_image_set[@]}"; do retry 5 true \ "$SCRIPTS_ROOT/scripts/ci/push-as-multiarch-manifest-list.sh" "${registry}/${image}:${tag}" "amd64" | cat From b234a722c7d45c377bec4283c4d0b3707b674f34 Mon Sep 17 00:00:00 2001 From: AJ Heflin <77823405+ajheflin@users.noreply.github.com> Date: Wed, 11 Feb 2026 09:25:22 -0500 Subject: [PATCH 161/232] ROX-33057: Fix CVE info datastore mutex deadlock (#18952) Co-authored-by: Claude Opus 4.6 (1M context) --- .../image/info/datastore/datastore_impl.go | 10 ++++ .../info/datastore/store/postgres/gen.go | 2 +- .../info/datastore/store/postgres/store.go | 56 +------------------ 3 files changed, 12 insertions(+), 56 deletions(-) diff --git a/central/cve/image/info/datastore/datastore_impl.go b/central/cve/image/info/datastore/datastore_impl.go index 8bbc2f0259db5..57a689bbd006f 100644 --- a/central/cve/image/info/datastore/datastore_impl.go +++ b/central/cve/image/info/datastore/datastore_impl.go @@ -8,10 +8,14 @@ import ( "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/protocompat" "github.com/stackrox/rox/pkg/sliceutils" + "github.com/stackrox/rox/pkg/sync" ) type datastoreImpl struct { storage store.Store + // upsertLock serializes Upsert and UpsertMany to make the + // read-modify-write of timestamp merging atomic. 
+ upsertLock sync.Mutex } func (ds *datastoreImpl) SearchRawImageCVEInfos(ctx context.Context, q *v1.Query) ([]*storage.ImageCVEInfo, error) { @@ -41,6 +45,9 @@ func (ds *datastoreImpl) GetBatch(ctx context.Context, ids []string) ([]*storage } func (ds *datastoreImpl) Upsert(ctx context.Context, info *storage.ImageCVEInfo) error { + ds.upsertLock.Lock() + defer ds.upsertLock.Unlock() + existing, found, err := ds.Get(ctx, info.GetId()) if err != nil { return err @@ -53,6 +60,9 @@ func (ds *datastoreImpl) Upsert(ctx context.Context, info *storage.ImageCVEInfo) } func (ds *datastoreImpl) UpsertMany(ctx context.Context, infos []*storage.ImageCVEInfo) error { + ds.upsertLock.Lock() + defer ds.upsertLock.Unlock() + // Create a list of ids to look up ids := sliceutils.Map[*storage.ImageCVEInfo, string](infos, func(info *storage.ImageCVEInfo) string { return info.GetId() diff --git a/central/cve/image/info/datastore/store/postgres/gen.go b/central/cve/image/info/datastore/store/postgres/gen.go index 22f5743e25a80..ee0dbb0186f58 100644 --- a/central/cve/image/info/datastore/store/postgres/gen.go +++ b/central/cve/image/info/datastore/store/postgres/gen.go @@ -1,3 +1,3 @@ package postgres -//go:generate pg-table-bindings-wrapper --type=storage.ImageCVEInfo --cached-store +//go:generate pg-table-bindings-wrapper --type=storage.ImageCVEInfo --cached-store --no-copy-from diff --git a/central/cve/image/info/datastore/store/postgres/store.go b/central/cve/image/info/datastore/store/postgres/store.go index a19af9e49b679..c9d62a93a4de7 100644 --- a/central/cve/image/info/datastore/store/postgres/store.go +++ b/central/cve/image/info/datastore/store/postgres/store.go @@ -68,7 +68,7 @@ func New(db postgres.DB) Store { schema, pkGetter, insertIntoImageCveInfos, - copyFromImageCveInfos, + nil, metricsSetAcquireDBConnDuration, metricsSetPostgresOperationDurationTime, metricsSetCacheOperationDurationTime, @@ -118,58 +118,4 @@ func insertIntoImageCveInfos(batch *pgx.Batch, obj *storage.ImageCVEInfo) error return nil } -var copyColsImageCveInfos = []string{ - "id", - "fixavailabletimestamp", - "firstsystemoccurrence", - "cve", - "serialized", -} - -func copyFromImageCveInfos(ctx context.Context, s pgSearch.Deleter, tx *postgres.Tx, objs ...*storage.ImageCVEInfo) error { - if len(objs) == 0 { - return nil - } - - { - // CopyFrom does not upsert, so delete existing rows first to achieve upsert behavior. - // Parent deletion cascades to children, so only the top-level parent needs deletion. 
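	// Why this path can be dropped: with --no-copy-from the store falls back to
	// the batched insertIntoImageCveInfos path, whose generated statement
	// (assumed shape — the full statement is not shown in this hunk) upserts
	// directly, roughly:
	//
	//	INSERT INTO image_cve_infos (id, fixavailabletimestamp, firstsystemoccurrence, cve, serialized)
	//	VALUES ($1, $2, $3, $4, $5)
	//	ON CONFLICT (id) DO UPDATE SET ...
	//
	// so the delete-then-COPY round trip implemented here is no longer needed.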
- deletes := make([]string, 0, len(objs)) - for _, obj := range objs { - deletes = append(deletes, obj.GetId()) - } - if err := s.DeleteMany(ctx, deletes); err != nil { - return err - } - } - - idx := 0 - inputRows := pgx.CopyFromFunc(func() ([]any, error) { - if idx >= len(objs) { - return nil, nil - } - obj := objs[idx] - idx++ - - serialized, marshalErr := obj.MarshalVT() - if marshalErr != nil { - return nil, marshalErr - } - - return []interface{}{ - obj.GetId(), - protocompat.NilOrTime(obj.GetFixAvailableTimestamp()), - protocompat.NilOrTime(obj.GetFirstSystemOccurrence()), - obj.GetCve(), - serialized, - }, nil - }) - - if _, err := tx.CopyFrom(ctx, pgx.Identifier{"image_cve_infos"}, copyColsImageCveInfos, inputRows); err != nil { - return err - } - - return nil -} - // endregion Helper functions From 6e911fcd341ae3248b1bebb8e23ac534b4d7dcd3 Mon Sep 17 00:00:00 2001 From: Brad Rogers <61400697+bradr5@users.noreply.github.com> Date: Wed, 11 Feb 2026 09:15:55 -0600 Subject: [PATCH 162/232] ROX-27260: Fix policy scope validation for edit/clone (#18925) --- .../Wizard/policyValidationSchemas.test.ts | 29 +++++++++++++++++++ .../Wizard/policyValidationSchemas.ts | 3 +- .../src/Containers/Policies/policies.utils.ts | 2 +- 3 files changed, 32 insertions(+), 2 deletions(-) diff --git a/ui/apps/platform/src/Containers/Policies/Wizard/policyValidationSchemas.test.ts b/ui/apps/platform/src/Containers/Policies/Wizard/policyValidationSchemas.test.ts index 958693d69a02e..938f2c601aa3e 100644 --- a/ui/apps/platform/src/Containers/Policies/Wizard/policyValidationSchemas.test.ts +++ b/ui/apps/platform/src/Containers/Policies/Wizard/policyValidationSchemas.test.ts @@ -415,6 +415,35 @@ describe('Step 4', () => { expect(validationSchemaStep4.validateSync(value)).toEqual(value); }); + it('passes if name has non-empty string and scope is null', () => { + const value: WizardPolicyStep4 = { + scope: [], + excludedDeploymentScopes: [ + { + name: 'node-resolver', + scope: null, + }, + ], + excludedImageNames: [], + }; + expect(validationSchemaStep4.validateSync(value)).toEqual(value); + }); + + it('throws if scope is null and name is absent', () => { + const value: WizardPolicyStep4 = { + scope: [], + excludedDeploymentScopes: [ + { + scope: null, + }, + ], + excludedImageNames: [], + }; + expect(() => { + validationSchemaStep4.validateSync(value); + }).toThrow(); + }); + it('throws if first excluded deployment has non-empty strings and second excluded deployment has initial values', () => { const value: WizardPolicyStep4 = { scope: [], diff --git a/ui/apps/platform/src/Containers/Policies/Wizard/policyValidationSchemas.ts b/ui/apps/platform/src/Containers/Policies/Wizard/policyValidationSchemas.ts index 77486d6ae03c3..3799e49e02732 100644 --- a/ui/apps/platform/src/Containers/Policies/Wizard/policyValidationSchemas.ts +++ b/ui/apps/platform/src/Containers/Policies/Wizard/policyValidationSchemas.ts @@ -200,7 +200,8 @@ export const validationSchemaStep4: yup.ObjectSchema = yup.ob .object() .shape({ name: yup.string(), - scope: scopeSchema, + // Backend returns scope as null for name only exclusions. 
+ scope: scopeSchema.nullable(), }) .test( 'excluded-deployment-has-at-least-one-property', diff --git a/ui/apps/platform/src/Containers/Policies/policies.utils.ts b/ui/apps/platform/src/Containers/Policies/policies.utils.ts index a4da31e61bbdd..ec7cb99157774 100644 --- a/ui/apps/platform/src/Containers/Policies/policies.utils.ts +++ b/ui/apps/platform/src/Containers/Policies/policies.utils.ts @@ -274,7 +274,7 @@ export type WizardPolicyStep4 = { export type WizardExcludedDeployment = { name?: string; - scope: WizardScope; + scope: WizardScope | null; }; /* From 6ae76333fa6b742443b3df97f06c1b16c748174f Mon Sep 17 00:00:00 2001 From: Stephan Hesselmann Date: Wed, 11 Feb 2026 16:44:43 +0100 Subject: [PATCH 163/232] ROX-31216: show signature integration name - take 2 (#18899) --- central/graphql/resolvers/gen/main.go | 4 + .../resolvers/image_signature_verification.go | 30 ++++ central/graphql/resolvers/resolver.go | 3 + central/image/service/service.go | 29 ++-- central/image/service/service_impl.go | 22 ++- .../service/service_impl_benchmark_test.go | 2 +- central/image/service/singleton.go | 2 + central/imagev2/datastore/datastore.go | 1 - generated/api/v1/image_service.swagger.json | 6 +- .../api/v1/vuln_mgmt_service.swagger.json | 6 +- generated/storage/image.pb.go | 20 ++- generated/storage/image_vtproto.pb.go | 83 ++++++++++ pkg/signatureintegration/enricher.go | 66 ++++++++ pkg/signatureintegration/enricher_test.go | 151 ++++++++++++++++++ proto/storage/image.proto | 4 +- proto/storage/proto.lock | 5 + .../Image/ImagePageSignatureVerification.tsx | 25 ++- .../components/ImageDetailBadges.tsx | 1 + .../src/Containers/Vulnerabilities/types.ts | 1 + 19 files changed, 435 insertions(+), 26 deletions(-) create mode 100644 central/graphql/resolvers/image_signature_verification.go create mode 100644 pkg/signatureintegration/enricher.go create mode 100644 pkg/signatureintegration/enricher_test.go diff --git a/central/graphql/resolvers/gen/main.go b/central/graphql/resolvers/gen/main.go index 3ca94ad62b263..dd437e60ea284 100644 --- a/central/graphql/resolvers/gen/main.go +++ b/central/graphql/resolvers/gen/main.go @@ -127,6 +127,10 @@ var ( ParentType: reflect.TypeOf(storage.ImageComponentV2{}), FieldName: "Location", }, + { + ParentType: reflect.TypeOf(storage.ImageSignatureVerificationResult{}), + FieldName: "VerifierName", + }, }, InputTypes: []reflect.Type{ reflect.TypeOf((*inputtypes.FalsePositiveVulnRequest)(nil)), diff --git a/central/graphql/resolvers/image_signature_verification.go b/central/graphql/resolvers/image_signature_verification.go new file mode 100644 index 0000000000000..626b38875ab49 --- /dev/null +++ b/central/graphql/resolvers/image_signature_verification.go @@ -0,0 +1,30 @@ +package resolvers + +import ( + "context" + + "github.com/stackrox/rox/pkg/signatureintegration" + "github.com/stackrox/rox/pkg/utils" +) + +func init() { + schema := getBuilder() + utils.Must( + schema.AddExtraResolver("ImageSignatureVerificationResult", "verifierName: String!"), + ) +} + +// VerifierName returns the signature integration name. If the name was +// pre-populated by the service, it is returned directly. Otherwise, a lookup +// is performed from the SignatureIntegrationDataStore. +func (resolver *imageSignatureVerificationResultResolver) VerifierName(ctx context.Context) (string, error) { + // Short-circuit if the name was already populated by the service. 
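	// An illustrative query that reaches this resolver, using the fields the UI
	// fragment in this patch requests (the surrounding image selection is an
	// assumption):
	//
	//	{
	//	  image(id: "sha256:...") {
	//	    signatureVerificationData {
	//	      results {
	//	        verificationTime
	//	        verifierId
	//	        verifierName
	//	      }
	//	    }
	//	  }
	//	}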
+ if name := resolver.data.GetVerifierName(); name != "" { + return name, nil + } + return signatureintegration.GetVerifierName( + ctx, + resolver.root.SignatureIntegrationDataStore, + resolver.data.GetVerifierId(), + ) +} diff --git a/central/graphql/resolvers/resolver.go b/central/graphql/resolvers/resolver.go index 6671e78dccf92..7d90f9e275efd 100644 --- a/central/graphql/resolvers/resolver.go +++ b/central/graphql/resolvers/resolver.go @@ -49,6 +49,7 @@ import ( roleDataStore "github.com/stackrox/rox/central/role/datastore" secretDataStore "github.com/stackrox/rox/central/secret/datastore" serviceAccountDataStore "github.com/stackrox/rox/central/serviceaccount/datastore" + signatureIntegrationDataStore "github.com/stackrox/rox/central/signatureintegration/datastore" "github.com/stackrox/rox/central/views/imagecomponentflat" "github.com/stackrox/rox/central/views/imagecve" "github.com/stackrox/rox/central/views/imagecveflat" @@ -106,6 +107,7 @@ type Resolver struct { ViolationsDataStore violationsDatastore.DataStore BaselineDataStore baselineStore.DataStore WatchedImageDataStore watchedImageDataStore.DataStore + SignatureIntegrationDataStore signatureIntegrationDataStore.DataStore orchestratorIstioCVEManager fetcher.OrchestratorIstioCVEManager cveMatcher *cveMatcher.CVEMatcher manager complianceOperatorManager.Manager @@ -156,6 +158,7 @@ func New() *Resolver { ViolationsDataStore: violationsDatastore.Singleton(), BaselineDataStore: baselineStore.Singleton(), WatchedImageDataStore: watchedImageDataStore.Singleton(), + SignatureIntegrationDataStore: signatureIntegrationDataStore.Singleton(), orchestratorIstioCVEManager: fetcher.SingletonManager(), cveMatcher: cveMatcher.Singleton(), manager: complianceOperatorManager.Singleton(), diff --git a/central/image/service/service.go b/central/image/service/service.go index 24b30c1f2bf0b..2666b07eab8e3 100644 --- a/central/image/service/service.go +++ b/central/image/service/service.go @@ -9,6 +9,7 @@ import ( "github.com/stackrox/rox/central/risk/manager" "github.com/stackrox/rox/central/role/sachelper" "github.com/stackrox/rox/central/sensor/service/connection" + signatureIntegrationDS "github.com/stackrox/rox/central/signatureintegration/datastore" watchedImageDataStore "github.com/stackrox/rox/central/watchedimage/datastore" v1 "github.com/stackrox/rox/generated/api/v1" "github.com/stackrox/rox/generated/storage" @@ -49,21 +50,23 @@ func New( scanWaiterManager waiter.Manager[*storage.Image], scanWaiterManagerV2 waiter.Manager[*storage.ImageV2], clusterSACHelper sachelper.ClusterSacHelper, + signatureIntegrationDataStore signatureIntegrationDS.DataStore, ) Service { images.SetCentralScanSemaphoreLimit(float64(env.MaxParallelImageScanInternal.IntegerSetting())) return &serviceImpl{ - datastore: datastore, - datastoreV2: datastoreV2, - mappingDatastore: mappingDatastore, - watchedImages: watchedImages, - riskManager: riskManager, - enricher: enricher, - enricherV2: enricherV2, - metadataCache: metadataCache, - connManager: connManager, - scanWaiterManager: scanWaiterManager, - scanWaiterManagerV2: scanWaiterManagerV2, - internalScanSemaphore: semaphore.NewWeighted(int64(env.MaxParallelImageScanInternal.IntegerSetting())), - clusterSACHelper: clusterSACHelper, + datastore: datastore, + datastoreV2: datastoreV2, + mappingDatastore: mappingDatastore, + watchedImages: watchedImages, + riskManager: riskManager, + enricher: enricher, + enricherV2: enricherV2, + metadataCache: metadataCache, + connManager: connManager, + scanWaiterManager: 
scanWaiterManager, + scanWaiterManagerV2: scanWaiterManagerV2, + internalScanSemaphore: semaphore.NewWeighted(int64(env.MaxParallelImageScanInternal.IntegerSetting())), + clusterSACHelper: clusterSACHelper, + signatureIntegrationDataStore: signatureIntegrationDataStore, } } diff --git a/central/image/service/service_impl.go b/central/image/service/service_impl.go index e06a4622a53f2..e08294d96f35f 100644 --- a/central/image/service/service_impl.go +++ b/central/image/service/service_impl.go @@ -17,6 +17,7 @@ import ( "github.com/stackrox/rox/central/risk/manager" "github.com/stackrox/rox/central/role/sachelper" "github.com/stackrox/rox/central/sensor/service/connection" + signatureIntegrationDS "github.com/stackrox/rox/central/signatureintegration/datastore" watchedImageDataStore "github.com/stackrox/rox/central/watchedimage/datastore" v1 "github.com/stackrox/rox/generated/api/v1" "github.com/stackrox/rox/generated/internalapi/central" @@ -43,6 +44,7 @@ import ( "github.com/stackrox/rox/pkg/search" "github.com/stackrox/rox/pkg/search/paginated" "github.com/stackrox/rox/pkg/set" + "github.com/stackrox/rox/pkg/signatureintegration" "github.com/stackrox/rox/pkg/timestamp" pkgUtils "github.com/stackrox/rox/pkg/utils" "github.com/stackrox/rox/pkg/waiter" @@ -95,7 +97,8 @@ var ( imageScanMetricsLabel = prometheus.Labels{ "subsystem": "central", "entity": "central-image-scan-service", - "requestedFrom": "n/a"} + "requestedFrom": "n/a", + } ) // serviceImpl provides APIs for alerts. @@ -123,6 +126,8 @@ type serviceImpl struct { scanWaiterManagerV2 waiter.Manager[*storage.ImageV2] clusterSACHelper sachelper.ClusterSacHelper + + signatureIntegrationDataStore signatureIntegrationDS.DataStore } // RegisterServiceServer registers this service with the given gRPC Server. 
@@ -165,6 +170,9 @@ func (s *serviceImpl) GetImage(ctx context.Context, request *v1.GetImageRequest) utils.StripCVEDescriptionsNoClone(image) } utils.StripDatasourceNoClone(image.GetScan()) + signatureintegration.EnrichVerificationResults(ctx, + s.signatureIntegrationDataStore, image.GetSignatureVerificationData().GetResults(), + ) return image, nil } @@ -218,6 +226,9 @@ func (s *serviceImpl) ExportImages(req *v1.ExportImageRequest, srv v1.ImageServi } return s.mappingDatastore.WalkByQuery(ctx, parsedQuery, func(image *storage.Image) error { utils.StripDatasourceNoClone(image.GetScan()) + signatureintegration.EnrichVerificationResults(ctx, + s.signatureIntegrationDataStore, image.GetSignatureVerificationData().GetResults(), + ) if err := srv.Send(&v1.ExportImageResponse{Image: image}); err != nil { return err } @@ -626,7 +637,11 @@ func (s *serviceImpl) ScanImage(ctx context.Context, request *v1.ScanImageReques } utils.StripDatasourceNoClone(imgV2.GetScan()) - return utils.ConvertToV1(imgV2), nil + img := utils.ConvertToV1(imgV2) + signatureintegration.EnrichVerificationResults(ctx, + s.signatureIntegrationDataStore, img.GetSignatureVerificationData().GetResults(), + ) + return img, nil } img, err := enricher.EnrichImageByName(ctx, s.enricher, enrichmentCtx, request.GetImageName()) @@ -652,6 +667,9 @@ func (s *serviceImpl) ScanImage(ctx context.Context, request *v1.ScanImageReques utils.FilterSuppressedCVEsNoClone(img) } utils.StripDatasourceNoClone(img.GetScan()) + signatureintegration.EnrichVerificationResults(ctx, + s.signatureIntegrationDataStore, img.GetSignatureVerificationData().GetResults(), + ) return img, nil } diff --git a/central/image/service/service_impl_benchmark_test.go b/central/image/service/service_impl_benchmark_test.go index 92125cce518cc..83d0c5e0093de 100644 --- a/central/image/service/service_impl_benchmark_test.go +++ b/central/image/service/service_impl_benchmark_test.go @@ -22,7 +22,7 @@ func BenchmarkService_Export(b *testing.B) { b.Error(err) } - svc := New(testHelper.Images, testHelper.ImagesV2, testHelper.Images, nil, nil, nil, nil, nil, nil, nil, nil, nil) + svc := New(testHelper.Images, testHelper.ImagesV2, testHelper.Images, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) benchmarkFunc := getExportServiceBenchmark(testHelper, svc) testHelper.InjectDataAndRunBenchmark(b, true, benchmarkFunc) } diff --git a/central/image/service/singleton.go b/central/image/service/singleton.go index fd497522049f0..2eb117c289460 100644 --- a/central/image/service/singleton.go +++ b/central/image/service/singleton.go @@ -11,6 +11,7 @@ import ( "github.com/stackrox/rox/central/risk/manager" "github.com/stackrox/rox/central/role/sachelper" "github.com/stackrox/rox/central/sensor/service/connection" + signatureIntegrationDS "github.com/stackrox/rox/central/signatureintegration/datastore" watchedImageDataStore "github.com/stackrox/rox/central/watchedimage/datastore" "github.com/stackrox/rox/pkg/images/cache" "github.com/stackrox/rox/pkg/sync" @@ -36,6 +37,7 @@ func initialize() { scanwaiter.Singleton(), scanwaiterv2.Singleton(), sachelper.NewClusterSacHelper(clusterDataStore.Singleton()), + signatureIntegrationDS.Singleton(), ) } diff --git a/central/imagev2/datastore/datastore.go b/central/imagev2/datastore/datastore.go index 822590f67a77d..2238dfa86049e 100644 --- a/central/imagev2/datastore/datastore.go +++ b/central/imagev2/datastore/datastore.go @@ -33,7 +33,6 @@ type DataStore interface { DeleteImages(ctx context.Context, ids ...string) error Exists(ctx context.Context, 
id string) (bool, error) - GetImageIDsAndDigests(ctx context.Context, q *v1.Query) ([]*views.ImageIDAndDigestView, error) } diff --git a/generated/api/v1/image_service.swagger.json b/generated/api/v1/image_service.swagger.json index c294f0865c1f0..962d131a39716 100644 --- a/generated/api/v1/image_service.swagger.json +++ b/generated/api/v1/image_service.swagger.json @@ -1279,9 +1279,13 @@ "type": "string" }, "description": "The full image names that are verified by this specific signature integration ID." + }, + "verifierName": { + "type": "string", + "description": "verifier_name is the name of the signature integration associated with `verifier_id`." } }, - "title": "Next Tag: 6" + "title": "Next Tag: 7" }, "storageImageSignatureVerificationResultStatus": { "type": "string", diff --git a/generated/api/v1/vuln_mgmt_service.swagger.json b/generated/api/v1/vuln_mgmt_service.swagger.json index 5655f69e2ed52..3cf96975499a8 100644 --- a/generated/api/v1/vuln_mgmt_service.swagger.json +++ b/generated/api/v1/vuln_mgmt_service.swagger.json @@ -1235,9 +1235,13 @@ "type": "string" }, "description": "The full image names that are verified by this specific signature integration ID." + }, + "verifierName": { + "type": "string", + "description": "verifier_name is the name of the signature integration associated with `verifier_id`." } }, - "title": "Next Tag: 6" + "title": "Next Tag: 7" }, "storageImageSignatureVerificationResultStatus": { "type": "string", diff --git a/generated/storage/image.pb.go b/generated/storage/image.pb.go index 102c049cfda62..50101ba9462ee 100644 --- a/generated/storage/image.pb.go +++ b/generated/storage/image.pb.go @@ -759,7 +759,7 @@ func (x *ImageSignatureVerificationData) GetResults() []*ImageSignatureVerificat return nil } -// Next Tag: 6 +// Next Tag: 7 type ImageSignatureVerificationResult struct { state protoimpl.MessageState `protogen:"open.v1"` VerificationTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=verification_time,json=verificationTime,proto3" json:"verification_time,omitempty"` @@ -770,8 +770,10 @@ type ImageSignatureVerificationResult struct { Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` // The full image names that are verified by this specific signature integration ID. VerifiedImageReferences []string `protobuf:"bytes,5,rep,name=verified_image_references,json=verifiedImageReferences,proto3" json:"verified_image_references,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // verifier_name is the name of the signature integration associated with `verifier_id`. 
+ VerifierName string `protobuf:"bytes,6,opt,name=verifier_name,json=verifierName,proto3" json:"verifier_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ImageSignatureVerificationResult) Reset() { @@ -839,6 +841,13 @@ func (x *ImageSignatureVerificationResult) GetVerifiedImageReferences() []string return nil } +func (x *ImageSignatureVerificationResult) GetVerifierName() string { + if x != nil { + return x.VerifierName + } + return "" +} + // Next Tag: 14 type EmbeddedImageScanComponent struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -2067,14 +2076,15 @@ const file_storage_image_proto_rawDesc = "" + "\x1fCERTIFIED_RHEL_SCAN_UNAVAILABLE\x10\x06B\v\n" + "\thashoneof\"e\n" + "\x1eImageSignatureVerificationData\x12C\n" + - "\aresults\x18\x01 \x03(\v2).storage.ImageSignatureVerificationResultR\aresults\"\xb9\x03\n" + + "\aresults\x18\x01 \x03(\v2).storage.ImageSignatureVerificationResultR\aresults\"\xde\x03\n" + " ImageSignatureVerificationResult\x12G\n" + "\x11verification_time\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\x10verificationTime\x12\x1f\n" + "\vverifier_id\x18\x02 \x01(\tR\n" + "verifierId\x12H\n" + "\x06status\x18\x03 \x01(\x0e20.storage.ImageSignatureVerificationResult.StatusR\x06status\x12 \n" + "\vdescription\x18\x04 \x01(\tR\vdescription\x12:\n" + - "\x19verified_image_references\x18\x05 \x03(\tR\x17verifiedImageReferences\"\x82\x01\n" + + "\x19verified_image_references\x18\x05 \x03(\tR\x17verifiedImageReferences\x12#\n" + + "\rverifier_name\x18\x06 \x01(\tR\fverifierName\"\x82\x01\n" + "\x06Status\x12\t\n" + "\x05UNSET\x10\x00\x12\f\n" + "\bVERIFIED\x10\x01\x12\x17\n" + diff --git a/generated/storage/image_vtproto.pb.go b/generated/storage/image_vtproto.pb.go index 5424204c95f3a..46f1aa2fd8e82 100644 --- a/generated/storage/image_vtproto.pb.go +++ b/generated/storage/image_vtproto.pb.go @@ -213,6 +213,7 @@ func (m *ImageSignatureVerificationResult) CloneVT() *ImageSignatureVerification r.VerifierId = m.VerifierId r.Status = m.Status r.Description = m.Description + r.VerifierName = m.VerifierName if rhs := m.VerifiedImageReferences; rhs != nil { tmpContainer := make([]string, len(rhs)) copy(tmpContainer, rhs) @@ -1029,6 +1030,9 @@ func (this *ImageSignatureVerificationResult) EqualVT(that *ImageSignatureVerifi return false } } + if this.VerifierName != that.VerifierName { + return false + } return string(this.unknownFields) == string(that.unknownFields) } @@ -2233,6 +2237,13 @@ func (m *ImageSignatureVerificationResult) MarshalToSizedBufferVT(dAtA []byte) ( i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.VerifierName) > 0 { + i -= len(m.VerifierName) + copy(dAtA[i:], m.VerifierName) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VerifierName))) + i-- + dAtA[i] = 0x32 + } if len(m.VerifiedImageReferences) > 0 { for iNdEx := len(m.VerifiedImageReferences) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.VerifiedImageReferences[iNdEx]) @@ -3577,6 +3588,10 @@ func (m *ImageSignatureVerificationResult) SizeVT() (n int) { n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } } + l = len(m.VerifierName) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } n += len(m.unknownFields) return n } @@ -5344,6 +5359,38 @@ func (m *ImageSignatureVerificationResult) UnmarshalVT(dAtA []byte) error { } m.VerifiedImageReferences = append(m.VerifiedImageReferences, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field VerifierName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VerifierName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -9458,6 +9505,42 @@ func (m *ImageSignatureVerificationResult) UnmarshalVTUnsafe(dAtA []byte) error } m.VerifiedImageReferences = append(m.VerifiedImageReferences, stringValue) iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VerifierName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var stringValue string + if intStringLen > 0 { + stringValue = unsafe.String(&dAtA[iNdEx], intStringLen) + } + m.VerifierName = stringValue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) diff --git a/pkg/signatureintegration/enricher.go b/pkg/signatureintegration/enricher.go new file mode 100644 index 0000000000000..3b208791e45d8 --- /dev/null +++ b/pkg/signatureintegration/enricher.go @@ -0,0 +1,66 @@ +package signatureintegration + +import ( + "context" + + "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/logging" + "github.com/stackrox/rox/pkg/sac" + "github.com/stackrox/rox/pkg/sac/resources" +) + +var log = logging.LoggerForModule() + +// Getter provides access to signature integration data. +type Getter interface { + GetSignatureIntegration(ctx context.Context, id string) (*storage.SignatureIntegration, bool, error) +} + +// integrationReadContext creates a SAC context with Integration read access. +func integrationReadContext(ctx context.Context) context.Context { + return sac.WithGlobalAccessScopeChecker(ctx, + sac.AllowFixedScopes( + sac.ResourceScopeKeys(resources.Integration), + sac.AccessModeScopeKeys(storage.Access_READ_ACCESS), + ), + ) +} + +// EnrichVerificationResults populates the VerifierName field in all +// signature verification results. +func EnrichVerificationResults(ctx context.Context, getter Getter, results []*storage.ImageSignatureVerificationResult) { + integrationCtx := integrationReadContext(ctx) + for _, result := range results { + name, err := getVerifierNameWithCtx(integrationCtx, getter, result.GetVerifierId()) + if err != nil { + log.Debugf("Failed to get signature integration name for ID %s: %v", result.GetVerifierId(), err) + continue + } + result.VerifierName = name + } +} + +// GetVerifierName returns the verifier name for a single verification result. +// This is useful for lazy lookups in GraphQL resolvers. 
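// Both helpers expect the caller's request context and elevate to Integration
// read access internally via integrationReadContext. Typical call sites,
// mirroring the ones added in this patch:
//
//	signatureintegration.EnrichVerificationResults(ctx,
//		s.signatureIntegrationDataStore,
//		img.GetSignatureVerificationData().GetResults())
//
//	name, err := signatureintegration.GetVerifierName(ctx,
//		resolver.root.SignatureIntegrationDataStore, resolver.data.GetVerifierId())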
+func GetVerifierName(ctx context.Context, getter Getter, verifierID string) (string, error) { + integrationCtx := integrationReadContext(ctx) + return getVerifierNameWithCtx(integrationCtx, getter, verifierID) +} + +// getVerifierNameWithCtx is an internal helper that assumes the context already +// has the necessary SAC permissions. This avoids re-wrapping the context when +// enriching multiple results. +func getVerifierNameWithCtx(ctx context.Context, getter Getter, verifierID string) (string, error) { + if verifierID == "" { + return "", nil + } + + integration, found, err := getter.GetSignatureIntegration(ctx, verifierID) + if err != nil { + return "", err + } + if !found { + return "", nil + } + return integration.GetName(), nil +} diff --git a/pkg/signatureintegration/enricher_test.go b/pkg/signatureintegration/enricher_test.go new file mode 100644 index 0000000000000..28ed706e3e4f1 --- /dev/null +++ b/pkg/signatureintegration/enricher_test.go @@ -0,0 +1,151 @@ +package signatureintegration + +import ( + "context" + "errors" + "testing" + + "github.com/stackrox/rox/generated/storage" + "github.com/stretchr/testify/assert" +) + +// mockGetter implements Getter for testing. +type mockGetter struct { + integrations map[string]*storage.SignatureIntegration + err error +} + +func (m *mockGetter) GetSignatureIntegration(_ context.Context, id string) (*storage.SignatureIntegration, bool, error) { + if m.err != nil { + return nil, false, m.err + } + integration, found := m.integrations[id] + return integration, found, nil +} + +func TestGetVerifierName(t *testing.T) { + ctx := context.Background() + + t.Run("returns empty string for empty verifier ID", func(t *testing.T) { + mock := &mockGetter{} + + name, err := GetVerifierName(ctx, mock, "") + assert.NoError(t, err) + assert.Empty(t, name) + }) + + t.Run("returns integration name when found", func(t *testing.T) { + mock := &mockGetter{ + integrations: map[string]*storage.SignatureIntegration{ + "test-id": {Name: "my-integration"}, + }, + } + + name, err := GetVerifierName(ctx, mock, "test-id") + assert.NoError(t, err) + assert.Equal(t, "my-integration", name) + }) + + t.Run("returns empty string when not found", func(t *testing.T) { + mock := &mockGetter{ + integrations: map[string]*storage.SignatureIntegration{}, + } + + name, err := GetVerifierName(ctx, mock, "unknown-id") + assert.NoError(t, err) + assert.Empty(t, name) + }) + + t.Run("returns error when getter fails", func(t *testing.T) { + mock := &mockGetter{ + err: errors.New("datastore error"), + } + + name, err := GetVerifierName(ctx, mock, "test-id") + assert.Error(t, err) + assert.Empty(t, name) + }) +} + +func TestEnrichVerificationResults(t *testing.T) { + ctx := context.Background() + + t.Run("handles empty results slice", func(t *testing.T) { + mock := &mockGetter{} + + var results []*storage.ImageSignatureVerificationResult + EnrichVerificationResults(ctx, mock, results) + // No panic, no error + }) + + t.Run("enriches multiple results", func(t *testing.T) { + mock := &mockGetter{ + integrations: map[string]*storage.SignatureIntegration{ + "id-1": {Name: "integration-1"}, + "id-2": {Name: "integration-2"}, + }, + } + + results := []*storage.ImageSignatureVerificationResult{ + {VerifierId: "id-1"}, + {VerifierId: "id-2"}, + } + + EnrichVerificationResults(ctx, mock, results) + + assert.Equal(t, "integration-1", results[0].GetVerifierName()) + assert.Equal(t, "integration-2", results[1].GetVerifierName()) + }) + + t.Run("leaves VerifierName empty when integration not 
found", func(t *testing.T) { + mock := &mockGetter{ + integrations: map[string]*storage.SignatureIntegration{}, + } + + results := []*storage.ImageSignatureVerificationResult{ + {VerifierId: "unknown-id"}, + } + + EnrichVerificationResults(ctx, mock, results) + + assert.Empty(t, results[0].GetVerifierName()) + }) + + t.Run("continues enriching after error", func(t *testing.T) { + callCount := 0 + mock := &mockGetter{ + integrations: map[string]*storage.SignatureIntegration{ + "id-2": {Name: "integration-2"}, + }, + } + // Override to return error for first call only + errorOnFirstCall := &errorOnFirstCallGetter{ + delegate: mock, + callCount: &callCount, + } + + results := []*storage.ImageSignatureVerificationResult{ + {VerifierId: "id-1"}, // Will error + {VerifierId: "id-2"}, // Should still be enriched + } + + EnrichVerificationResults(ctx, errorOnFirstCall, results) + + assert.Empty(t, results[0].GetVerifierName()) // Error case + assert.Equal(t, "integration-2", results[1].GetVerifierName()) // Continued after error + }) +} + +// errorOnFirstCallGetter returns an error on the first call, then delegates. +type errorOnFirstCallGetter struct { + delegate Getter + callCount *int +} + +func (e *errorOnFirstCallGetter) GetSignatureIntegration(ctx context.Context, id string) (*storage.SignatureIntegration, bool, error) { + *e.callCount++ + if *e.callCount == 1 { + return nil, false, errors.New("first call error") + } + return e.delegate.GetSignatureIntegration(ctx, id) +} diff --git a/proto/storage/image.proto b/proto/storage/image.proto index 6b81ad3ceb282..17e000e1bb6b1 100644 --- a/proto/storage/image.proto +++ b/proto/storage/image.proto @@ -91,7 +91,7 @@ message ImageSignatureVerificationData { repeated ImageSignatureVerificationResult results = 1; } -// Next Tag: 6 +// Next Tag: 7 message ImageSignatureVerificationResult { google.protobuf.Timestamp verification_time = 1; // verifier_id correlates to the ID of the signature integration used to verify the signature. @@ -116,6 +116,8 @@ message ImageSignatureVerificationResult { string description = 4; // The full image names that are verified by this specific signature integration ID. repeated string verified_image_references = 5; + // verifier_name is the name of the signature integration associated with `verifier_id`. 
+ string verifier_name = 6; } // Next Tag: 14 diff --git a/proto/storage/proto.lock b/proto/storage/proto.lock index 972c06db4ea17..ec549f53db7ea 100644 --- a/proto/storage/proto.lock +++ b/proto/storage/proto.lock @@ -9073,6 +9073,11 @@ "name": "verifier_id", "type": "string" }, + { + "id": 6, + "name": "verifier_name", + "type": "string" + }, { "id": 3, "name": "status", diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/Image/ImagePageSignatureVerification.tsx b/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/Image/ImagePageSignatureVerification.tsx index 98f3881365739..3f839422a8f00 100644 --- a/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/Image/ImagePageSignatureVerification.tsx +++ b/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/Image/ImagePageSignatureVerification.tsx @@ -1,8 +1,12 @@ import { Divider, Flex, FlexItem, Label, PageSection, Text } from '@patternfly/react-core'; import { Table, TableText, Tbody, Td, Th, Thead, Tr } from '@patternfly/react-table'; import { CheckCircleIcon, ExclamationCircleIcon } from '@patternfly/react-icons'; +import { Link } from 'react-router-dom'; +import { integrationsPath } from 'routePaths'; import DateDistance from 'Components/DateDistance'; +import useIntegrationPermissions from 'Containers/Integrations/hooks/useIntegrationPermissions'; + import type { SignatureVerificationResult, VerifiedStatus } from '../../types'; export type ImagePageSignatureVerificationProps = { @@ -45,6 +49,23 @@ function getStatusMessage({ status, description }: SignatureVerificationResult) } function ImagePageSignatureVerification({ results }: ImagePageSignatureVerificationProps) { + const permissions = useIntegrationPermissions(); + const getIntegrationDetailsUrl = (verifierId: string): string => { + return `${integrationsPath}/signatureIntegrations/signature/view/${verifierId}`; + }; + + const renderIntegrationCell = (result: SignatureVerificationResult) => { + const displayName = result.verifierName || result.verifierId; + + // Show as link only if user has permissions. + if (permissions.signatureIntegrations.read) { + return {displayName}; + } + + // Fallback to plain text. + return displayName; + }; + return ( <> @@ -74,7 +95,9 @@ function ImagePageSignatureVerification({ results }: ImagePageSignatureVerificat }} > - {result.verifierId} + + {renderIntegrationCell(result)} + {getStatusMessage(result)} diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/components/ImageDetailBadges.tsx b/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/components/ImageDetailBadges.tsx index 3e9af5f586ddc..aac87bc2b31b5 100644 --- a/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/components/ImageDetailBadges.tsx +++ b/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/components/ImageDetailBadges.tsx @@ -54,6 +54,7 @@ export const imageDetailsFragment = gql` verificationTime verifiedImageReferences verifierId + verifierName } } baseImage { diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/types.ts b/ui/apps/platform/src/Containers/Vulnerabilities/types.ts index 72a7dd28a0c18..807ea5c19df02 100644 --- a/ui/apps/platform/src/Containers/Vulnerabilities/types.ts +++ b/ui/apps/platform/src/Containers/Vulnerabilities/types.ts @@ -114,4 +114,5 @@ export type SignatureVerificationResult = { verificationTime: string | undefined; // ISO 8601 formatted date time. 
verifiedImageReferences: string[]; verifierId: string; // Signature integration id of the form `io.stackrox.signatureintegration.`. + verifierName: string; // Signature integration name. }; From fcc96f3da472a289fb6b09b58e7098ae41961e14 Mon Sep 17 00:00:00 2001 From: David Shrewsberry <99685630+dashrews78@users.noreply.github.com> Date: Wed, 11 Feb 2026 11:07:51 -0500 Subject: [PATCH 164/232] ROX-32719: use view for ListSecret (#18600) Co-authored-by: Claude Opus 4.6 --- central/secret/datastore/datastore_impl.go | 55 ++++-- central/secret/datastore/datastore_test.go | 180 +++++++++++++++++- .../secret/datastore/list_secret_response.go | 37 ++++ pkg/search/postgres/common.go | 14 +- pkg/search/postgres/common_test.go | 5 +- pkg/search/postgres/field_detection.go | 37 ++++ pkg/search/postgres/joins.go | 29 ++- pkg/search/postgres/query/common.go | 5 + pkg/search/postgres/select.go | 93 +++++++-- pkg/search/postgres/testutils.go | 2 +- 10 files changed, 407 insertions(+), 50 deletions(-) create mode 100644 central/secret/datastore/list_secret_response.go create mode 100644 pkg/search/postgres/field_detection.go diff --git a/central/secret/datastore/datastore_impl.go b/central/secret/datastore/datastore_impl.go index 5c404752284f4..474903bed194c 100644 --- a/central/secret/datastore/datastore_impl.go +++ b/central/secret/datastore/datastore_impl.go @@ -9,18 +9,24 @@ import ( v1 "github.com/stackrox/rox/generated/api/v1" "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/postgres" + pkgSchema "github.com/stackrox/rox/pkg/postgres/schema" pkgSearch "github.com/stackrox/rox/pkg/search" - "github.com/stackrox/rox/pkg/secret/convert" + "github.com/stackrox/rox/pkg/search/paginated" + pgSearch "github.com/stackrox/rox/pkg/search/postgres" ) type datastoreImpl struct { storage store.Store + // TODO(ROX-31142): No way to call RunSelectRequestForSchemaFn via + // the store so we have to allow for access to the DB here. 
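	// The select in SearchListSecrets below scans rows into the listSecretResponse
	// type declared in list_secret_response.go (not included in this hunk). A
	// rough sketch of what such a select-response struct looks like — the field
	// and tag names here are assumptions, not the actual definition:
	//
	//	type listSecretResponse struct {
	//		SecretID    string     `db:"secret_id"`
	//		SecretName  string     `db:"secret"`
	//		ClusterID   string     `db:"cluster_id"`
	//		ClusterName string     `db:"cluster"`
	//		Namespace   string     `db:"namespace"`
	//		CreatedTime *time.Time `db:"created_time"`
	//		SecretTypes []string   `db:"secret_type"`
	//	}
	//
	// plus a toListSecret() helper that maps one row onto a *storage.ListSecret.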
+ db postgres.DB } func newPostgres(db postgres.DB) DataStore { dbStore := pgStore.New(db) return &datastoreImpl{ storage: dbStore, + db: db, } } @@ -62,12 +68,38 @@ func (d *datastoreImpl) SearchSecrets(ctx context.Context, q *v1.Query) ([]*v1.S } func (d *datastoreImpl) SearchListSecrets(ctx context.Context, request *v1.Query) ([]*storage.ListSecret, error) { - results, err := d.Search(ctx, request) + query := request.CloneVT() + if query == nil { + query = pkgSearch.EmptyQuery() + } + + defaultSort := &v1.QuerySortOption{ + Field: pkgSearch.CreatedTime.String(), + Reversed: false, + } + query = paginated.FillDefaultSortOption(query, defaultSort) + + query.Selects = []*v1.QuerySelect{ + pkgSearch.NewQuerySelect(pkgSearch.SecretID).Proto(), + pkgSearch.NewQuerySelect(pkgSearch.SecretName).Proto(), + pkgSearch.NewQuerySelect(pkgSearch.ClusterID).Proto(), + pkgSearch.NewQuerySelect(pkgSearch.Cluster).Proto(), + pkgSearch.NewQuerySelect(pkgSearch.Namespace).Proto(), + pkgSearch.NewQuerySelect(pkgSearch.CreatedTime).Proto(), + pkgSearch.NewQuerySelect(pkgSearch.SecretType).Proto(), + } + + var listSecrets []*storage.ListSecret + err := pgSearch.RunSelectRequestForSchemaFn(ctx, d.db, pkgSchema.SecretsSchema, query, + func(r *listSecretResponse) error { + listSecrets = append(listSecrets, r.toListSecret()) + return nil + }) if err != nil { return nil, err } - secrets, _, err := d.resultsToListSecrets(ctx, results) - return secrets, err + + return listSecrets, nil } func (d *datastoreImpl) SearchRawSecrets(ctx context.Context, request *v1.Query) ([]*storage.Secret, error) { @@ -100,21 +132,6 @@ func (d *datastoreImpl) Count(ctx context.Context, q *v1.Query) (int, error) { return d.storage.Count(ctx, q) } -// ToSecrets returns the secrets from the db for the given search results. 
-func (d *datastoreImpl) resultsToListSecrets(ctx context.Context, results []pkgSearch.Result) ([]*storage.ListSecret, []int, error) { - ids := pkgSearch.ResultsToIDs(results) - - secrets, missingIndices, err := d.storage.GetMany(ctx, ids) - if err != nil { - return nil, nil, err - } - listSecrets := make([]*storage.ListSecret, 0, len(secrets)) - for _, s := range secrets { - listSecrets = append(listSecrets, convert.SecretToSecretList(s)) - } - return listSecrets, missingIndices, nil -} - type SecretSearchResultConverter struct{} func (c *SecretSearchResultConverter) BuildName(result *pkgSearch.Result) string { diff --git a/central/secret/datastore/datastore_test.go b/central/secret/datastore/datastore_test.go index 393c8ddc19fd3..313887e810a03 100644 --- a/central/secret/datastore/datastore_test.go +++ b/central/secret/datastore/datastore_test.go @@ -5,6 +5,7 @@ package datastore import ( "context" "testing" + "time" v1 "github.com/stackrox/rox/generated/api/v1" "github.com/stackrox/rox/generated/storage" @@ -12,6 +13,7 @@ import ( "github.com/stackrox/rox/pkg/postgres" "github.com/stackrox/rox/pkg/postgres/pgtest" "github.com/stackrox/rox/pkg/protoassert" + "github.com/stackrox/rox/pkg/protocompat" "github.com/stackrox/rox/pkg/sac" "github.com/stackrox/rox/pkg/sac/resources" "github.com/stackrox/rox/pkg/search" @@ -45,6 +47,11 @@ func (suite *SecretDataStoreTestSuite) SetupSuite() { sac.ResourceScopeKeys(resources.Secret))) } +func (suite *SecretDataStoreTestSuite) TearDownTest() { + _, err := suite.pool.Exec(suite.ctx, "TRUNCATE TABLE secrets CASCADE") + suite.Require().NoError(err) +} + func (suite *SecretDataStoreTestSuite) TearDownSuite() { suite.pool.Close() } @@ -78,6 +85,169 @@ func (suite *SecretDataStoreTestSuite) assertSearchResults(q *v1.Query, s *stora } } +func (suite *SecretDataStoreTestSuite) TestSearchListSecrets() { + secret1 := fixtures.GetSecret() + secret1.Id = uuid.NewV4().String() + secret1.Name = "test-secret-1" + secret1.Namespace = "default" + secret1.Files = []*storage.SecretDataFile{ + {Type: storage.SecretType_PUBLIC_CERTIFICATE}, + {Type: storage.SecretType_RSA_PRIVATE_KEY}, + } + + secret2 := fixtures.GetSecret() + secret2.Id = uuid.NewV4().String() + secret2.Name = "test-secret-2" + secret2.Namespace = "kube-system" + secret2.Files = []*storage.SecretDataFile{ + {Type: storage.SecretType_IMAGE_PULL_SECRET}, + } + + suite.NoError(suite.datastore.UpsertSecret(suite.ctx, secret1)) + suite.NoError(suite.datastore.UpsertSecret(suite.ctx, secret2)) + + // Test retrieval with empty query + results, err := suite.datastore.SearchListSecrets(suite.ctx, search.EmptyQuery()) + suite.NoError(err) + suite.Equal(len(results), 2) + + // Find our test secrets + var found1, found2 *storage.ListSecret + for _, r := range results { + if r.GetId() == secret1.GetId() { + found1 = r + } else if r.GetId() == secret2.GetId() { + found2 = r + } + } + + suite.NotNil(found1) + suite.Equal(secret1.GetName(), found1.GetName()) + suite.Equal(secret1.GetNamespace(), found1.GetNamespace()) + suite.ElementsMatch( + []storage.SecretType{storage.SecretType_PUBLIC_CERTIFICATE, storage.SecretType_RSA_PRIVATE_KEY}, + found1.GetTypes(), + ) + + suite.NotNil(found2) + suite.Equal([]storage.SecretType{storage.SecretType_IMAGE_PULL_SECRET}, found2.GetTypes()) +} + +func (suite *SecretDataStoreTestSuite) TestSearchListSecrets_NoFiles() { + // Test secret with no files (should return UNDETERMINED type) + secret := fixtures.GetSecret() + secret.Id = uuid.NewV4().String() + secret.Name = "empty-secret" 
+ secret.Files = nil // No files + + suite.NoError(suite.datastore.UpsertSecret(suite.ctx, secret)) + + query := search.NewQueryBuilder().AddStrings(search.SecretID, secret.GetId()).ProtoQuery() + results, err := suite.datastore.SearchListSecrets(suite.ctx, query) + + suite.NoError(err) + suite.Len(results, 1) + suite.Equal([]storage.SecretType{storage.SecretType_UNDETERMINED}, results[0].GetTypes()) +} + +func (suite *SecretDataStoreTestSuite) TestSearchListSecrets_WithFilter() { + // Test that search filters work correctly with single-pass query + secret1 := fixtures.GetSecret() + secret1.Id = uuid.NewV4().String() + secret1.Name = "filter-test-1" + secret1.Namespace = "default" + + secret2 := fixtures.GetSecret() + secret2.Id = uuid.NewV4().String() + secret2.Name = "filter-test-2" + secret2.Namespace = "kube-system" + + suite.NoError(suite.datastore.UpsertSecret(suite.ctx, secret1)) + suite.NoError(suite.datastore.UpsertSecret(suite.ctx, secret2)) + + // Filter by namespace + query := search.NewQueryBuilder().AddExactMatches(search.Namespace, "kube-system").ProtoQuery() + results, err := suite.datastore.SearchListSecrets(suite.ctx, query) + + suite.NoError(err) + // Find our test secret in results + var found *storage.ListSecret + for _, r := range results { + if r.GetId() == secret2.GetId() { + found = r + break + } + } + suite.NotNil(found) + suite.Equal("kube-system", found.GetNamespace()) +} + +func (suite *SecretDataStoreTestSuite) TestSearchListSecrets_DuplicateTypes() { + // Test that framework correctly deduplicates types via jsonb_agg + secret := fixtures.GetSecret() + secret.Id = uuid.NewV4().String() + secret.Name = "duplicate-types-secret" + secret.Files = []*storage.SecretDataFile{ + {Type: storage.SecretType_PUBLIC_CERTIFICATE}, + {Type: storage.SecretType_PUBLIC_CERTIFICATE}, // Duplicate + {Type: storage.SecretType_RSA_PRIVATE_KEY}, + } + + suite.NoError(suite.datastore.UpsertSecret(suite.ctx, secret)) + + query := search.NewQueryBuilder().AddStrings(search.SecretID, secret.GetId()).ProtoQuery() + results, err := suite.datastore.SearchListSecrets(suite.ctx, query) + + suite.NoError(err) + suite.Len(results, 1) + // Should have only 2 unique types + suite.Len(results[0].GetTypes(), 2) + suite.ElementsMatch( + []storage.SecretType{storage.SecretType_PUBLIC_CERTIFICATE, storage.SecretType_RSA_PRIVATE_KEY}, + results[0].GetTypes(), + ) +} + +func (suite *SecretDataStoreTestSuite) TestSearchListSecrets_DefaultSortOrder() { + now := time.Now() + older := now.Add(-1 * time.Hour) + newer := now.Add(1 * time.Hour) + + secret1 := fixtures.GetSecret() + secret1.Id = uuid.NewV4().String() + secret1.Name = "newer-secret" + secret1.CreatedAt = protocompat.ConvertTimeToTimestampOrNil(&newer) + + secret2 := fixtures.GetSecret() + secret2.Id = uuid.NewV4().String() + secret2.Name = "older-secret" + secret2.CreatedAt = protocompat.ConvertTimeToTimestampOrNil(&older) + + // Insert newer first to ensure ordering comes from sort, not insertion order + suite.NoError(suite.datastore.UpsertSecret(suite.ctx, secret1)) + suite.NoError(suite.datastore.UpsertSecret(suite.ctx, secret2)) + + // Default sort is CreatedTime ascending + results, err := suite.datastore.SearchListSecrets(suite.ctx, search.EmptyQuery()) + suite.NoError(err) + suite.Require().Len(results, 2) + suite.Equal(secret2.GetId(), results[0].GetId(), "older secret should be first in ascending order") + suite.Equal(secret1.GetId(), results[1].GetId(), "newer secret should be second in ascending order") + + // Caller-provided 
descending sort should override default + q := search.EmptyQuery() + q.Pagination = &v1.QueryPagination{ + SortOptions: []*v1.QuerySortOption{ + {Field: search.CreatedTime.String(), Reversed: true}, + }, + } + results, err = suite.datastore.SearchListSecrets(suite.ctx, q) + suite.NoError(err) + suite.Require().Len(results, 2) + suite.Equal(secret1.GetId(), results[0].GetId(), "newer secret should be first in descending order") + suite.Equal(secret2.GetId(), results[1].GetId(), "older secret should be second in descending order") +} + func (suite *SecretDataStoreTestSuite) TestSecretsDataStore() { secret := fixtures.GetSecret() err := suite.datastore.UpsertSecret(suite.ctx, secret) @@ -187,14 +357,4 @@ func (suite *SecretDataStoreTestSuite) TestSearchSecrets() { } }) } - - // Clean up - suite.NoError(suite.datastore.RemoveSecret(suite.ctx, secret1.GetId())) - suite.NoError(suite.datastore.RemoveSecret(suite.ctx, secret2.GetId())) - suite.NoError(suite.datastore.RemoveSecret(suite.ctx, secret3.GetId())) - - // Verify cleanup - results, err := suite.datastore.SearchSecrets(suite.ctx, search.EmptyQuery()) - suite.NoError(err) - suite.Empty(results) } diff --git a/central/secret/datastore/list_secret_response.go b/central/secret/datastore/list_secret_response.go new file mode 100644 index 0000000000000..b3a555a432984 --- /dev/null +++ b/central/secret/datastore/list_secret_response.go @@ -0,0 +1,37 @@ +package datastore + +import ( + "time" + + "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/protocompat" +) + +// listSecretResponse is a helper struct for scanning ListSecret query results. +type listSecretResponse struct { + ID string `db:"secret_id"` + Name string `db:"secret"` + ClusterID string `db:"cluster_id"` + ClusterName string `db:"cluster"` + Namespace string `db:"namespace"` + CreatedAt *time.Time `db:"created_time"` + Types []storage.SecretType `db:"secret_type"` +} + +// toListSecret converts the database response to a storage.ListSecret protobuf. +func (r *listSecretResponse) toListSecret() *storage.ListSecret { + types := r.Types + if len(types) == 0 { + types = []storage.SecretType{storage.SecretType_UNDETERMINED} + } + + return &storage.ListSecret{ + Id: r.ID, + Name: r.Name, + ClusterId: r.ClusterID, + ClusterName: r.ClusterName, + Namespace: r.Namespace, + CreatedAt: protocompat.ConvertTimeToTimestampOrNil(r.CreatedAt), + Types: types, + } +} diff --git a/pkg/search/postgres/common.go b/pkg/search/postgres/common.go index 863c9f45fedb9..a6398f16a92f6 100644 --- a/pkg/search/postgres/common.go +++ b/pkg/search/postgres/common.go @@ -110,6 +110,10 @@ type query struct { // This field indicates if 'Distinct' is applied in the select portion of the query DistinctAppliedToSelects bool + + // HasChildTableFields indicates if any selected fields come from child tables and need + // aggregation. Used to trigger automatic GROUP BY primary keys in SELECT queries. 
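+	// When this is set and the caller did not specify a GROUP BY, the statement is grouped
+	// by the parent table's raw primary-key column(s), for example (illustrative table names
+	// only):
+	//   SELECT ... FROM secrets LEFT JOIN secrets_files ON ... GROUP BY secrets.id
+	// so that non-aggregated parent columns remain valid under PostgreSQL's
+	// functional-dependency rules.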
+ HasChildTableFields bool } type groupByEntry struct { @@ -241,7 +245,11 @@ func (q *query) getPortionBeforeFromClause() string { selectStrs := make([]string, 0, len(allSelectFields)) for _, field := range allSelectFields { - if q.groupByNonPKFields() && !field.FromGroupBy && !field.DerivedField { + if field.ChildTableAgg { + selectStrs = append(selectStrs, + fmt.Sprintf("COALESCE(array_agg(DISTINCT %s) FILTER (WHERE %s IS NOT NULL), '{}') as %s", + field.SelectPath, field.SelectPath, field.Alias)) + } else if q.groupByNonPKFields() && !field.FromGroupBy && !field.DerivedField { selectStrs = append(selectStrs, fmt.Sprintf("jsonb_agg(%s) as %s", field.SelectPath, field.Alias)) } else { selectStrs = append(selectStrs, field.PathForSelectPortion()) @@ -470,7 +478,7 @@ func standardizeQueryAndPopulatePath(ctx context.Context, q *v1.Query, schema *w return nil, sacErr } standardizeFieldNamesInQuery(q) - joins, dbFields := getJoinsAndFields(schema, q) + joins, dbFields := getJoinsAndFields(schema, q, nil) queryEntry, err := compileQueryToPostgres(schema, q, dbFields, nowForQuery) if err != nil { @@ -512,7 +520,7 @@ func standardizeQueryAndPopulatePath(ctx context.Context, q *v1.Query, schema *w // If selects are provided in a SEARCH query, process them to enable single-pass SearchResult construction (ROX-29943) if len(q.GetSelects()) > 0 && queryType == SEARCH { - if err := populateSelect(parsedQuery, schema, q.GetSelects(), dbFields, nowForQuery); err != nil { + if err := populateSelect(parsedQuery, schema, q, dbFields, nowForQuery, nil); err != nil { return nil, errors.Wrapf(err, "failed to parse select portion of query -- %s --", q.String()) } } diff --git a/pkg/search/postgres/common_test.go b/pkg/search/postgres/common_test.go index 194de671f4978..6ae96c2cd540c 100644 --- a/pkg/search/postgres/common_test.go +++ b/pkg/search/postgres/common_test.go @@ -742,8 +742,11 @@ func TestSelectQueries(t *testing.T) { if c.ctx == nil { ctx = sac.WithAllAccess(context.Background()) } + + sacCtx := sac.WithAllAccess(ctx) testSchema := c.schema - actualQ, err := standardizeSelectQueryAndPopulatePath(ctx, c.q, testSchema, SELECT) + // No type info in test, so arrayFields is nil + actualQ, err := standardizeSelectQueryAndPopulatePath(sacCtx, c.q, testSchema, SELECT, nil) if c.expectedError != "" { assert.Error(t, err, c.expectedError) return diff --git a/pkg/search/postgres/field_detection.go b/pkg/search/postgres/field_detection.go new file mode 100644 index 0000000000000..c0e3d4536b142 --- /dev/null +++ b/pkg/search/postgres/field_detection.go @@ -0,0 +1,37 @@ +package postgres + +import ( + "github.com/stackrox/rox/pkg/postgres/walker" +) + +// isChildTableField returns true if the field comes from a child table +func isChildTableField(field *walker.Field, schema *walker.Schema) bool { + if field == nil || schema == nil { + return false + } + + if field.Schema.Table == schema.Table { + return false + } + + return isChildTable(field.Schema.Table, schema) +} + +// isChildTable returns true if the given table name is a child table of the schema +func isChildTable(tableName string, schema *walker.Schema) bool { + if schema == nil { + return false + } + + for _, child := range schema.Children { + if child.Table == tableName { + return true + } + // Recursively check nested children (grandchildren, etc.) 
+ if isChildTable(tableName, child) { + return true + } + } + + return false +} diff --git a/pkg/search/postgres/joins.go b/pkg/search/postgres/joins.go index 99f66c825003b..10313160cf22e 100644 --- a/pkg/search/postgres/joins.go +++ b/pkg/search/postgres/joins.go @@ -147,7 +147,8 @@ func collectFields(q *v1.Query) (set.StringSet, set.StringSet) { nullableFields.AddAll(nullable.AsSlice()...) } for _, selectField := range q.GetSelects() { - collectedFields.Add(selectField.GetField().GetName()) + fieldName := selectField.GetField().GetName() + collectedFields.Add(fieldName) collected, nullable := collectFields(selectField.GetFilter().GetQuery()) collectedFields.AddAll(collected.AsSlice()...) nullableFields.AddAll(nullable.AsSlice()...) @@ -166,9 +167,19 @@ type searchFieldMetadata struct { derivedMetadata *walker.DerivedSearchField } -func getJoinsAndFields(src *walker.Schema, q *v1.Query) ([]Join, map[string]searchFieldMetadata) { +// isChildSchema checks if a schema is a child (or grandchild, etc.) of the parent schema. +func isChildSchema(childSchema *walker.Schema, parentSchema *walker.Schema) bool { + if childSchema == nil || parentSchema == nil { + return false + } + return isChildTable(childSchema.Table, parentSchema) +} + +func getJoinsAndFields(src *walker.Schema, q *v1.Query, arrayFields map[string]bool) ([]Join, map[string]searchFieldMetadata) { unreachedFields, nullableFields := collectFields(q) + hasGroupBy := len(q.GetGroupBy().GetFields()) > 0 + joinTreeRoot := &joinTreeNode{ currNode: src, } @@ -194,6 +205,13 @@ func getJoinsAndFields(src *walker.Schema, q *v1.Query) ([]Join, map[string]sear if nullableFields.Remove(lowerCaseName) { joinType = Left } + if arrayFields != nil && isChildSchema(currElem.schema, src) && !hasGroupBy { + // Compute alias using the same formula as selectQueryField + alias := strings.Join(strings.Fields(lowerCaseName), "_") + if arrayFields[alias] { + joinType = Left + } + } } } @@ -211,6 +229,7 @@ func getJoinsAndFields(src *walker.Schema, q *v1.Query) ([]Join, map[string]sear } } } + // We found a field in this schema; if this is not the root schema itself, we'll need to add it to the join tree. if len(reachableFields) > numReachableFieldsBefore && len(currElem.pathFromRoot) > 0 { joinTreeRoot.addPathToTree(currElem.pathFromRoot, currElem.schema, joinType) @@ -238,8 +257,10 @@ func getJoinsAndFields(src *walker.Schema, q *v1.Query) ([]Join, map[string]sear } if src.SearchScope == nil { queue = append(queue, newElem) - } else if _, foundInSearchScope := src.SearchScope[newElem.schema.OptionsMap.PrimaryCategory()]; foundInSearchScope { - queue = append(queue, newElem) + } else if newElem.schema.OptionsMap != nil { + if _, foundInSearchScope := src.SearchScope[newElem.schema.OptionsMap.PrimaryCategory()]; foundInSearchScope { + queue = append(queue, newElem) + } } } } diff --git a/pkg/search/postgres/query/common.go b/pkg/search/postgres/query/common.go index 68aae8b572ac9..6f6dffccfdff3 100644 --- a/pkg/search/postgres/query/common.go +++ b/pkg/search/postgres/query/common.go @@ -33,6 +33,11 @@ type SelectQueryField struct { // Default: false (for selected-only fields); queryEntry fields set this to true. IncludeInMatches bool + // ChildTableAgg indicates that this field comes from a child table and should be aggregated + // using array_agg() or jsonb_agg() when selected. This is used for single-pass queries that + // fetch both parent and child table data. 
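+	// When set, the generated SELECT expression takes roughly the form (column and alias
+	// names are illustrative):
+	//   COALESCE(array_agg(DISTINCT secrets_files.type) FILTER (WHERE secrets_files.type IS NOT NULL), '{}') AS secret_type
+	// which collapses the joined child rows into a single de-duplicated array per parent row.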
+ ChildTableAgg bool + // PostTransform is a function that will be applied to the returned rows from SQL before // further processing. // The input will be of the type directly returned from the postgres rows.Scan function. diff --git a/pkg/search/postgres/select.go b/pkg/search/postgres/select.go index bf2673a514641..1d5fe984a2ffa 100644 --- a/pkg/search/postgres/select.go +++ b/pkg/search/postgres/select.go @@ -3,6 +3,7 @@ package postgres import ( "context" "fmt" + "reflect" "runtime/debug" "strings" "time" @@ -18,10 +19,47 @@ import ( "github.com/stackrox/rox/pkg/search/paginated" "github.com/stackrox/rox/pkg/search/postgres/aggregatefunc" pgsearch "github.com/stackrox/rox/pkg/search/postgres/query" + "github.com/stackrox/rox/pkg/sync" "github.com/stackrox/rox/pkg/utils" ) -var scanAPI = newScanAPI(newDBScanAPI(dbscan.WithAllowUnknownColumns(true))) +var ( + scanAPI = newScanAPI(newDBScanAPI(dbscan.WithAllowUnknownColumns(true))) + arrayFieldsCache sync.Map // reflect.Type -> map[string]bool +) + +// getArrayFieldsFromType returns a map of db tag names to whether the field is a +// slice type. Results are cached per type since reflect inspection is invariant. +func getArrayFieldsFromType[T any]() map[string]bool { + var zero T + t := reflect.TypeOf(zero) + + if t != nil && t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t == nil || t.Kind() != reflect.Struct { + return nil + } + + if cached, ok := arrayFieldsCache.Load(t); ok { + return cached.(map[string]bool) + } + + arrayFields := make(map[string]bool) + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + dbTag := field.Tag.Get("db") + if dbTag == "" || dbTag == "-" { + continue + } + if field.Type.Kind() == reflect.Slice { + arrayFields[dbTag] = true + } + } + + arrayFieldsCache.Store(t, arrayFields) + return arrayFields +} func newScanAPI(dbscanAPI *dbscan.API) *pgxscan.API { api, err := pgxscan.NewAPI(dbscanAPI) @@ -73,7 +111,10 @@ func RunSelectRequestForSchemaFn[T any](ctx context.Context, db postgres.DB, sch } }() - query, err = standardizeSelectQueryAndPopulatePath(ctx, q, schema, SELECT) + // Extract array fields from destination type T to automatically detect child table aggregation + arrayFields := getArrayFieldsFromType[T]() + + query, err = standardizeSelectQueryAndPopulatePath(ctx, q, schema, SELECT, arrayFields) if err != nil { return err } @@ -86,7 +127,7 @@ func RunSelectRequestForSchemaFn[T any](ctx context.Context, db postgres.DB, sch }) } -func standardizeSelectQueryAndPopulatePath(ctx context.Context, q *v1.Query, schema *walker.Schema, queryType QueryType) (*query, error) { +func standardizeSelectQueryAndPopulatePath(ctx context.Context, q *v1.Query, schema *walker.Schema, queryType QueryType, arrayFields map[string]bool) (*query, error) { nowForQuery := time.Now() var err error @@ -101,7 +142,7 @@ func standardizeSelectQueryAndPopulatePath(ctx context.Context, q *v1.Query, sch } standardizeFieldNamesInQuery(q) - joins, dbFields := getJoinsAndFields(schema, q) + joins, dbFields := getJoinsAndFields(schema, q, arrayFields) if len(q.GetSelects()) == 0 && q.GetQuery() == nil { return nil, nil } @@ -113,7 +154,7 @@ func standardizeSelectQueryAndPopulatePath(ctx context.Context, q *v1.Query, sch Joins: joins, } - if err = populateSelect(parsedQuery, schema, q.GetSelects(), dbFields, nowForQuery); err != nil { + if err = populateSelect(parsedQuery, schema, q, dbFields, nowForQuery, arrayFields); err != nil { return nil, errors.Wrapf(err, "failed to parse select portion of query -- %s --", q.String()) } @@ 
-136,6 +177,21 @@ func standardizeSelectQueryAndPopulatePath(ctx context.Context, q *v1.Query, sch if err := populateGroupBy(parsedQuery, q.GetGroupBy(), schema, dbFields); err != nil { return nil, err } + if parsedQuery.HasChildTableFields && len(parsedQuery.GroupBys) == 0 { + // Group by the raw PK column (no cast) so PostgreSQL recognizes functional + // dependency and allows non-aggregated parent columns in SELECT. + // applyGroupByPrimaryKeys uses selectQueryField which adds ::text for UUIDs, + // turning the GROUP BY into an expression that breaks functional dependency. + parsedQuery.GroupByPrimaryKey = true + for _, pk := range schema.PrimaryKeys() { + parsedQuery.GroupBys = append(parsedQuery.GroupBys, groupByEntry{ + Field: pgsearch.SelectQueryField{ + SelectPath: qualifyColumn(pk.Schema.Table, pk.ColumnName, ""), + FromGroupBy: true, + }, + }) + } + } if err := populatePagination(parsedQuery, q.GetPagination(), schema, dbFields); err != nil { return nil, err } @@ -171,11 +227,14 @@ func retryableRunSelectRequestForSchemaFn[T any](ctx context.Context, db postgre return rows.Err() } -func populateSelect(querySoFar *query, schema *walker.Schema, querySelects []*v1.QuerySelect, queryFields map[string]searchFieldMetadata, nowForQuery time.Time) error { +func populateSelect(querySoFar *query, schema *walker.Schema, q *v1.Query, queryFields map[string]searchFieldMetadata, nowForQuery time.Time, arrayFields map[string]bool) error { + querySelects := q.GetSelects() if len(querySelects) == 0 { return errors.New("select portion of the query cannot be empty") } + hasGroupBy := len(q.GetGroupBy().GetFields()) > 0 + for idx, qs := range querySelects { field := qs.GetField() fieldMetadata := queryFields[field.GetName()] @@ -183,16 +242,22 @@ func populateSelect(querySoFar *query, schema *walker.Schema, querySelects []*v1 if dbField == nil { return errors.Errorf("field %s in select portion of query does not exist in table %s or connected tables", field, schema.Table) } + + isChildField := isChildTableField(dbField, schema) + // TODO(mandar): Add support for the following. - if dbField.DataType == postgres.StringArray || dbField.DataType == postgres.IntArray || - dbField.DataType == postgres.EnumArray || dbField.DataType == postgres.Map { - return errors.Errorf("field %s in select portion of query is unsupported", field) + if !isChildField && (dbField.DataType == postgres.StringArray || dbField.DataType == postgres.IntArray || + dbField.DataType == postgres.EnumArray || dbField.DataType == postgres.Map) { + return errors.Errorf("array field %s in parent table is unsupported in select", field) } if qs.GetFilter() == nil { - querySoFar.SelectedFields = append(querySoFar.SelectedFields, - selectQueryField(field.GetName(), dbField, field.GetDistinct(), aggregatefunc.GetAggrFunc(field.GetAggregateFunc()), ""), - ) + selectField := selectQueryField(field.GetName(), dbField, field.GetDistinct(), aggregatefunc.GetAggrFunc(field.GetAggregateFunc()), "") + if arrayFields != nil && isChildField && !hasGroupBy && arrayFields[strings.ToLower(selectField.Alias)] { + selectField.ChildTableAgg = true + querySoFar.HasChildTableFields = true + } + querySoFar.SelectedFields = append(querySoFar.SelectedFields, selectField) querySoFar.DistinctAppliedToSelects = querySoFar.DistinctAppliedToSelects || field.GetDistinct() continue } @@ -213,6 +278,10 @@ func populateSelect(querySoFar *query, schema *walker.Schema, querySelects []*v1 querySoFar.Data = append(querySoFar.Data, qe.Where.Values...) 
selectField := selectQueryField(field.GetName(), dbField, field.GetDistinct(), aggregatefunc.GetAggrFunc(field.GetAggregateFunc()), qe.Where.Query) + if arrayFields != nil && isChildField && !hasGroupBy && arrayFields[strings.ToLower(selectField.Alias)] { + selectField.ChildTableAgg = true + querySoFar.HasChildTableFields = true + } querySoFar.DistinctAppliedToSelects = querySoFar.DistinctAppliedToSelects || field.GetDistinct() if alias := filter.GetName(); alias != "" { selectField.Alias = alias diff --git a/pkg/search/postgres/testutils.go b/pkg/search/postgres/testutils.go index 5ae5c1eef015a..50d65d16ec9c0 100644 --- a/pkg/search/postgres/testutils.go +++ b/pkg/search/postgres/testutils.go @@ -13,7 +13,7 @@ import ( // AssertSQLQueryString a utility function for test purpose. func AssertSQLQueryString(t testing.TB, q *v1.Query, schema *walker.Schema, expected string) { ctx := sac.WithAllAccess(context.Background()) - actual, err := standardizeSelectQueryAndPopulatePath(ctx, q, schema, SELECT) + actual, err := standardizeSelectQueryAndPopulatePath(ctx, q, schema, SELECT, nil) assert.NoError(t, err) assert.Equal(t, expected, actual.AsSQL()) } From 960df80e2716a479c68522625553059f4b0e528d Mon Sep 17 00:00:00 2001 From: Alex Vulaj Date: Wed, 11 Feb 2026 11:29:31 -0500 Subject: [PATCH 165/232] docs: Add database column addition workflow to AGENTS.md (#18950) --- AGENTS.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/AGENTS.md b/AGENTS.md index 18e37bb07003f..016880fbcf31c 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -85,6 +85,17 @@ When creating pull requests, you must follow these requirements: - `make go-generated-srcs` - Generate Go code (mockgen, stringer, easyjson) - `make generated-srcs` - Generate all source code +### Adding Database Columns +When the user needs to add a new column to an existing database table: +- Read `migrator/README.md` for the authoritative migration workflow and examples +- Prompt the user about their specific needs (column type, backfill requirements, performance considerations) +- Present options based on similar migrations in `migrator/migrations/` directory +- Reference the "Code Generation Commands" section above for regenerating schema/store code +- Suggest running code generation commands in the background if appropriate (they can be slow) +- After code generation, run code quality checks from the "Code Quality Commands" section above + +The migrator README contains detailed examples of frozen schemas, GORM usage patterns, and migration best practices. 
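As a purely illustrative aside (not taken from migrator/README.md, and not this repository's migration framework): the sketch below shows, in generic GORM terms, what adding a new column to an existing table looks like at the model level. The model, table name, column, and DSN are hypothetical; the frozen-schema and migration-registration conventions under migrator/migrations/ remain the source of truth.

    package main

    import (
    	"log"

    	"gorm.io/driver/postgres"
    	"gorm.io/gorm"
    )

    // demoSecret stands in for an existing table; Note is the hypothetical new column.
    type demoSecret struct {
    	ID   string `gorm:"primaryKey"`
    	Name string
    	Note string `gorm:"column:note"` // new column; nullable unless constrained
    }

    // TableName pins the hypothetical table name instead of relying on GORM's pluralization.
    func (demoSecret) TableName() string { return "demo_secrets" }

    func main() {
    	db, err := gorm.Open(postgres.Open("postgres://user:pass@localhost:5432/central?sslmode=disable"), &gorm.Config{})
    	if err != nil {
    		log.Fatal(err)
    	}
    	// AutoMigrate adds missing columns and indexes; it will not drop existing columns.
    	if err := db.AutoMigrate(&demoSecret{}); err != nil {
    		log.Fatal(err)
    	}
    }

In this repository, prefer mirroring an existing migration under migrator/migrations/ (as the README describes) rather than calling AutoMigrate directly from product code.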
+ ### Local Development Commands - `./deploy/deploy-local.sh` - Deploy StackRox locally (requires existing k8s cluster) - `make install-dev-tools` - Install development tools (linters, generators) From 37ec75c6c3bea745da2e4c3ff88d8336a9a51c96 Mon Sep 17 00:00:00 2001 From: Stephan Hesselmann Date: Wed, 11 Feb 2026 20:24:47 +0100 Subject: [PATCH 166/232] ROX-31216: add postgres integration test for image service (#18979) --- .../service/service_impl_postgres_test.go | 230 ++++++++++++++++++ 1 file changed, 230 insertions(+) create mode 100644 central/image/service/service_impl_postgres_test.go diff --git a/central/image/service/service_impl_postgres_test.go b/central/image/service/service_impl_postgres_test.go new file mode 100644 index 0000000000000..0cd042900fca5 --- /dev/null +++ b/central/image/service/service_impl_postgres_test.go @@ -0,0 +1,230 @@ +//go:build sql_integration + +package service + +import ( + "context" + "errors" + "io" + "testing" + + imageDataStore "github.com/stackrox/rox/central/image/datastore" + policyDataStoreMock "github.com/stackrox/rox/central/policy/datastore/mocks" + signatureIntegrationDS "github.com/stackrox/rox/central/signatureintegration/datastore" + signatureIntegrationPostgres "github.com/stackrox/rox/central/signatureintegration/store/postgres" + v1 "github.com/stackrox/rox/generated/api/v1" + "github.com/stackrox/rox/generated/storage" + pkgGRPC "github.com/stackrox/rox/pkg/grpc" + "github.com/stackrox/rox/pkg/postgres/pgtest" + "github.com/stackrox/rox/pkg/sac" + "github.com/stretchr/testify/suite" + "go.uber.org/mock/gomock" + "google.golang.org/grpc" +) + +func TestImageServicePostgres(t *testing.T) { + suite.Run(t, new(imageServicePostgresTestSuite)) +} + +type imageServicePostgresTestSuite struct { + suite.Suite + + pool *pgtest.TestPostgres + imageDS imageDataStore.DataStore + sigIntegrationDS signatureIntegrationDS.DataStore + service Service + + ctx context.Context +} + +func (s *imageServicePostgresTestSuite) SetupTest() { + s.pool = pgtest.ForT(s.T()) + + s.imageDS = imageDataStore.GetTestPostgresDataStore(s.T(), s.pool) + + sigStore := signatureIntegrationPostgres.New(s.pool) + policyMock := policyDataStoreMock.NewMockDataStore(gomock.NewController(s.T())) + s.sigIntegrationDS = signatureIntegrationDS.New(sigStore, policyMock) + + s.service = New(s.imageDS, nil, s.imageDS, nil, nil, nil, nil, nil, nil, nil, nil, nil, s.sigIntegrationDS) + + s.ctx = sac.WithAllAccess(context.Background()) +} + +func (s *imageServicePostgresTestSuite) TestGetImageEnrichesVerifierName() { + // Create a signature integration with a known name. + integration := newTestSignatureIntegration("my-cosign-verifier") + savedIntegration, err := s.sigIntegrationDS.AddSignatureIntegration(s.ctx, integration) + s.Require().NoError(err) + s.Require().NotNil(savedIntegration) + + // Create an image with a verification result referencing this integration. + image := newTestImageWithVerificationResults("sha256:aaa111", []*storage.ImageSignatureVerificationResult{ + { + VerifierId: savedIntegration.GetId(), + Status: storage.ImageSignatureVerificationResult_VERIFIED, + }, + }) + s.Require().NoError(s.imageDS.UpsertImage(s.ctx, image)) + + // Call GetImage and verify that VerifierName is populated. 
+ resp, err := s.service.GetImage(s.ctx, &v1.GetImageRequest{Id: image.GetId()}) + s.Require().NoError(err) + s.Require().NotNil(resp) + + results := resp.GetSignatureVerificationData().GetResults() + s.Require().Len(results, 1) + s.Equal("my-cosign-verifier", results[0].GetVerifierName()) +} + +func (s *imageServicePostgresTestSuite) TestGetImageWithUnknownVerifierId() { + // Create an image with a verification result pointing to a non-existent integration. + image := newTestImageWithVerificationResults("sha256:bbb222", []*storage.ImageSignatureVerificationResult{ + { + VerifierId: "io.stackrox.signatureintegration.non-existent-id", + Status: storage.ImageSignatureVerificationResult_VERIFIED, + }, + }) + s.Require().NoError(s.imageDS.UpsertImage(s.ctx, image)) + + resp, err := s.service.GetImage(s.ctx, &v1.GetImageRequest{Id: image.GetId()}) + s.Require().NoError(err) + s.Require().NotNil(resp) + + results := resp.GetSignatureVerificationData().GetResults() + s.Require().Len(results, 1) + s.Empty(results[0].GetVerifierName(), "VerifierName should be empty for unknown integration ID") +} + +func (s *imageServicePostgresTestSuite) TestGetImageWithEmptyVerifierId() { + // Create an image with a verification result where VerifierId is empty. + image := newTestImageWithVerificationResults("sha256:ccc333", []*storage.ImageSignatureVerificationResult{ + { + VerifierId: "", + Status: storage.ImageSignatureVerificationResult_VERIFIED, + }, + }) + s.Require().NoError(s.imageDS.UpsertImage(s.ctx, image)) + + resp, err := s.service.GetImage(s.ctx, &v1.GetImageRequest{Id: image.GetId()}) + s.Require().NoError(err) + s.Require().NotNil(resp) + + results := resp.GetSignatureVerificationData().GetResults() + s.Require().Len(results, 1) + s.Empty(results[0].GetVerifierName(), "VerifierName should be empty when VerifierId is empty") +} + +func (s *imageServicePostgresTestSuite) TestGetImageWithMultipleVerificationResults() { + // Create two signature integrations with different names. + integration1 := newTestSignatureIntegration("verifier-alpha") + saved1, err := s.sigIntegrationDS.AddSignatureIntegration(s.ctx, integration1) + s.Require().NoError(err) + + integration2 := newTestSignatureIntegration("verifier-beta") + saved2, err := s.sigIntegrationDS.AddSignatureIntegration(s.ctx, integration2) + s.Require().NoError(err) + + // Create an image with multiple verification results. + image := newTestImageWithVerificationResults("sha256:ddd444", []*storage.ImageSignatureVerificationResult{ + { + VerifierId: saved1.GetId(), + Status: storage.ImageSignatureVerificationResult_VERIFIED, + }, + { + VerifierId: saved2.GetId(), + Status: storage.ImageSignatureVerificationResult_VERIFIED, + }, + }) + s.Require().NoError(s.imageDS.UpsertImage(s.ctx, image)) + + resp, err := s.service.GetImage(s.ctx, &v1.GetImageRequest{Id: image.GetId()}) + s.Require().NoError(err) + s.Require().NotNil(resp) + + results := resp.GetSignatureVerificationData().GetResults() + s.Require().Len(results, 2) + + // Build a map of verifier ID to verifier name for order-independent assertion. + nameByID := make(map[string]string, len(results)) + for _, r := range results { + nameByID[r.GetVerifierId()] = r.GetVerifierName() + } + s.Equal("verifier-alpha", nameByID[saved1.GetId()]) + s.Equal("verifier-beta", nameByID[saved2.GetId()]) +} + +func (s *imageServicePostgresTestSuite) TestExportImagesEnrichesVerifierName() { + // Create a signature integration. 
+ integration := newTestSignatureIntegration("export-verifier") + saved, err := s.sigIntegrationDS.AddSignatureIntegration(s.ctx, integration) + s.Require().NoError(err) + + // Create an image with a verification result referencing this integration. + image := newTestImageWithVerificationResults("sha256:eee555", []*storage.ImageSignatureVerificationResult{ + { + VerifierId: saved.GetId(), + Status: storage.ImageSignatureVerificationResult_VERIFIED, + }, + }) + s.Require().NoError(s.imageDS.UpsertImage(s.ctx, image)) + + // Set up a gRPC streaming server. + conn, closeFunc, err := pkgGRPC.CreateTestGRPCStreamingService( + s.ctx, + s.T(), + func(registrar grpc.ServiceRegistrar) { + v1.RegisterImageServiceServer(registrar, s.service) + }, + ) + s.Require().NoError(err) + defer closeFunc() + + client := v1.NewImageServiceClient(conn) + stream, err := client.ExportImages(s.ctx, &v1.ExportImageRequest{Timeout: 60}) + s.Require().NoError(err) + + var exported []*storage.Image + for { + resp, recvErr := stream.Recv() + if errors.Is(recvErr, io.EOF) { + break + } + s.Require().NoError(recvErr) + exported = append(exported, resp.GetImage()) + } + + s.Require().Len(exported, 1) + results := exported[0].GetSignatureVerificationData().GetResults() + s.Require().Len(results, 1) + s.Equal("export-verifier", results[0].GetVerifierName()) +} + +// newTestSignatureIntegration creates a minimal SignatureIntegration for testing. +func newTestSignatureIntegration(name string) *storage.SignatureIntegration { + return &storage.SignatureIntegration{ + Name: name, + Cosign: &storage.CosignPublicKeyVerification{ + PublicKeys: []*storage.CosignPublicKeyVerification_PublicKey{ + { + Name: "key1", + PublicKeyPemEnc: "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAryQICCl6NZ5gDKrnSztO\n3Hy8PEUcuyvg/ikC+VcIo2SFFSf18a3IMYldIugqqqZCs4/4uVW3sbdLs/6PfgdX\n7O9D22ZiFWHPYA2k2N744MNiCD1UE+tJyllUhSblK48bn+v1oZHCM0nYQ2NqUkvS\nj+hwUU3RiWl7x3D2s9wSdNt7XUtW05a/FXehsPSiJfKvHJJnGOX0BgTvkLnkAOTd\nOrUZ/wK69Dzu4IvrN4vs9Nes8vbwPa/ddZEzGR0cQMt0JBkhk9kU/qwqUseP1QRJ\n5I1jR4g8aYPL/ke9K35PxZWuDp3U0UPAZ3PjFAh+5T+fc7gzCs9dPzSHloruU+gl\nFQIDAQAB\n-----END PUBLIC KEY-----", + }, + }, + }, + } +} + +// newTestImageWithVerificationResults creates a minimal Image with the given +// signature verification results. 
+func newTestImageWithVerificationResults(id string, results []*storage.ImageSignatureVerificationResult) *storage.Image { + return &storage.Image{ + Id: id, + Name: &storage.ImageName{ + FullName: "docker.io/library/test:" + id, + }, + SignatureVerificationData: &storage.ImageSignatureVerificationData{ + Results: results, + }, + } +} From fbd3c546886417bb9beae9215f4c7c6a37e9c11d Mon Sep 17 00:00:00 2001 From: AJ Heflin <77823405+ajheflin@users.noreply.github.com> Date: Wed, 11 Feb 2026 16:05:37 -0500 Subject: [PATCH 167/232] ROX-32723: Remove two pass query for SearchListImages (#18447) --- central/imagev2/datastore/datastore.go | 1 + central/imagev2/datastore/datastore_impl.go | 20 +++ .../mapper/datastore/datastore_impl.go | 39 +----- central/imagev2/datastore/mocks/datastore.go | 15 +++ .../imagev2/datastore/store/mocks/store.go | 14 +++ .../imagev2/datastore/store/postgres/store.go | 12 ++ central/imagev2/datastore/store/store.go | 1 + .../datastoretest/datastore_impl_test.go | 116 ++++++++++++++++++ 8 files changed, 180 insertions(+), 38 deletions(-) diff --git a/central/imagev2/datastore/datastore.go b/central/imagev2/datastore/datastore.go index 2238dfa86049e..7950966e84671 100644 --- a/central/imagev2/datastore/datastore.go +++ b/central/imagev2/datastore/datastore.go @@ -20,6 +20,7 @@ type DataStore interface { Count(ctx context.Context, q *v1.Query) (int, error) SearchImages(ctx context.Context, q *v1.Query) ([]*v1.SearchResult, error) SearchRawImages(ctx context.Context, q *v1.Query) ([]*storage.ImageV2, error) + SearchListImages(ctx context.Context, q *v1.Query) ([]*storage.ListImage, error) GetImage(ctx context.Context, id string) (*storage.ImageV2, bool, error) GetImageMetadata(ctx context.Context, id string) (*storage.ImageV2, bool, error) diff --git a/central/imagev2/datastore/datastore_impl.go b/central/imagev2/datastore/datastore_impl.go index 461fb17c2bb27..6686fa0ce6c25 100644 --- a/central/imagev2/datastore/datastore_impl.go +++ b/central/imagev2/datastore/datastore_impl.go @@ -16,6 +16,7 @@ import ( "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/concurrency" "github.com/stackrox/rox/pkg/errorhelpers" + imageTypes "github.com/stackrox/rox/pkg/images/types" "github.com/stackrox/rox/pkg/images/utils" "github.com/stackrox/rox/pkg/logging" "github.com/stackrox/rox/pkg/sac" @@ -107,6 +108,25 @@ func (ds *datastoreImpl) SearchImages(ctx context.Context, q *v1.Query) ([]*v1.S return search.ResultsToSearchResultProtos(results, &ImageSearchResultConverter{}), nil } +func (ds *datastoreImpl) SearchListImages(ctx context.Context, q *v1.Query) ([]*storage.ListImage, error) { + defer metrics.SetDatastoreFunctionDuration(time.Now(), "ImageV2", "SearchListImages") + + var imgs []*storage.ListImage + err := ds.storage.WalkMetadataByQuery(ctx, q, func(img *storage.ImageV2) error { + imgs = append(imgs, imageTypes.ConvertImageToListImage(utils.ConvertToV1(img))) + return nil + }) + if err != nil { + return nil, err + } + + for _, image := range imgs { + image.Priority = ds.imageRanker.GetRankForID(image.GetId()) + } + + return imgs, nil +} + // TODO(ROX-29943): Eliminate unnecessary 2 pass database queries // SearchRawImages delegates to the underlying searcher. 
func (ds *datastoreImpl) SearchRawImages(ctx context.Context, q *v1.Query) ([]*storage.ImageV2, error) { diff --git a/central/imagev2/datastore/mapper/datastore/datastore_impl.go b/central/imagev2/datastore/mapper/datastore/datastore_impl.go index 69eb86475149a..a23f5a40e8e0c 100644 --- a/central/imagev2/datastore/mapper/datastore/datastore_impl.go +++ b/central/imagev2/datastore/mapper/datastore/datastore_impl.go @@ -36,44 +36,7 @@ func (ds *datastoreImpl) SearchListImages(ctx context.Context, q *v1.Query) ([]* return ds.imageDataStore.SearchListImages(ctx, q) } - // Get image IDs from search (fast) - results, err := ds.imageV2DataStore.Search(ctx, q) - if err != nil { - return nil, err - } - - // Get image metadata in bulk (much faster than GetByIDs with full scan data) - ids := make([]string, 0, len(results)) - for _, result := range results { - ids = append(ids, result.ID) - } - - images, err := ds.imageV2DataStore.GetManyImageMetadata(ctx, ids) - if err != nil { - return nil, err - } - - // Build a map for O(1) lookup when reordering - imageByID := make(map[string]*storage.ImageV2, len(images)) - for _, image := range images { - imageByID[image.GetId()] = image - } - - // Convert to list images, preserving the original search order from results - listImages := make([]*storage.ListImage, 0, len(ids)) - for _, id := range ids { - image, exists := imageByID[id] - if !exists { - // Image may have been deleted between search and lookup, skip it - continue - } - // Convert v2 to v1 - v1Image := imageUtils.ConvertToV1(image) - // Convert v1 to ListImage - listImage := types.ConvertImageToListImage(v1Image) - listImages = append(listImages, listImage) - } - return listImages, nil + return ds.imageV2DataStore.SearchListImages(ctx, q) } func (ds *datastoreImpl) ListImage(ctx context.Context, sha string) (*storage.ListImage, bool, error) { diff --git a/central/imagev2/datastore/mocks/datastore.go b/central/imagev2/datastore/mocks/datastore.go index f5e1da8a66a98..ee855eb515da0 100644 --- a/central/imagev2/datastore/mocks/datastore.go +++ b/central/imagev2/datastore/mocks/datastore.go @@ -215,6 +215,21 @@ func (mr *MockDataStoreMockRecorder) SearchImages(ctx, q any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SearchImages", reflect.TypeOf((*MockDataStore)(nil).SearchImages), ctx, q) } +// SearchListImages mocks base method. +func (m *MockDataStore) SearchListImages(ctx context.Context, q *v1.Query) ([]*storage.ListImage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SearchListImages", ctx, q) + ret0, _ := ret[0].([]*storage.ListImage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SearchListImages indicates an expected call of SearchListImages. +func (mr *MockDataStoreMockRecorder) SearchListImages(ctx, q any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SearchListImages", reflect.TypeOf((*MockDataStore)(nil).SearchListImages), ctx, q) +} + // SearchRawImages mocks base method. 
func (m *MockDataStore) SearchRawImages(ctx context.Context, q *v1.Query) ([]*storage.ImageV2, error) { m.ctrl.T.Helper() diff --git a/central/imagev2/datastore/store/mocks/store.go b/central/imagev2/datastore/store/mocks/store.go index 28b878c939b56..9ff0dee364a37 100644 --- a/central/imagev2/datastore/store/mocks/store.go +++ b/central/imagev2/datastore/store/mocks/store.go @@ -236,3 +236,17 @@ func (mr *MockStoreMockRecorder) WalkByQuery(ctx, q, fn any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalkByQuery", reflect.TypeOf((*MockStore)(nil).WalkByQuery), ctx, q, fn) } + +// WalkMetadataByQuery mocks base method. +func (m *MockStore) WalkMetadataByQuery(ctx context.Context, q *v1.Query, fn func(*storage.ImageV2) error) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WalkMetadataByQuery", ctx, q, fn) + ret0, _ := ret[0].(error) + return ret0 +} + +// WalkMetadataByQuery indicates an expected call of WalkMetadataByQuery. +func (mr *MockStoreMockRecorder) WalkMetadataByQuery(ctx, q, fn any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WalkMetadataByQuery", reflect.TypeOf((*MockStore)(nil).WalkMetadataByQuery), ctx, q, fn) +} diff --git a/central/imagev2/datastore/store/postgres/store.go b/central/imagev2/datastore/store/postgres/store.go index c246da7cd07f6..d4465171a9acd 100644 --- a/central/imagev2/datastore/store/postgres/store.go +++ b/central/imagev2/datastore/store/postgres/store.go @@ -897,6 +897,18 @@ func (s *storeImpl) WalkByQuery(ctx context.Context, q *v1.Query, fn func(image return nil } +func (s *storeImpl) WalkMetadataByQuery(ctx context.Context, q *v1.Query, fn func(image *storage.ImageV2) error) error { + defer metrics.SetPostgresOperationDurationTime(time.Now(), ops.WalkMetadataByQuery, "Image") + + q = s.applyDefaultSort(q) + + err := pgSearch.RunCursorQueryForSchemaFn(ctx, pkgSchema.ImagesV2Schema, q, s.db, fn) + if err != nil { + return errors.Wrap(err, "cursor by query") + } + return nil +} + // GetImageMetadata returns the image without scan/component data. 
func (s *storeImpl) GetImageMetadata(ctx context.Context, id string) (*storage.ImageV2, bool, error) { defer metrics.SetPostgresOperationDurationTime(time.Now(), ops.Get, "ImageV2Metadata") diff --git a/central/imagev2/datastore/store/store.go b/central/imagev2/datastore/store/store.go index 8e62886f4b480..b5ec1dd57f2c3 100644 --- a/central/imagev2/datastore/store/store.go +++ b/central/imagev2/datastore/store/store.go @@ -25,6 +25,7 @@ type Store interface { GetImageMetadata(ctx context.Context, id string) (*storage.ImageV2, bool, error) GetManyImageMetadata(ctx context.Context, id []string) ([]*storage.ImageV2, error) WalkByQuery(ctx context.Context, q *v1.Query, fn func(img *storage.ImageV2) error) error + WalkMetadataByQuery(ctx context.Context, q *v1.Query, fn func(img *storage.ImageV2) error) error Upsert(ctx context.Context, image *storage.ImageV2) error Delete(ctx context.Context, id string) error diff --git a/central/imagev2/datastoretest/datastore_impl_test.go b/central/imagev2/datastoretest/datastore_impl_test.go index 3ed6c2147ca99..25e9cead8c04c 100644 --- a/central/imagev2/datastoretest/datastore_impl_test.go +++ b/central/imagev2/datastoretest/datastore_impl_test.go @@ -646,3 +646,119 @@ func cloneAndUpdateRiskPriority(image *storage.ImageV2) *storage.ImageV2 { } return cloned } + +func (s *ImageV2DataStoreTestSuite) TestSearchListImages() { + ctx := sac.WithAllAccess(context.Background()) + + // Create and upsert test images + img1 := getTestImageV2("img1") + img2 := getTestImageV2("img2") + img3 := getTestImageV2("img3") + + s.NoError(s.datastore.UpsertImage(ctx, img1)) + s.NoError(s.datastore.UpsertImage(ctx, img2)) + s.NoError(s.datastore.UpsertImage(ctx, img3)) + + // Test 1: Search all images with empty query + listImages, err := s.datastore.SearchListImages(ctx, pkgSearch.EmptyQuery()) + s.NoError(err) + s.Len(listImages, 3) + + // Verify that all images are present + // Note: In V2->V1 conversion, Image.Id is set to the digest (SHA), not the UUID + imageDigests := set.NewStringSet() + for _, img := range listImages { + imageDigests.Add(img.GetId()) + // Verify priority is set by the ranker + s.NotZero(img.GetPriority()) + // Verify ListImage has expected fields + s.NotEmpty(img.GetId()) + s.NotEmpty(img.GetName()) + // LastUpdated should be set + s.NotNil(img.GetLastUpdated()) + } + // Verify all image digests are present + s.True(imageDigests.Contains(img1.GetDigest())) + s.True(imageDigests.Contains(img2.GetDigest())) + s.True(imageDigests.Contains(img3.GetDigest())) + + // Test 2: Search with specific image name + q := pkgSearch.NewQueryBuilder().AddStrings(pkgSearch.ImageName, "registry.test.io/img1:latest").ProtoQuery() + listImages, err = s.datastore.SearchListImages(ctx, q) + s.NoError(err) + s.Len(listImages, 1) + s.Equal(img1.GetDigest(), listImages[0].GetId()) // ListImage ID is the digest in V1 format + s.Equal("registry.test.io/img1:latest", listImages[0].GetName()) + + // Test 3: Search with image UUID (V2 ID) + q = pkgSearch.NewQueryBuilder().AddExactMatches(pkgSearch.ImageID, img2.GetId()).ProtoQuery() + listImages, err = s.datastore.SearchListImages(ctx, q) + s.NoError(err) + s.Len(listImages, 1) + s.Equal(img2.GetDigest(), listImages[0].GetId()) + + // Test 4: Search with SHA + q = pkgSearch.NewQueryBuilder().AddExactMatches(pkgSearch.ImageSHA, img3.GetDigest()).ProtoQuery() + listImages, err = s.datastore.SearchListImages(ctx, q) + s.NoError(err) + s.Len(listImages, 1) + s.Equal(img3.GetDigest(), listImages[0].GetId()) + + // Test 5: Search with 
pagination + q = pkgSearch.EmptyQuery() + q.Pagination = &v1.QueryPagination{ + Limit: 2, + Offset: 0, + } + listImages, err = s.datastore.SearchListImages(ctx, q) + s.NoError(err) + s.Len(listImages, 2) + + // Test 6: Search with no results + q = pkgSearch.NewQueryBuilder().AddStrings(pkgSearch.ImageName, "nonexistent").ProtoQuery() + listImages, err = s.datastore.SearchListImages(ctx, q) + s.NoError(err) + s.Len(listImages, 0) + + // Test 7: Verify ListImage contains component and CVE counts from ScanStats + // Create an image with scan stats populated + imgWithStats := fixtures.GetImageV2WithUniqueComponents(3) + s.NoError(s.datastore.UpsertImage(ctx, imgWithStats)) + + q = pkgSearch.NewQueryBuilder().AddExactMatches(pkgSearch.ImageID, imgWithStats.GetId()).ProtoQuery() + listImages, err = s.datastore.SearchListImages(ctx, q) + s.NoError(err) + s.Len(listImages, 1) + s.Equal(imgWithStats.GetDigest(), listImages[0].GetId()) + + // Verify the component count comes from ScanStats (fixture sets ComponentCount to 3) + s.Equal(int32(3), listImages[0].GetComponents(), "Expected component count to match fixture") + + // CVE and fixable CVE counts may be populated by the datastore during upsert + // We just verify they are non-negative (could be 0 if not populated) + s.GreaterOrEqual(listImages[0].GetCves(), int32(0)) + s.GreaterOrEqual(listImages[0].GetFixableCves(), int32(0)) + + // Test 8: Test with access control - no access context + noAccessCtx := sac.WithNoAccess(context.Background()) + listImages, err = s.datastore.SearchListImages(noAccessCtx, pkgSearch.EmptyQuery()) + s.NoError(err) + s.Len(listImages, 0) + + // Test 9: Verify sorting works correctly + q = pkgSearch.EmptyQuery() + q.Pagination = &v1.QueryPagination{ + SortOptions: []*v1.QuerySortOption{ + { + Field: pkgSearch.ImageName.String(), + }, + }, + } + listImages, err = s.datastore.SearchListImages(ctx, q) + s.NoError(err) + s.Greater(len(listImages), 1) + // Verify images are sorted by name + for i := 1; i < len(listImages); i++ { + s.LessOrEqual(listImages[i-1].GetName(), listImages[i].GetName(), "Images should be sorted by name") + } +} From 23bd3b6e5db8eb00cc91abfedab143ccef24bb21 Mon Sep 17 00:00:00 2001 From: David Caravello <119438707+dcaravel@users.noreply.github.com> Date: Wed, 11 Feb 2026 17:58:28 -0600 Subject: [PATCH 168/232] ROX-33051: Update Scanner V4 Vuln Bundle Ref to use timeouts (#18983) --- scanner/updater/version/VULNERABILITY_BUNDLE_VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scanner/updater/version/VULNERABILITY_BUNDLE_VERSION b/scanner/updater/version/VULNERABILITY_BUNDLE_VERSION index 555de35e6e78f..885ecd967a23b 100644 --- a/scanner/updater/version/VULNERABILITY_BUNDLE_VERSION +++ b/scanner/updater/version/VULNERABILITY_BUNDLE_VERSION @@ -6,4 +6,4 @@ # This process ensures that each version's vulnerability data is accurately captured and maintained in accordance with its respective version. 
dev heads/master v1 4.5.2 -v2 4.8.4 +v2 tags/4.9.4-rc.0-6-g2bcc59d86d From 1fe90bd83047c558a5dbc87591eeaa89764728c9 Mon Sep 17 00:00:00 2001 From: David Caravello <119438707+dcaravel@users.noreply.github.com> Date: Wed, 11 Feb 2026 23:11:36 -0600 Subject: [PATCH 169/232] ROX-33051: Reverts change to v2 bundle ref (#18990) --- scanner/updater/version/VULNERABILITY_BUNDLE_VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scanner/updater/version/VULNERABILITY_BUNDLE_VERSION b/scanner/updater/version/VULNERABILITY_BUNDLE_VERSION index 885ecd967a23b..555de35e6e78f 100644 --- a/scanner/updater/version/VULNERABILITY_BUNDLE_VERSION +++ b/scanner/updater/version/VULNERABILITY_BUNDLE_VERSION @@ -6,4 +6,4 @@ # This process ensures that each version's vulnerability data is accurately captured and maintained in accordance with its respective version. dev heads/master v1 4.5.2 -v2 tags/4.9.4-rc.0-6-g2bcc59d86d +v2 4.8.4 From 74f748ef001b93ab0191fef920b7ff11cd7c1d5e Mon Sep 17 00:00:00 2001 From: Khushboo Sancheti <42253461+clickboo@users.noreply.github.com> Date: Thu, 12 Feb 2026 20:52:03 +0530 Subject: [PATCH 170/232] ROX-33096: Consistent naming for File Activity Monitoring feature (#18955) --- deploy/common/k8sbased.sh | 4 +- .../internal/config-shape.yaml | 4 +- .../internal/defaults/40-resources.yaml | 2 +- .../internal/expandables.yaml | 2 +- .../templates/collector.yaml.htpl | 4 +- operator/api/v1alpha1/securedcluster_types.go | 28 ++++---- .../api/v1alpha1/zz_generated.deepcopy.go | 48 ++++++------- .../platform.stackrox.io_securedclusters.yaml | 70 +++++++++---------- .../rhacs-operator.clusterserviceversion.yaml | 25 ++++--- .../platform.stackrox.io_securedclusters.yaml | 70 +++++++++---------- .../rhacs-operator.clusterserviceversion.yaml | 21 +++--- operator/install/manifest.yaml | 10 +-- .../securedcluster/defaults/static.go | 4 +- .../values/translation/translation.go | 20 +++--- tests/e2e/lib.sh | 8 +-- .../e2e/yaml/secured-cluster-cr.envsubst.yaml | 4 +- 16 files changed, 165 insertions(+), 159 deletions(-) diff --git a/deploy/common/k8sbased.sh b/deploy/common/k8sbased.sh index 3e7471429a5cc..1b2fb2d6a485b 100644 --- a/deploy/common/k8sbased.sh +++ b/deploy/common/k8sbased.sh @@ -866,8 +866,8 @@ function launch_sensor { fi if [[ "${SFA_AGENT:-false}" == "true" ]]; then - echo "Enable Sensitive File Activity agent" - extra_helm_config+=(--set "collector.sfaEnabled=true") + echo "Enable File Activity Monitoring" + extra_helm_config+=(--set "collector.famEnabled=true") fi if [[ -n "$CI" ]]; then diff --git a/image/templates/helm/stackrox-secured-cluster/internal/config-shape.yaml b/image/templates/helm/stackrox-secured-cluster/internal/config-shape.yaml index d4c78a88223ca..8c6da732ab99e 100644 --- a/image/templates/helm/stackrox-secured-cluster/internal/config-shape.yaml +++ b/image/templates/helm/stackrox-secured-cluster/internal/config-shape.yaml @@ -134,8 +134,8 @@ collector: complianceImagePullPolicy: null # string complianceResources: null # string | dict nodeScanningResources: null # string | dict - sfaEnabled: null # bool - sfaScanningResources: null # string | dict + famEnabled: null # bool + famResources: null # string | dict serviceTLS: cert: null # string key: null # string diff --git a/image/templates/helm/stackrox-secured-cluster/internal/defaults/40-resources.yaml b/image/templates/helm/stackrox-secured-cluster/internal/defaults/40-resources.yaml index 3a3ec77d3ad8e..eab9f4a010ff6 100644 --- 
a/image/templates/helm/stackrox-secured-cluster/internal/defaults/40-resources.yaml +++ b/image/templates/helm/stackrox-secured-cluster/internal/defaults/40-resources.yaml @@ -43,7 +43,7 @@ collector: memory: "500Mi" cpu: "1" - sfaResources: + famResources: resources: requests: memory: "320Mi" diff --git a/image/templates/helm/stackrox-secured-cluster/internal/expandables.yaml b/image/templates/helm/stackrox-secured-cluster/internal/expandables.yaml index 1bbead90788f6..4c5ebcc9ae580 100644 --- a/image/templates/helm/stackrox-secured-cluster/internal/expandables.yaml +++ b/image/templates/helm/stackrox-secured-cluster/internal/expandables.yaml @@ -28,7 +28,7 @@ collector: resources: true complianceResources: true nodeScanningResources: true - sfaResources: true + famResources: true nodeSelector: true scanner: resources: true diff --git a/image/templates/helm/stackrox-secured-cluster/templates/collector.yaml.htpl b/image/templates/helm/stackrox-secured-cluster/templates/collector.yaml.htpl index 27f318e8ccc9b..af6e5d8c35d14 100644 --- a/image/templates/helm/stackrox-secured-cluster/templates/collector.yaml.htpl +++ b/image/templates/helm/stackrox-secured-cluster/templates/collector.yaml.htpl @@ -110,7 +110,7 @@ spec: [<- if .FeatureFlags.ROX_SENSITIVE_FILE_ACTIVITY >] - {{- if ._rox.collector.sfaEnabled }} + {{- if ._rox.collector.famEnabled }} - name: fact image: {{ quote ._rox.image.fact.fullRef }} imagePullPolicy: {{ ._rox.fact.imagePullPolicy }} @@ -133,7 +133,7 @@ spec: value: "/etc/ssh/sshd_config:/etc/sudoers:/etc/passwd:/etc/shadow" {{- include "srox.envVars" (list . "daemonset" "collector" "fact") | nindent 8 }} resources: - {{- ._rox.collector._sfaResources | nindent 10 }} + {{- ._rox.collector._famResources | nindent 10 }} securityContext: capabilities: drop: diff --git a/operator/api/v1alpha1/securedcluster_types.go b/operator/api/v1alpha1/securedcluster_types.go index c878994795ea1..35b098d8fc6ba 100644 --- a/operator/api/v1alpha1/securedcluster_types.go +++ b/operator/api/v1alpha1/securedcluster_types.go @@ -266,9 +266,9 @@ type PerNodeSpec struct { //+operator-sdk:csv:customresourcedefinitions:type=spec,order=3,displayName="Node Scanning Settings" NodeInventory *ContainerSpec `json:"nodeInventory,omitempty"` - // Settings for the Sensitive File Activity container, which is responsible for file activity monitoring on the Node. - //+operator-sdk:csv:customresourcedefinitions:type=spec,order=4,displayName="SFA" - SFA *SFAContainerSpec `json:"sfa,omitempty"` + // Settings for the File Activity Monitoring container, which is responsible for monitoring file operations on the Node. + //+operator-sdk:csv:customresourcedefinitions:type=spec,order=4,displayName="File Activity Monitoring Settings" + FileActivityMonitoring *FAMContainerSpec `json:"fileActivityMonitoring,omitempty"` // To ensure comprehensive monitoring of your cluster activity, Red Hat Advanced Cluster Security // will run services on every node in the cluster, including tainted nodes by default. If you do @@ -370,28 +370,28 @@ type CollectorContainerSpec struct { ContainerSpec `json:",inline"` } -// SFAContainerSpec defines settings for the Sensitive File Activity agent container. -type SFAContainerSpec struct { - // Specifies whether Sensitive File Activity agent is deployed. +// FAMContainerSpec defines settings for the File Activity Monitoring container. +type FAMContainerSpec struct { + // Specifies whether File Activity Monitoring is deployed. // The default is: Disabled. 
- //+operator-sdk:csv:customresourcedefinitions:type=spec,order=1,displayName="SFA Agent" - Agent *DeploySFAAgent `json:"agent,omitempty"` + //+operator-sdk:csv:customresourcedefinitions:type=spec,order=1,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:select:Enabled", "urn:alm:descriptor:com.tectonic.ui:select:Disabled"} + Mode *FAMMode `json:"mode,omitempty"` //+operator-sdk:csv:customresourcedefinitions:type=spec,order=2 ContainerSpec `json:",inline"` } -// DeploySFAAgent is a type for values of spec.perNode.sfa.agent +// FAMMode is a type for values of spec.perNode.fileActivityMonitoring.mode // +kubebuilder:validation:Enum=Enabled;Disabled -type DeploySFAAgent string +type FAMMode string const ( - SFAAgentEnabled DeploySFAAgent = "Enabled" - SFAAgentDisabled DeploySFAAgent = "Disabled" + FileActivityMonitoringEnabled FAMMode = "Enabled" + FileActivityMonitoringDisabled FAMMode = "Disabled" ) -// Pointer returns the given DeploySFAAgent value as a pointer, needed in k8s resource structs. -func (v DeploySFAAgent) Pointer() *DeploySFAAgent { +// Pointer returns the given Mode value as a pointer, needed in k8s resource structs. +func (v FAMMode) Pointer() *FAMMode { return &v } diff --git a/operator/api/v1alpha1/zz_generated.deepcopy.go b/operator/api/v1alpha1/zz_generated.deepcopy.go index ad07c187331a0..4a5a71ac38d75 100644 --- a/operator/api/v1alpha1/zz_generated.deepcopy.go +++ b/operator/api/v1alpha1/zz_generated.deepcopy.go @@ -947,6 +947,27 @@ func (in *ExposureRouteReencryptTLS) DeepCopy() *ExposureRouteReencryptTLS { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FAMContainerSpec) DeepCopyInto(out *FAMContainerSpec) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(FAMMode) + **out = **in + } + in.ContainerSpec.DeepCopyInto(&out.ContainerSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FAMContainerSpec. +func (in *FAMContainerSpec) DeepCopy() *FAMContainerSpec { + if in == nil { + return nil + } + out := new(FAMContainerSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GlobalMonitoring) DeepCopyInto(out *GlobalMonitoring) { *out = *in @@ -1296,9 +1317,9 @@ func (in *PerNodeSpec) DeepCopyInto(out *PerNodeSpec) { *out = new(ContainerSpec) (*in).DeepCopyInto(*out) } - if in.SFA != nil { - in, out := &in.SFA, &out.SFA - *out = new(SFAContainerSpec) + if in.FileActivityMonitoring != nil { + in, out := &in.FileActivityMonitoring, &out.FileActivityMonitoring + *out = new(FAMContainerSpec) (*in).DeepCopyInto(*out) } if in.TaintToleration != nil { @@ -1345,27 +1366,6 @@ func (in *ProcessBaselinesSpec) DeepCopy() *ProcessBaselinesSpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SFAContainerSpec) DeepCopyInto(out *SFAContainerSpec) { - *out = *in - if in.Agent != nil { - in, out := &in.Agent, &out.Agent - *out = new(DeploySFAAgent) - **out = **in - } - in.ContainerSpec.DeepCopyInto(&out.ContainerSpec) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SFAContainerSpec. 
-func (in *SFAContainerSpec) DeepCopy() *SFAContainerSpec { - if in == nil { - return nil - } - out := new(SFAContainerSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ScannerAnalyzerComponent) DeepCopyInto(out *ScannerAnalyzerComponent) { *out = *in diff --git a/operator/bundle/manifests/platform.stackrox.io_securedclusters.yaml b/operator/bundle/manifests/platform.stackrox.io_securedclusters.yaml index 373bc64ad6527..8c93f82f5d879 100644 --- a/operator/bundle/manifests/platform.stackrox.io_securedclusters.yaml +++ b/operator/bundle/manifests/platform.stackrox.io_securedclusters.yaml @@ -818,31 +818,18 @@ spec: type: object type: object type: object - hostAliases: - description: HostAliases allows configuring additional hostnames - to resolve in the pod's hosts file. - items: - description: |- - HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the - pod's hosts file. - properties: - hostnames: - description: Hostnames for the above IP address. - items: - type: string - type: array - x-kubernetes-list-type: atomic - ip: - description: IP address of the host file entry. - type: string - required: - - ip - type: object - type: array - nodeInventory: - description: Settings for the Node-Inventory container, which - is responsible for scanning the Nodes' filesystem. + fileActivityMonitoring: + description: Settings for the File Activity Monitoring container, + which is responsible for monitoring file operations on the Node. properties: + mode: + description: |- + Specifies whether File Activity Monitoring is deployed. + The default is: Disabled. + enum: + - Enabled + - Disabled + type: string resources: description: |- Allows overriding the default resource settings for this component. Please consult the documentation @@ -905,18 +892,31 @@ spec: type: object type: object type: object - sfa: - description: Settings for the Sensitive File Activity container, - which is responsible for file activity monitoring on the Node. + hostAliases: + description: HostAliases allows configuring additional hostnames + to resolve in the pod's hosts file. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + nodeInventory: + description: Settings for the Node-Inventory container, which + is responsible for scanning the Nodes' filesystem. properties: - agent: - description: |- - Specifies whether Sensitive File Activity agent is deployed. - The default is: Disabled. - enum: - - Enabled - - Disabled - type: string resources: description: |- Allows overriding the default resource settings for this component. Please consult the documentation diff --git a/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml b/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml index 8f2125ae73d86..c901c6988ed61 100644 --- a/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml +++ b/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml @@ -1407,10 +1407,10 @@ spec: for scanning the Nodes' filesystem. 
displayName: Node Scanning Settings path: perNode.nodeInventory - - description: Settings for the Sensitive File Activity container, which is - responsible for file activity monitoring on the Node. - displayName: SFA - path: perNode.sfa + - description: Settings for the File Activity Monitoring container, which is + responsible for monitoring file operations on the Node. + displayName: File Activity Monitoring Settings + path: perNode.fileActivityMonitoring - description: 'To ensure comprehensive monitoring of your cluster activity, Red Hat Advanced Cluster Security @@ -1470,25 +1470,28 @@ spec: path: perNode.compliance.resources x-descriptors: - urn:alm:descriptor:com.tectonic.ui:resourceRequirements + - description: 'Specifies whether File Activity Monitoring is deployed. + + The default is: Disabled.' + displayName: Mode + path: perNode.fileActivityMonitoring.mode + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:Enabled + - urn:alm:descriptor:com.tectonic.ui:select:Disabled - description: 'Allows overriding the default resource settings for this component. Please consult the documentation for an overview of default resource requirements and a sizing guide.' displayName: Resources - path: perNode.nodeInventory.resources + path: perNode.fileActivityMonitoring.resources x-descriptors: - urn:alm:descriptor:com.tectonic.ui:resourceRequirements - - description: 'Specifies whether Sensitive File Activity agent is deployed. - - The default is: Disabled.' - displayName: SFA Agent - path: perNode.sfa.agent - description: 'Allows overriding the default resource settings for this component. Please consult the documentation for an overview of default resource requirements and a sizing guide.' displayName: Resources - path: perNode.sfa.resources + path: perNode.nodeInventory.resources x-descriptors: - urn:alm:descriptor:com.tectonic.ui:resourceRequirements - description: 'Should process baselines be automatically locked when the observation diff --git a/operator/config/crd/bases/platform.stackrox.io_securedclusters.yaml b/operator/config/crd/bases/platform.stackrox.io_securedclusters.yaml index 168d2f655024d..8a69dfecad4e2 100644 --- a/operator/config/crd/bases/platform.stackrox.io_securedclusters.yaml +++ b/operator/config/crd/bases/platform.stackrox.io_securedclusters.yaml @@ -817,31 +817,18 @@ spec: type: object type: object type: object - hostAliases: - description: HostAliases allows configuring additional hostnames - to resolve in the pod's hosts file. - items: - description: |- - HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the - pod's hosts file. - properties: - hostnames: - description: Hostnames for the above IP address. - items: - type: string - type: array - x-kubernetes-list-type: atomic - ip: - description: IP address of the host file entry. - type: string - required: - - ip - type: object - type: array - nodeInventory: - description: Settings for the Node-Inventory container, which - is responsible for scanning the Nodes' filesystem. + fileActivityMonitoring: + description: Settings for the File Activity Monitoring container, + which is responsible for monitoring file operations on the Node. properties: + mode: + description: |- + Specifies whether File Activity Monitoring is deployed. + The default is: Disabled. + enum: + - Enabled + - Disabled + type: string resources: description: |- Allows overriding the default resource settings for this component. 
Please consult the documentation @@ -904,18 +891,31 @@ spec: type: object type: object type: object - sfa: - description: Settings for the Sensitive File Activity container, - which is responsible for file activity monitoring on the Node. + hostAliases: + description: HostAliases allows configuring additional hostnames + to resolve in the pod's hosts file. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + x-kubernetes-list-type: atomic + ip: + description: IP address of the host file entry. + type: string + required: + - ip + type: object + type: array + nodeInventory: + description: Settings for the Node-Inventory container, which + is responsible for scanning the Nodes' filesystem. properties: - agent: - description: |- - Specifies whether Sensitive File Activity agent is deployed. - The default is: Disabled. - enum: - - Enabled - - Disabled - type: string resources: description: |- Allows overriding the default resource settings for this component. Please consult the documentation diff --git a/operator/config/manifests/bases/rhacs-operator.clusterserviceversion.yaml b/operator/config/manifests/bases/rhacs-operator.clusterserviceversion.yaml index b9cb95381c27e..d115a22e3eb79 100644 --- a/operator/config/manifests/bases/rhacs-operator.clusterserviceversion.yaml +++ b/operator/config/manifests/bases/rhacs-operator.clusterserviceversion.yaml @@ -1006,10 +1006,13 @@ spec: - urn:alm:descriptor:com.tectonic.ui:select:CORE_BPF - urn:alm:descriptor:com.tectonic.ui:select:NoCollection - description: |- - Specifies whether Sensitive File Activity agent is deployed. + Specifies whether File Activity Monitoring is deployed. The default is: Disabled. - displayName: SFA Agent - path: perNode.sfa.agent + displayName: Mode + path: perNode.fileActivityMonitoring.mode + x-descriptors: + - urn:alm:descriptor:com.tectonic.ui:select:Enabled + - urn:alm:descriptor:com.tectonic.ui:select:Disabled - description: |- Should process baselines be automatically locked when the observation period (1 hour by default) ends. The default is: Disabled. @@ -1225,10 +1228,10 @@ spec: When Optional is false, and the specified resource does not exist in the output manifests, an error will be thrown. displayName: Optional path: overlays[0].optional - - description: Settings for the Sensitive File Activity container, which is - responsible for file activity monitoring on the Node. - displayName: SFA - path: perNode.sfa + - description: Settings for the File Activity Monitoring container, which is + responsible for monitoring file operations on the Node. + displayName: File Activity Monitoring Settings + path: perNode.fileActivityMonitoring - description: 'The default is: 5.' displayName: Autoscaling Maximum Replicas path: scanner.analyzer.scaling.maxReplicas @@ -1354,14 +1357,14 @@ spec: Allows overriding the default resource settings for this component. Please consult the documentation for an overview of default resource requirements and a sizing guide. displayName: Resources - path: perNode.nodeInventory.resources + path: perNode.fileActivityMonitoring.resources x-descriptors: - urn:alm:descriptor:com.tectonic.ui:resourceRequirements - description: |- Allows overriding the default resource settings for this component. Please consult the documentation for an overview of default resource requirements and a sizing guide. 
displayName: Resources - path: perNode.sfa.resources + path: perNode.nodeInventory.resources x-descriptors: - urn:alm:descriptor:com.tectonic.ui:resourceRequirements - description: |- diff --git a/operator/install/manifest.yaml b/operator/install/manifest.yaml index 880371f119f4b..b742b25d9913c 100644 --- a/operator/install/manifest.yaml +++ b/operator/install/manifest.yaml @@ -3062,13 +3062,13 @@ spec: type: object type: object type: object - sfa: - description: Settings for the Sensitive File Activity container, - which is responsible for file activity monitoring on the Node. + fileActivityMonitoring: + description: Settings for the File Activity Monitoring container, + which is responsible for monitoring file operations on the Node. properties: - agent: + mode: description: |- - Specifies whether Sensitive File Activity agent is deployed. + Specifies whether File Activity Monitoring is deployed. The default is: Disabled. enum: - Enabled diff --git a/operator/internal/securedcluster/defaults/static.go b/operator/internal/securedcluster/defaults/static.go index 32f1e123a1a90..838e48280c0ff 100644 --- a/operator/internal/securedcluster/defaults/static.go +++ b/operator/internal/securedcluster/defaults/static.go @@ -21,8 +21,8 @@ var staticDefaults = platform.SecuredClusterSpec{ Collection: platform.CollectionCOREBPF.Pointer(), }, TaintToleration: platform.TaintTolerate.Pointer(), - SFA: &platform.SFAContainerSpec{ - Agent: platform.SFAAgentDisabled.Pointer(), + FileActivityMonitoring: &platform.FAMContainerSpec{ + Mode: platform.FileActivityMonitoringDisabled.Pointer(), }, }, AuditLogs: &platform.AuditLogsSpec{ diff --git a/operator/internal/securedcluster/values/translation/translation.go b/operator/internal/securedcluster/values/translation/translation.go index c6fd1f93e7cd6..e965f2d684b0f 100644 --- a/operator/internal/securedcluster/values/translation/translation.go +++ b/operator/internal/securedcluster/values/translation/translation.go @@ -420,7 +420,7 @@ func (t Translator) getCollectorValues(perNode *platform.PerNodeSpec) *translati cv.AddAllFrom(t.getCollectorContainerValues(perNode.Collector)) cv.AddAllFrom(t.getComplianceContainerValues(perNode.Compliance)) cv.AddAllFrom(t.getNodeInventoryContainerValues(perNode.NodeInventory)) - cv.AddAllFrom(t.getSFAContainerValues(perNode.SFA)) + cv.AddAllFrom(t.getFAMContainerValues(perNode.FileActivityMonitoring)) return &cv } @@ -476,22 +476,22 @@ func (t Translator) getNodeInventoryContainerValues(nodeInventory *platform.Cont return &cv } -func (t Translator) getSFAContainerValues(sfaContainerSpec *platform.SFAContainerSpec) *translation.ValuesBuilder { - if sfaContainerSpec == nil { +func (t Translator) getFAMContainerValues(famContainerSpec *platform.FAMContainerSpec) *translation.ValuesBuilder { + if famContainerSpec == nil { return nil } cv := translation.NewValuesBuilder() - switch *sfaContainerSpec.Agent { - case platform.SFAAgentEnabled: - cv.SetBoolValue("sfaEnabled", true) - case platform.SFAAgentDisabled: - cv.SetBoolValue("sfaEnabled", false) + switch *famContainerSpec.Mode { + case platform.FileActivityMonitoringEnabled: + cv.SetBoolValue("famEnabled", true) + case platform.FileActivityMonitoringDisabled: + cv.SetBoolValue("famEnabled", false) default: - return cv.SetError(errors.Errorf("invalid spec.perNode.sfa.agent setting %q", *sfaContainerSpec.Agent)) + return cv.SetError(errors.Errorf("invalid spec.perNode.fileActivityMonitoring.mode setting %q", *famContainerSpec.Mode)) } - cv.AddChild("sfaResources", 
translation.GetResources(sfaContainerSpec.Resources)) + cv.AddChild("famResources", translation.GetResources(famContainerSpec.Resources)) return &cv } diff --git a/tests/e2e/lib.sh b/tests/e2e/lib.sh index 2b088a5689dd4..c6b79a62e01d8 100755 --- a/tests/e2e/lib.sh +++ b/tests/e2e/lib.sh @@ -414,7 +414,7 @@ deploy_sensor_via_operator() { local central_namespace=${2:-stackrox} local validate=${3:-true} local scanner_component_setting="Disabled" - local sfa_agent_setting="Disabled" + local fam_mode_setting="Disabled" local central_endpoint="central.${central_namespace}.svc:443" info "Deploying sensor using operator into namespace ${sensor_namespace} (central is expected in namespace ${central_namespace})" @@ -442,13 +442,13 @@ deploy_sensor_via_operator() { fi if [[ "${SFA_AGENT:-}" == "Enabled" ]]; then - echo "Enabling SFA agent due to SFA_AGENT variable: ${SFA_AGENT}" - sfa_agent_setting="Enabled" + echo "Enabling File Activity Monitoring due to SFA_AGENT variable: ${SFA_AGENT}" + fam_mode_setting="Enabled" fi env - \ scanner_component_setting="$scanner_component_setting" \ - sfa_agent_setting="$sfa_agent_setting" \ + fam_mode_setting="$fam_mode_setting" \ central_endpoint="$central_endpoint" \ "${envsubst}" \ < "${secured_cluster_yaml_path}" | kubectl apply -n "${sensor_namespace}" --validate="${validate}" -f - diff --git a/tests/e2e/yaml/secured-cluster-cr.envsubst.yaml b/tests/e2e/yaml/secured-cluster-cr.envsubst.yaml index ee74e9daf64fe..51067bcc83ef5 100644 --- a/tests/e2e/yaml/secured-cluster-cr.envsubst.yaml +++ b/tests/e2e/yaml/secured-cluster-cr.envsubst.yaml @@ -6,8 +6,8 @@ spec: centralEndpoint: $central_endpoint clusterName: remote perNode: - sfa: - agent: $sfa_agent_setting + fileActivityMonitoring: + mode: $fam_mode_setting scanner: scannerComponent: $scanner_component_setting analyzer: From be351690fc5a651efe179a06bfbe1dc03bf9a795 Mon Sep 17 00:00:00 2001 From: AJ Heflin <77823405+ajheflin@users.noreply.github.com> Date: Thu, 12 Feb 2026 11:57:25 -0500 Subject: [PATCH 171/232] ROX-32723: Use view-based query for SearchListImages (#18922) Co-authored-by: Claude Opus 4.6 (1M context) --- central/imagev2/datastore/datastore_impl.go | 14 ++--- .../imagev2/datastore/store/mocks/store.go | 15 +++++ .../imagev2/datastore/store/postgres/store.go | 26 +++++++++ central/imagev2/datastore/store/store.go | 2 + central/imagev2/views/list_image_view.go | 55 +++++++++++++++++++ 5 files changed, 104 insertions(+), 8 deletions(-) create mode 100644 central/imagev2/views/list_image_view.go diff --git a/central/imagev2/datastore/datastore_impl.go b/central/imagev2/datastore/datastore_impl.go index 6686fa0ce6c25..607af94b2ff64 100644 --- a/central/imagev2/datastore/datastore_impl.go +++ b/central/imagev2/datastore/datastore_impl.go @@ -16,7 +16,6 @@ import ( "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/concurrency" "github.com/stackrox/rox/pkg/errorhelpers" - imageTypes "github.com/stackrox/rox/pkg/images/types" "github.com/stackrox/rox/pkg/images/utils" "github.com/stackrox/rox/pkg/logging" "github.com/stackrox/rox/pkg/sac" @@ -111,17 +110,16 @@ func (ds *datastoreImpl) SearchImages(ctx context.Context, q *v1.Query) ([]*v1.S func (ds *datastoreImpl) SearchListImages(ctx context.Context, q *v1.Query) ([]*storage.ListImage, error) { defer metrics.SetDatastoreFunctionDuration(time.Now(), "ImageV2", "SearchListImages") - var imgs []*storage.ListImage - err := ds.storage.WalkMetadataByQuery(ctx, q, func(img *storage.ImageV2) error { - imgs = append(imgs, 
imageTypes.ConvertImageToListImage(utils.ConvertToV1(img))) - return nil - }) + results, err := ds.storage.GetListImagesView(ctx, q) if err != nil { return nil, err } - for _, image := range imgs { - image.Priority = ds.imageRanker.GetRankForID(image.GetId()) + imgs := make([]*storage.ListImage, 0, len(results)) + for _, r := range results { + img := r.ToListImage() + img.Priority = ds.imageRanker.GetRankForID(r.Digest) + imgs = append(imgs, img) } return imgs, nil diff --git a/central/imagev2/datastore/store/mocks/store.go b/central/imagev2/datastore/store/mocks/store.go index 9ff0dee364a37..556bc31827199 100644 --- a/central/imagev2/datastore/store/mocks/store.go +++ b/central/imagev2/datastore/store/mocks/store.go @@ -165,6 +165,21 @@ func (mr *MockStoreMockRecorder) GetImagesRiskView(ctx, q any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetImagesRiskView", reflect.TypeOf((*MockStore)(nil).GetImagesRiskView), ctx, q) } +// GetListImagesView mocks base method. +func (m *MockStore) GetListImagesView(ctx context.Context, q *v1.Query) ([]*views.ListImageV2View, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetListImagesView", ctx, q) + ret0, _ := ret[0].([]*views.ListImageV2View) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetListImagesView indicates an expected call of GetListImagesView. +func (mr *MockStoreMockRecorder) GetListImagesView(ctx, q any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetListImagesView", reflect.TypeOf((*MockStore)(nil).GetListImagesView), ctx, q) +} + // GetManyImageMetadata mocks base method. func (m *MockStore) GetManyImageMetadata(ctx context.Context, id []string) ([]*storage.ImageV2, error) { m.ctrl.T.Helper() diff --git a/central/imagev2/datastore/store/postgres/store.go b/central/imagev2/datastore/store/postgres/store.go index d4465171a9acd..e5d7b044a9b8f 100644 --- a/central/imagev2/datastore/store/postgres/store.go +++ b/central/imagev2/datastore/store/postgres/store.go @@ -976,6 +976,32 @@ func (s *storeImpl) GetImagesIdAndDigestView(ctx context.Context, q *v1.Query) ( return results, err } +// GetListImagesView retrieves the fields needed for ListImage responses. +func (s *storeImpl) GetListImagesView(ctx context.Context, q *v1.Query) ([]*views.ListImageV2View, error) { + defer metrics.SetPostgresOperationDurationTime(time.Now(), ops.Search, "ListImagesView") + + q = s.applyDefaultSort(q) + + selects := []*v1.QuerySelect{ + search.NewQuerySelect(search.ImageSHA).Proto(), + search.NewQuerySelect(search.ImageName).Proto(), + search.NewQuerySelect(search.ComponentCount).Proto(), + search.NewQuerySelect(search.ImageCVECount).Proto(), + search.NewQuerySelect(search.FixableCVECount).Proto(), + search.NewQuerySelect(search.ImageCreatedTime).Proto(), + search.NewQuerySelect(search.LastUpdatedTime).Proto(), + } + cloned := q.CloneVT() + cloned.Selects = selects + + var results []*views.ListImageV2View + err := pgSearch.RunSelectRequestForSchemaFn[views.ListImageV2View](ctx, s.db, pkgSchema.ImagesV2Schema, cloned, func(row *views.ListImageV2View) error { + results = append(results, row) + return nil + }) + return results, err +} + // UpdateVulnState updates the state of a vulnerability in the store. 
func (s *storeImpl) UpdateVulnState(ctx context.Context, cve string, imageIDs []string, state storage.VulnerabilityState) error { defer metrics.SetPostgresOperationDurationTime(time.Now(), ops.Update, "UpdateVulnState") diff --git a/central/imagev2/datastore/store/store.go b/central/imagev2/datastore/store/store.go index b5ec1dd57f2c3..10e6555b828a6 100644 --- a/central/imagev2/datastore/store/store.go +++ b/central/imagev2/datastore/store/store.go @@ -36,4 +36,6 @@ type Store interface { GetImagesRiskView(ctx context.Context, q *v1.Query) ([]*views.ImageV2RiskView, error) // GetImagesIdAndDigestView retrieves an image id and digest for pruning purposes GetImagesIdAndDigestView(ctx context.Context, q *v1.Query) ([]*views.ImageIDAndDigestView, error) + // GetListImagesView retrieves the fields needed for ListImage responses + GetListImagesView(ctx context.Context, q *v1.Query) ([]*views.ListImageV2View, error) } diff --git a/central/imagev2/views/list_image_view.go b/central/imagev2/views/list_image_view.go new file mode 100644 index 0000000000000..82c328fa8bf91 --- /dev/null +++ b/central/imagev2/views/list_image_view.go @@ -0,0 +1,55 @@ +package views + +import ( + "time" + + "github.com/stackrox/rox/generated/storage" + "google.golang.org/protobuf/types/known/timestamppb" +) + +// ListImageV2View holds the fields needed for ListImage responses, using view-based +// column selection instead of full protobuf deserialization. +type ListImageV2View struct { + Digest string `db:"image_sha"` + Name string `db:"image"` + ComponentCount int32 `db:"component_count"` + CVECount int32 `db:"image_cve_count"` + FixableCVECount int32 `db:"fixable_cve_count"` + Created *time.Time `db:"image_created_time"` + LastUpdated *time.Time `db:"last_updated"` +} + +// GetCreated returns the Created timestamp as a protobuf Timestamp, or nil if not set. +func (v *ListImageV2View) GetCreated() *timestamppb.Timestamp { + if v.Created == nil { + return nil + } + return timestamppb.New(*v.Created) +} + +// GetLastUpdated returns the LastUpdated timestamp as a protobuf Timestamp, or nil if not set. +func (v *ListImageV2View) GetLastUpdated() *timestamppb.Timestamp { + if v.LastUpdated == nil { + return nil + } + return timestamppb.New(*v.LastUpdated) +} + +// ToListImage converts a ListImageV2View to a storage.ListImage proto. 
+func (v *ListImageV2View) ToListImage() *storage.ListImage { + return &storage.ListImage{ + Id: v.Digest, + Name: v.Name, + SetComponents: &storage.ListImage_Components{ + Components: v.ComponentCount, + }, + SetCves: &storage.ListImage_Cves{ + Cves: v.CVECount, + }, + SetFixable: &storage.ListImage_FixableCves{ + FixableCves: v.FixableCVECount, + }, + Created: v.GetCreated(), + LastUpdated: v.GetLastUpdated(), + } +} From dba6a176230312eee6fa58937735c47b9749ef91 Mon Sep 17 00:00:00 2001 From: David Shrewsberry <99685630+dashrews78@users.noreply.github.com> Date: Thu, 12 Feb 2026 19:41:57 -0500 Subject: [PATCH 172/232] chore: fix docker version (#19003) --- .devcontainer/devcontainer.json | 2 +- .github/workflows/batch-load-test-metrics.yml | 2 +- .github/workflows/build.yaml | 44 +++++++++---------- .../workflows/check-crd-compatibility.yaml | 2 +- .github/workflows/ci-failures-report.yml | 2 +- .github/workflows/fixxxer.yaml | 2 +- .github/workflows/scanner-build.yaml | 8 ++-- .../scanner-db-integration-tests.yaml | 2 +- .../scanner-versioned-definitions-update.yaml | 4 +- .github/workflows/style.yaml | 8 ++-- .github/workflows/unit-tests.yaml | 16 +++---- .openshift-ci/Dockerfile.build_root | 2 +- .openshift-ci/dev-requirements.txt | 2 +- BUILD_IMAGE_VERSION | 2 +- operator/Makefile | 3 +- operator/bundle_helpers/requirements.txt | 2 +- scale/signatures/deploy.yaml | 2 +- tests/e2e/run-e2e-tests.sh | 2 +- 18 files changed, 53 insertions(+), 54 deletions(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 512481c525896..fbf56430c81df 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,5 +1,5 @@ { - "image":"quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1", + "image":"quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2", "containerEnv":{ "CI":"true" }, diff --git a/.github/workflows/batch-load-test-metrics.yml b/.github/workflows/batch-load-test-metrics.yml index de06ff50f3905..1dbec06c9de11 100644 --- a/.github/workflows/batch-load-test-metrics.yml +++ b/.github/workflows/batch-load-test-metrics.yml @@ -12,7 +12,7 @@ jobs: if: ${{ github.repository_owner == 'stackrox' }} runs-on: ubuntu-latest container: - image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 steps: - name: Checkout uses: actions/checkout@v6 diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index b83806c1e8957..0df04949e51cf 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -114,7 +114,7 @@ jobs: UI_PKG_INSTALL_EXTRA_ARGS: --ignore-scripts runs-on: ubuntu-latest container: - image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 volumes: - /usr:/mnt/usr - /opt:/mnt/opt @@ -149,7 +149,7 @@ jobs: pre-build-cli: runs-on: ubuntu-latest container: - image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 volumes: - /usr:/mnt/usr - /opt:/mnt/opt @@ -190,7 +190,7 @@ jobs: needs: define-job-matrix runs-on: ubuntu-latest container: - image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 volumes: - /usr:/mnt/usr - /opt:/mnt/opt @@ -260,7 +260,7 @@ jobs: pre-build-docs: runs-on: ubuntu-latest container: - image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 volumes: - /usr:/mnt/usr - /opt:/mnt/opt @@ 
-320,7 +320,7 @@ jobs: GO_BINARIES_BUILD_ARTIFACT: "" ROX_PRODUCT_BRANDING: "" container: - image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 volumes: - /usr:/mnt/usr - /opt:/mnt/opt @@ -504,7 +504,7 @@ jobs: env: ROX_PRODUCT_BRANDING: "" container: - image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 volumes: - /usr:/mnt/usr - /opt:/mnt/opt @@ -599,7 +599,7 @@ jobs: needs: - define-job-matrix container: - image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 volumes: - /usr:/mnt/usr - /opt:/mnt/opt @@ -626,6 +626,10 @@ jobs: free-disk-space: 30 gcp-account: ${{ secrets.GCP_SERVICE_ACCOUNT_STACKROX_CI }} + - name: Set up QEMU + if: matrix.arch != 'amd64' + uses: docker/setup-qemu-action@v3 + - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 @@ -661,6 +665,15 @@ jobs: run: | ./scripts/ci/lib.sh registry_rw_login "quay.io/${QUAY_ORG}" + - name: Login to docker.io to mitigate rate limiting on downloading images + uses: docker/login-action@v3 + # Skip for external contributions. + if: | + github.event_name == 'push' || !github.event.pull_request.head.repo.fork + with: + username: ${{ secrets.DOCKERHUB_CI_ACCOUNT_USERNAME }} + password: ${{ secrets.DOCKERHUB_CI_ACCOUNT_PASSWORD }} + - name: Build Operator Bundle image if: | matrix.name != 'STACKROX_BRANDING' @@ -685,19 +698,6 @@ jobs: # simply builds the actual operator binary for the correct target architecture. CGO_ENABLED=0 GOARCH=${{ matrix.arch }} scripts/lib.sh retry 6 true make -C operator/ build docker-build - - name: Login to docker.io to mitigate rate limiting on downloading images - uses: docker/login-action@v3 - # Skip for external contributions. 
- if: | - github.event_name == 'push' || !github.event.pull_request.head.repo.fork - with: - username: ${{ secrets.DOCKERHUB_CI_ACCOUNT_USERNAME }} - password: ${{ secrets.DOCKERHUB_CI_ACCOUNT_PASSWORD }} - - - name: Set up QEMU - if: matrix.arch != 'amd64' - uses: docker/setup-qemu-action@v3 - - name: Check that Operator image is runnable run: docker run --rm --platform linux/${{ matrix.arch }} "quay.io/${QUAY_ORG}/stackrox-operator:$(make --quiet --no-print-directory -C operator tag)" --help @@ -761,7 +761,7 @@ jobs: env: ROX_PRODUCT_BRANDING: ${{ matrix.name }} container: - image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 volumes: - /usr:/mnt/usr - /opt:/mnt/opt @@ -805,7 +805,7 @@ jobs: ARTIFACT_DIR: junit-reports/ runs-on: ubuntu-latest container: - image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 needs: - pre-build-cli - pre-build-go-binaries diff --git a/.github/workflows/check-crd-compatibility.yaml b/.github/workflows/check-crd-compatibility.yaml index 2e32fdb61d995..dd33ab13965be 100644 --- a/.github/workflows/check-crd-compatibility.yaml +++ b/.github/workflows/check-crd-compatibility.yaml @@ -20,7 +20,7 @@ jobs: check-crd-compatibility: runs-on: ubuntu-latest container: - image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 outputs: released_version: ${{ steps.get_previous_released_version.outputs.released_version }} steps: diff --git a/.github/workflows/ci-failures-report.yml b/.github/workflows/ci-failures-report.yml index 369a09976720b..6432642796a9f 100644 --- a/.github/workflows/ci-failures-report.yml +++ b/.github/workflows/ci-failures-report.yml @@ -15,7 +15,7 @@ jobs: if: ${{ github.repository_owner == 'stackrox' }} runs-on: ubuntu-latest container: - image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 steps: - name: Checkout uses: actions/checkout@v6 diff --git a/.github/workflows/fixxxer.yaml b/.github/workflows/fixxxer.yaml index f562e9ebe7eba..ca0ecafb6e273 100644 --- a/.github/workflows/fixxxer.yaml +++ b/.github/workflows/fixxxer.yaml @@ -17,7 +17,7 @@ jobs: if: ${{ github.event.issue.pull_request && github.event.comment.body == '/fixxx' }} runs-on: ubuntu-latest container: - image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 steps: - name: Fetch PR metadata diff --git a/.github/workflows/scanner-build.yaml b/.github/workflows/scanner-build.yaml index 1493f701f16ae..0bd77af73b883 100644 --- a/.github/workflows/scanner-build.yaml +++ b/.github/workflows/scanner-build.yaml @@ -86,7 +86,7 @@ jobs: # race-condition-debug - built with -race matrix: ${{ fromJson(needs.define-scanner-job-matrix.outputs.matrix).pre_build_scanner_go_binary }} container: - image: quay.io/stackrox-io/apollo-ci:scanner-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:scanner-test-0.5.2 volumes: - /usr:/mnt/usr - /opt:/mnt/opt @@ -146,7 +146,7 @@ jobs: needs: pre-build-scanner-go-binary runs-on: ubuntu-latest container: - image: quay.io/stackrox-io/apollo-ci:scanner-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:scanner-test-0.5.2 if: contains(github.event.pull_request.labels.*.name, 'scan-go-binaries') env: ARTIFACT_DIR: junit-reports/ @@ -200,7 +200,7 @@ jobs: # race-condition-debug - built with -race matrix: ${{ 
fromJson(needs.define-scanner-job-matrix.outputs.matrix).build_and_push_scanner }} container: - image: quay.io/stackrox-io/apollo-ci:scanner-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:scanner-test-0.5.2 volumes: - /usr:/mnt/usr - /opt:/mnt/opt @@ -278,7 +278,7 @@ jobs: # race-condition-debug matrix: ${{ fromJson(needs.define-scanner-job-matrix.outputs.matrix).push_scanner_manifests }} container: - image: quay.io/stackrox-io/apollo-ci:scanner-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:scanner-test-0.5.2 env: QUAY_RHACS_ENG_RW_USERNAME: ${{ secrets.QUAY_RHACS_ENG_RW_USERNAME }} QUAY_RHACS_ENG_RW_PASSWORD: ${{ secrets.QUAY_RHACS_ENG_RW_PASSWORD }} diff --git a/.github/workflows/scanner-db-integration-tests.yaml b/.github/workflows/scanner-db-integration-tests.yaml index a17a3c2766522..26e0f1d4969f4 100644 --- a/.github/workflows/scanner-db-integration-tests.yaml +++ b/.github/workflows/scanner-db-integration-tests.yaml @@ -17,7 +17,7 @@ jobs: db-integration-tests: runs-on: ubuntu-latest container: - image: quay.io/stackrox-io/apollo-ci:scanner-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:scanner-test-0.5.2 steps: - name: Checkout uses: actions/checkout@v6 diff --git a/.github/workflows/scanner-versioned-definitions-update.yaml b/.github/workflows/scanner-versioned-definitions-update.yaml index 12c7b57fe9f44..ede14a5fce80f 100644 --- a/.github/workflows/scanner-versioned-definitions-update.yaml +++ b/.github/workflows/scanner-versioned-definitions-update.yaml @@ -190,7 +190,7 @@ jobs: - prepare-environment runs-on: ubuntu-latest container: - image: quay.io/stackrox-io/apollo-ci:scanner-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:scanner-test-0.5.2 volumes: # The updater makes heavy use of /tmp files. - /tmp:/tmp @@ -283,7 +283,7 @@ jobs: - prepare-environment runs-on: ubuntu-latest container: - image: quay.io/stackrox-io/apollo-ci:scanner-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:scanner-test-0.5.2 volumes: # The updater makes heavy use of /tmp files. 
- /tmp:/tmp diff --git a/.github/workflows/style.yaml b/.github/workflows/style.yaml index 4991ff69d19d0..df239fd819858 100644 --- a/.github/workflows/style.yaml +++ b/.github/workflows/style.yaml @@ -24,7 +24,7 @@ jobs: ARTIFACT_DIR: junit-reports/ runs-on: ubuntu-latest container: - image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 steps: - name: Checkout uses: actions/checkout@v6 @@ -50,7 +50,7 @@ jobs: ARTIFACT_DIR: junit-reports/ runs-on: ubuntu-latest container: - image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 steps: - name: Checkout uses: actions/checkout@v6 @@ -91,7 +91,7 @@ jobs: style-check: runs-on: ubuntu-latest container: - image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 volumes: - /usr:/mnt/usr - /opt:/mnt/opt @@ -238,7 +238,7 @@ jobs: openshift-ci-lint: runs-on: ubuntu-latest container: - image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 steps: - name: Checkout uses: actions/checkout@v6 diff --git a/.github/workflows/unit-tests.yaml b/.github/workflows/unit-tests.yaml index f048282df2838..5d66291c28396 100644 --- a/.github/workflows/unit-tests.yaml +++ b/.github/workflows/unit-tests.yaml @@ -26,7 +26,7 @@ jobs: outputs: new-jiras: ${{ steps.junit2jira.outputs.new-jiras }} container: - image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 volumes: - /usr:/mnt/usr - /opt:/mnt/opt @@ -97,7 +97,7 @@ jobs: outputs: new-jiras: ${{ steps.junit2jira.outputs.new-jiras }} container: - image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 volumes: - /usr:/mnt/usr - /opt:/mnt/opt @@ -157,7 +157,7 @@ jobs: go-bench: runs-on: ubuntu-latest container: - image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 volumes: - /usr:/mnt/usr - /opt:/mnt/opt @@ -207,7 +207,7 @@ jobs: outputs: new-jiras: ${{ steps.junit2jira.outputs.new-jiras }} container: - image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 steps: - name: Checkout uses: actions/checkout@v6 @@ -243,7 +243,7 @@ jobs: outputs: new-jiras: ${{ steps.junit2jira.outputs.new-jiras }} container: - image: quay.io/stackrox-io/apollo-ci:stackrox-ui-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-ui-test-0.5.2 steps: - name: Checkout uses: actions/checkout@v6 @@ -286,7 +286,7 @@ jobs: outputs: new-jiras: ${{ steps.junit2jira.outputs.new-jiras }} container: - image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 volumes: - /usr:/mnt/usr - /opt:/mnt/opt @@ -330,7 +330,7 @@ jobs: outputs: new-jiras: ${{ steps.junit2jira.outputs.new-jiras }} container: - image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 steps: - name: Checkout uses: actions/checkout@v6 @@ -364,7 +364,7 @@ jobs: openshift-ci-unit-tests: runs-on: ubuntu-latest container: - image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 steps: - name: Checkout uses: actions/checkout@v6 diff --git a/.openshift-ci/Dockerfile.build_root b/.openshift-ci/Dockerfile.build_root index 
158314ac8b810..dc8ba4bc9110a 100644 --- a/.openshift-ci/Dockerfile.build_root +++ b/.openshift-ci/Dockerfile.build_root @@ -27,4 +27,4 @@ # For an example, see https://github.com/stackrox/stackrox/pull/2762 and its counterpart # https://github.com/openshift/release/pull/31561 -FROM quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 +FROM quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 diff --git a/.openshift-ci/dev-requirements.txt b/.openshift-ci/dev-requirements.txt index 4b112aa426cd4..3cea5ed44ed04 100644 --- a/.openshift-ci/dev-requirements.txt +++ b/.openshift-ci/dev-requirements.txt @@ -1,4 +1,4 @@ -# These versions should match those used in the current CI test image (stackrox-test-0.5.1). +# These versions should match those used in the current CI test image (stackrox-test-0.5.2). # See .github/workflows/{lint,style}.yaml for that. # And the stackrox/rox-ci-image repo for the original source and pinned versions. pycodestyle==2.14.0 diff --git a/BUILD_IMAGE_VERSION b/BUILD_IMAGE_VERSION index 44776a28929be..455f95443ea3c 100644 --- a/BUILD_IMAGE_VERSION +++ b/BUILD_IMAGE_VERSION @@ -1 +1 @@ -stackrox-build-0.5.1 +stackrox-build-0.5.2 diff --git a/operator/Makefile b/operator/Makefile index a25efe522b009..c81bfd46c8793 100644 --- a/operator/Makefile +++ b/operator/Makefile @@ -347,8 +347,7 @@ build/Dockerfile.gen: Dockerfile .PHONY: docker-build docker-build: build/Dockerfile.gen smuggled-status-sh ## Build docker image with the operator. - DOCKER_BUILDKIT=1 BUILDKIT_PROGRESS=plain docker build \ - $(if $(DOCKER_BUILD_LOAD),--load) \ + BUILDKIT_PROGRESS=plain ../scripts/docker-build.sh \ -t ${IMG} \ $(if $(GOARCH),--build-arg TARGET_ARCH=$(GOARCH)) \ -f $< \ diff --git a/operator/bundle_helpers/requirements.txt b/operator/bundle_helpers/requirements.txt index afab1d7091db5..2bf4d90ed30a2 100644 --- a/operator/bundle_helpers/requirements.txt +++ b/operator/bundle_helpers/requirements.txt @@ -1,4 +1,4 @@ # PyYAML > 6.0 requires Python > 3.6. PyYAML==6.0 -# pytest==7.0.1 is the latest available for the quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 job container's Python. +# pytest==7.0.1 is the latest available for the quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 job container's Python. pytest==7.0.1 diff --git a/scale/signatures/deploy.yaml b/scale/signatures/deploy.yaml index acfe97d1589c1..82f3d2c843833 100644 --- a/scale/signatures/deploy.yaml +++ b/scale/signatures/deploy.yaml @@ -11,7 +11,7 @@ spec: spec: containers: - name: update-signature - image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 + image: quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 imagePullPolicy: IfNotPresent command: - /bin/bash diff --git a/tests/e2e/run-e2e-tests.sh b/tests/e2e/run-e2e-tests.sh index b03d0b0cd6e67..68006502afcf3 100755 --- a/tests/e2e/run-e2e-tests.sh +++ b/tests/e2e/run-e2e-tests.sh @@ -148,7 +148,7 @@ if [[ ! 
-f "/i-am-rox-ci-image" ]]; then --platform linux/amd64 \ --rm -it \ --entrypoint="$0" \ - quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.1 "$@" + quay.io/stackrox-io/apollo-ci:stackrox-test-0.5.2 "$@" exit 0 fi From 682dc835b298c5fa3acc2e4934fe48a19fc3c97e Mon Sep 17 00:00:00 2001 From: Stephan Hesselmann Date: Fri, 13 Feb 2026 11:02:42 +0100 Subject: [PATCH 173/232] ROX-31216: show signature integration names in image details (#18968) --- .../Image/ImagePageSignatureVerification.tsx | 23 ++------------ .../Tables/ImageOverviewTable.tsx | 2 ++ .../components/ImageDetailBadges.tsx | 1 + .../components/SignatureIntegrationLink.tsx | 29 +++++++++++++++++ .../components/VerifiedSignatureLabel.tsx | 31 +++++++++++++++++-- 5 files changed, 62 insertions(+), 24 deletions(-) create mode 100644 ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/components/SignatureIntegrationLink.tsx diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/Image/ImagePageSignatureVerification.tsx b/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/Image/ImagePageSignatureVerification.tsx index 3f839422a8f00..42a3d8cc48a7f 100644 --- a/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/Image/ImagePageSignatureVerification.tsx +++ b/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/Image/ImagePageSignatureVerification.tsx @@ -1,13 +1,11 @@ import { Divider, Flex, FlexItem, Label, PageSection, Text } from '@patternfly/react-core'; import { Table, TableText, Tbody, Td, Th, Thead, Tr } from '@patternfly/react-table'; import { CheckCircleIcon, ExclamationCircleIcon } from '@patternfly/react-icons'; -import { Link } from 'react-router-dom'; -import { integrationsPath } from 'routePaths'; import DateDistance from 'Components/DateDistance'; -import useIntegrationPermissions from 'Containers/Integrations/hooks/useIntegrationPermissions'; import type { SignatureVerificationResult, VerifiedStatus } from '../../types'; +import SignatureIntegrationLink from '../components/SignatureIntegrationLink'; export type ImagePageSignatureVerificationProps = { results?: SignatureVerificationResult[]; @@ -49,23 +47,6 @@ function getStatusMessage({ status, description }: SignatureVerificationResult) } function ImagePageSignatureVerification({ results }: ImagePageSignatureVerificationProps) { - const permissions = useIntegrationPermissions(); - const getIntegrationDetailsUrl = (verifierId: string): string => { - return `${integrationsPath}/signatureIntegrations/signature/view/${verifierId}`; - }; - - const renderIntegrationCell = (result: SignatureVerificationResult) => { - const displayName = result.verifierName || result.verifierId; - - // Show as link only if user has permissions. - if (permissions.signatureIntegrations.read) { - return {displayName}; - } - - // Fallback to plain text. 
- return displayName; - }; - return ( <> @@ -96,7 +77,7 @@ function ImagePageSignatureVerification({ results }: ImagePageSignatureVerificat > - {renderIntegrationCell(result)} + {getStatusMessage(result)} diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/Tables/ImageOverviewTable.tsx b/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/Tables/ImageOverviewTable.tsx index 3eab612e3b1ce..7baea320d1041 100644 --- a/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/Tables/ImageOverviewTable.tsx +++ b/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/Tables/ImageOverviewTable.tsx @@ -116,6 +116,7 @@ export const imageListQuery = gql` status verifiedImageReferences verifierId + verifierName } } } @@ -166,6 +167,7 @@ export const imageV2ListQuery = gql` status verifiedImageReferences verifierId + verifierName } } } diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/components/ImageDetailBadges.tsx b/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/components/ImageDetailBadges.tsx index aac87bc2b31b5..e34746e5984af 100644 --- a/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/components/ImageDetailBadges.tsx +++ b/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/components/ImageDetailBadges.tsx @@ -88,6 +88,7 @@ export const imageV2DetailsFragment = gql` verificationTime verifiedImageReferences verifierId + verifierName } } baseImage { diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/components/SignatureIntegrationLink.tsx b/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/components/SignatureIntegrationLink.tsx new file mode 100644 index 0000000000000..589853ec6449a --- /dev/null +++ b/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/components/SignatureIntegrationLink.tsx @@ -0,0 +1,29 @@ +import { Link } from 'react-router-dom'; +import { + getIntegrationsListPath, + signatureIntegrationDescriptor as descriptor, + signatureIntegrationsSource as source, +} from 'Containers/Integrations/utils/integrationsList'; + +import usePermissions from 'hooks/usePermissions'; + +import type { SignatureVerificationResult } from '../../types'; + +export type SignatureIntegrationLinkProps = { + result: SignatureVerificationResult; +}; + +function SignatureIntegrationLink({ result }: SignatureIntegrationLinkProps) { + const { hasReadAccess } = usePermissions(); + const displayName = result.verifierName || result.verifierId; + const { type } = descriptor; + const detailsUrl = `${getIntegrationsListPath(source, type)}/view/${result.verifierId}`; + + if (hasReadAccess('Integration')) { + return {displayName}; + } + + return <>{displayName}; +} + +export default SignatureIntegrationLink; diff --git a/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/components/VerifiedSignatureLabel.tsx b/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/components/VerifiedSignatureLabel.tsx index 7df0692083bb3..8f41f22a1d0c1 100644 --- a/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/components/VerifiedSignatureLabel.tsx +++ b/ui/apps/platform/src/Containers/Vulnerabilities/WorkloadCves/components/VerifiedSignatureLabel.tsx @@ -1,9 +1,18 @@ import type { CSSProperties } from 'react'; -import { Flex, FlexItem, Label, List, ListItem, Popover } from '@patternfly/react-core'; +import { + ClipboardCopy, + Flex, + FlexItem, + Label, + List, + ListItem, + Popover, +} from '@patternfly/react-core'; import { CheckCircleIcon } from 
'@patternfly/react-icons'; import PopoverBodyContent from 'Components/PopoverBodyContent'; import type { SignatureVerificationResult } from '../../types'; +import SignatureIntegrationLink from './SignatureIntegrationLink'; export function getVerifiedSignatureInResults( results: SignatureVerificationResult[] | null | undefined @@ -26,6 +35,10 @@ const styleList = { marginTop: 'var(--pf-v5-c-list--li--MarginTop)', } as CSSProperties; +const clipboardCopyMaxWidthStyle = { + '--pf-v5-u-max-width--MaxWidth': '64ch', +} as CSSProperties; + function VerifiedSignatureLabel({ verifiedSignatureResults, className, @@ -46,10 +59,22 @@ function VerifiedSignatureLabel({ > {verifiedSignatureResults?.map((result) => ( - {result.verifierId} + + + {result.verifiedImageReferences?.map((name) => ( - {name} + + + {name} + + ))} From 13848d5c4717e9d8271569f95254849638108f8e Mon Sep 17 00:00:00 2001 From: Tomasz Janiszewski Date: Fri, 13 Feb 2026 11:29:59 +0000 Subject: [PATCH 174/232] fix: pull platform-specific images in sensor integration tests (#19016) Co-authored-by: Claude Sonnet 4.5 --- .github/workflows/unit-tests.yaml | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/.github/workflows/unit-tests.yaml b/.github/workflows/unit-tests.yaml index 5d66291c28396..8dc7956c9ad71 100644 --- a/.github/workflows/unit-tests.yaml +++ b/.github/workflows/unit-tests.yaml @@ -422,8 +422,14 @@ jobs: # Skip empty lines and comments [[ -z "$image" || "$image" =~ ^# ]] && continue echo "Loading $image into Kind..." - docker pull "$image" - kind load docker-image "$image" --name chart-testing + # Pull directly inside Kind node to avoid docker save/import issues with multi-arch + # This bypasses the problematic "kind load docker-image" which uses --all-platforms + # GitHub Actions runners are linux/amd64, so we only need that platform + # Use --user flag to pass quay.io credentials to ctr + docker exec chart-testing-control-plane ctr -n k8s.io images pull \ + --user "${{ secrets.QUAY_RHACS_ENG_RO_USERNAME }}:${{ secrets.QUAY_RHACS_ENG_RO_PASSWORD }}" \ + --platform linux/amd64 \ + "$image" done < sensor/tests/images-to-prefetch.txt - name: Run sensor integration tests From 300a724cee3c409a32bb4424c90df64bc28b28e8 Mon Sep 17 00:00:00 2001 From: Vlad Bologa Date: Fri, 13 Feb 2026 15:03:44 +0100 Subject: [PATCH 175/232] fix: speed up arm64 operator builds (#19023) --- .github/workflows/build.yaml | 26 +++++++++++++------------- operator/Dockerfile | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 0df04949e51cf..20dd30bcad1e2 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -626,10 +626,6 @@ jobs: free-disk-space: 30 gcp-account: ${{ secrets.GCP_SERVICE_ACCOUNT_STACKROX_CI }} - - name: Set up QEMU - if: matrix.arch != 'amd64' - uses: docker/setup-qemu-action@v3 - - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 @@ -665,15 +661,6 @@ jobs: run: | ./scripts/ci/lib.sh registry_rw_login "quay.io/${QUAY_ORG}" - - name: Login to docker.io to mitigate rate limiting on downloading images - uses: docker/login-action@v3 - # Skip for external contributions. 
- if: | - github.event_name == 'push' || !github.event.pull_request.head.repo.fork - with: - username: ${{ secrets.DOCKERHUB_CI_ACCOUNT_USERNAME }} - password: ${{ secrets.DOCKERHUB_CI_ACCOUNT_PASSWORD }} - - name: Build Operator Bundle image if: | matrix.name != 'STACKROX_BRANDING' @@ -698,6 +685,19 @@ jobs: # simply builds the actual operator binary for the correct target architecture. CGO_ENABLED=0 GOARCH=${{ matrix.arch }} scripts/lib.sh retry 6 true make -C operator/ build docker-build + - name: Login to docker.io to mitigate rate limiting on downloading images + uses: docker/login-action@v3 + # Skip for external contributions. + if: | + github.event_name == 'push' || !github.event.pull_request.head.repo.fork + with: + username: ${{ secrets.DOCKERHUB_CI_ACCOUNT_USERNAME }} + password: ${{ secrets.DOCKERHUB_CI_ACCOUNT_PASSWORD }} + + - name: Set up QEMU + if: matrix.arch != 'amd64' + uses: docker/setup-qemu-action@v3 + - name: Check that Operator image is runnable run: docker run --rm --platform linux/${{ matrix.arch }} "quay.io/${QUAY_ORG}/stackrox-operator:$(make --quiet --no-print-directory -C operator tag)" --help diff --git a/operator/Dockerfile b/operator/Dockerfile index fbc959f61815b..0ebaa95528713 100644 --- a/operator/Dockerfile +++ b/operator/Dockerfile @@ -2,7 +2,7 @@ ARG roxpath=/workspace/src/github.com/stackrox/rox ARG TARGET_ARCH=amd64 -FROM registry.access.redhat.com/ubi9/go-toolset:1.25 AS builder +FROM --platform=$BUILDPLATFORM registry.access.redhat.com/ubi9/go-toolset:1.25 AS builder # Build the manager binary ARG TARGET_ARCH From 9d88f2f4836385b2f72247abbd2a80e40c208d5c Mon Sep 17 00:00:00 2001 From: Tomasz Janiszewski Date: Fri, 13 Feb 2026 16:17:42 +0000 Subject: [PATCH 176/232] ROX-32914: replace deprecated Dependabot auto-merge workflow (#18980) Co-authored-by: Claude Sonnet 4.5 --- .github/workflows/auto-merge.yml | 113 ++++++++++++++++++++++++------- 1 file changed, 90 insertions(+), 23 deletions(-) diff --git a/.github/workflows/auto-merge.yml b/.github/workflows/auto-merge.yml index 9e2033cd97412..3da8b53614d5f 100644 --- a/.github/workflows/auto-merge.yml +++ b/.github/workflows/auto-merge.yml @@ -1,30 +1,97 @@ name: auto-merge on: - pull_request_target: - types: - - labeled + schedule: + - cron: '*/15 * * * *' # Every 15 minutes + workflow_dispatch: + +permissions: + contents: write + pull-requests: write + +concurrency: + group: auto-merge + cancel-in-progress: false jobs: - auto-merge-minor: - name: Auto-merge semver minor and patch bumps + auto-merge: + name: Enable auto-merge for eligible PRs runs-on: ubuntu-latest - if: github.actor == 'dependabot[bot]' && github.event.label.name == 'auto-merge' - steps: - - uses: ahmadnassri/action-dependabot-auto-merge@v2.6 - with: - github-token: '${{ secrets.RHACS_BOT_GITHUB_TOKEN }}' - command: "squash and merge" - approve: true - target: minor - - auto-merge-any: - name: Auto-merge any version bumps - runs-on: ubuntu-latest - if: github.actor == 'dependabot[bot]' && github.event.label.name == 'auto-merge-any' + if: github.repository_owner == 'stackrox' + steps: - - uses: ahmadnassri/action-dependabot-auto-merge@v2.6 - with: - github-token: '${{ secrets.RHACS_BOT_GITHUB_TOKEN }}' - command: "squash and merge" - approve: true + - name: Find and process PRs with auto-merge labels + env: + GH_TOKEN: ${{ secrets.RHACS_BOT_GITHUB_TOKEN }} + run: | + set -euo pipefail + + # Extract repo owner and name + OWNER="${{ github.repository_owner }}" + REPO="${{ github.event.repository.name }}" + + echo "::notice::Querying 
PRs with auto-merge labels" + + # Get all open PRs with auto-merge or auto-merge-any labels + PR_DATA=$(gh pr list \ + --repo "${{ github.repository }}" \ + --state open \ + --limit 50 \ + --json number,labels,isDraft,mergeable,author \ + --jq ".[] | select( + .isDraft == false and + .mergeable == \"MERGEABLE\" and + (.labels | map(.name) | any(. == \"auto-merge\" or . == \"auto-merge-any\")) + ) | {number, labels: [.labels[].name], author: .author.login}") + + if [[ -z "$PR_DATA" ]]; then + echo "::notice::No eligible PRs found with auto-merge labels" + exit 0 + fi + + # Process each PR + echo "$PR_DATA" | jq -c '.' | while read -r PR_JSON; do + PR_NUMBER=$(echo "$PR_JSON" | jq -r '.number') + AUTHOR=$(echo "$PR_JSON" | jq -r '.author') + LABELS=$(echo "$PR_JSON" | jq -r '.labels | join(",")') + + echo "::notice::Processing PR #$PR_NUMBER (author=$AUTHOR, labels=$LABELS)" + + # Check if all checks have passed using GraphQL statusCheckRollup + STATUS=$(gh api graphql -F owner="$OWNER" -F repo="$REPO" -F number="$PR_NUMBER" -f query=" + query(\$owner: String!, \$repo: String!, \$number: Int!) { + repository(owner: \$owner, name: \$repo) { + pullRequest(number: \$number) { + commits(last: 1) { + nodes { + commit { + statusCheckRollup { + state + } + } + } + } + } + } + } + " --jq ".data.repository.pullRequest.commits.nodes[0].commit.statusCheckRollup.state" || echo "null") + + echo "::notice::PR #$PR_NUMBER status check rollup: $STATUS" + + # Only proceed if all checks passed + if [[ "$STATUS" != "SUCCESS" ]]; then + echo "::warning::Skipping PR #$PR_NUMBER - checks not passed (status: $STATUS)" + continue + fi + + # All conditions met - enable auto-merge + echo "::notice::Approving PR #$PR_NUMBER" + gh pr review --repo "${{ github.repository }}" \ + --approve "$PR_NUMBER" || true + + echo "::notice::Enabling auto-merge for PR #$PR_NUMBER" + gh pr merge --repo "${{ github.repository }}" \ + --auto --squash "$PR_NUMBER" + + echo "::notice::✓ Auto-merge enabled for PR #$PR_NUMBER" + done From 6fb9d504c61a4b1ac7c334684280b1583eb04fed Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Feb 2026 09:33:20 -0700 Subject: [PATCH 177/232] chore(deps): bump github.com/lib/pq from 1.11.1 to 1.11.2 (#18966) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 441baf54f9655..e359f023c0764 100644 --- a/go.mod +++ b/go.mod @@ -80,7 +80,7 @@ require ( github.com/jeremywohl/flatten v1.0.1 github.com/joshdk/go-junit v1.0.0 github.com/klauspost/compress v1.18.4 - github.com/lib/pq v1.11.1 + github.com/lib/pq v1.11.2 github.com/machinebox/graphql v0.2.2 github.com/mailru/easyjson v0.9.1 github.com/mdlayher/vsock v1.2.1 diff --git a/go.sum b/go.sum index 70b70faac6b6d..1a53246102314 100644 --- a/go.sum +++ b/go.sum @@ -1105,8 +1105,8 @@ github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.11.1 h1:wuChtj2hfsGmmx3nf1m7xC2XpK6OtelS2shMY+bGMtI= -github.com/lib/pq v1.11.1/go.mod h1:/p+8NSbOcwzAEI7wiMXFlgydTwcgTr3OSKMsD2BitpA= +github.com/lib/pq v1.11.2 
h1:x6gxUeu39V0BHZiugWe8LXZYZ+Utk7hSJGThs8sdzfs= +github.com/lib/pq v1.11.2/go.mod h1:/p+8NSbOcwzAEI7wiMXFlgydTwcgTr3OSKMsD2BitpA= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= From a4fdb7c98b3dee5fe63e548dfde69f4aa9ea703f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Feb 2026 09:33:39 -0700 Subject: [PATCH 178/232] chore(deps): bump github.com/go-git/go-git/v5 from 5.13.1 to 5.16.5 (#18930) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 10 +++++----- go.sum | 24 ++++++++++++------------ 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/go.mod b/go.mod index e359f023c0764..18bf2f5a603c8 100644 --- a/go.mod +++ b/go.mod @@ -211,7 +211,7 @@ require ( github.com/Masterminds/squirrel v1.5.4 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/OneOfOne/xxhash v1.2.8 // indirect - github.com/ProtonMail/go-crypto v1.1.3 // indirect + github.com/ProtonMail/go-crypto v1.1.6 // indirect github.com/RaduBerinde/axisds v0.1.0 // indirect github.com/RaduBerinde/btreemap v0.0.0-20250419174037-3d62b7205d54 // indirect github.com/ThalesIgnite/crypto11 v1.2.5 // indirect @@ -307,8 +307,8 @@ require ( github.com/go-chi/chi/v5 v5.2.4 // indirect github.com/go-errors/errors v1.4.2 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect - github.com/go-git/go-billy/v5 v5.6.1 // indirect - github.com/go-git/go-git/v5 v5.13.1 // indirect + github.com/go-git/go-billy/v5 v5.6.2 // indirect + github.com/go-git/go-git/v5 v5.16.5 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/analysis v0.24.1 // indirect @@ -431,7 +431,7 @@ require ( github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect - github.com/pjbgf/sha1cd v0.3.0 // indirect + github.com/pjbgf/sha1cd v0.3.2 // indirect github.com/prometheus/procfs v0.17.0 // indirect github.com/quay/claircore/updater/driver v1.0.0 // indirect github.com/quay/goval-parser v0.8.8 // indirect @@ -452,7 +452,7 @@ require ( github.com/sigstore/sigstore-go v1.1.4 // indirect github.com/sigstore/timestamp-authority/v2 v2.0.4 // indirect github.com/sirupsen/logrus v1.9.4 // indirect - github.com/skeema/knownhosts v1.3.0 // indirect + github.com/skeema/knownhosts v1.3.1 // indirect github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect github.com/spdx/tools-golang v0.5.5 // indirect github.com/spf13/afero v1.15.0 // indirect diff --git a/go.sum b/go.sum index 1a53246102314..848371556b744 100644 --- a/go.sum +++ b/go.sum @@ -188,8 +188,8 @@ github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8 github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= github.com/PagerDuty/go-pagerduty v1.8.0 h1:MTFqTffIcAervB83U7Bx6HERzLbyaSPL/+oxH3zyluI= github.com/PagerDuty/go-pagerduty v1.8.0/go.mod h1:nzIeAqyFSJAFkjWKvMzug0JtwDg+V+UoCWjFrfFH5mI= -github.com/ProtonMail/go-crypto v1.1.3 h1:nRBOetoydLeUb4nHajyO2bKqMLfWQ/ZPwkXqXxPxCFk= -github.com/ProtonMail/go-crypto 
v1.1.3/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= +github.com/ProtonMail/go-crypto v1.1.6 h1:ZcV+Ropw6Qn0AX9brlQLAUXfqLBc7Bl+f/DmNxpLfdw= +github.com/ProtonMail/go-crypto v1.1.6/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/RaduBerinde/axisds v0.1.0 h1:YItk/RmU5nvlsv/awo2Fjx97Mfpt4JfgtEVAGPrLdz8= @@ -517,8 +517,8 @@ github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdf github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/elazarl/goproxy v1.2.3 h1:xwIyKHbaP5yfT6O9KIeYJR5549MXRQkoQMRXGztz8YQ= -github.com/elazarl/goproxy v1.2.3/go.mod h1:YfEbZtqP4AetfO6d40vWchF3znWX7C7Vd6ZMfdL8z64= +github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o= +github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -602,12 +602,12 @@ github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxI github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= -github.com/go-git/go-billy/v5 v5.6.1 h1:u+dcrgaguSSkbjzHwelEjc0Yj300NUevrrPphk/SoRA= -github.com/go-git/go-billy/v5 v5.6.1/go.mod h1:0AsLr1z2+Uksi4NlElmMblP5rPcDZNRCD8ujZCRR2BE= +github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM= +github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= -github.com/go-git/go-git/v5 v5.13.1 h1:DAQ9APonnlvSWpvolXWIuV6Q6zXy2wHbN4cVlNR5Q+M= -github.com/go-git/go-git/v5 v5.13.1/go.mod h1:qryJB4cSBoq3FRoBRf5A77joojuBcmPJ0qu3XXXVixc= +github.com/go-git/go-git/v5 v5.16.5 h1:mdkuqblwr57kVfXri5TTH+nMFLNUxIj9Z7F5ykFbw5s= +github.com/go-git/go-git/v5 v5.16.5/go.mod h1:QOMLpNf1qxuSY4StA/ArOdfFR2TrKEjJiye2kel2m+M= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -1311,8 +1311,8 @@ github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= 
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= -github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= -github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= +github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4= +github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -1458,8 +1458,8 @@ github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w= github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g= -github.com/skeema/knownhosts v1.3.0 h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0LY= -github.com/skeema/knownhosts v1.3.0/go.mod h1:sPINvnADmT/qYH1kfv+ePMmOBTH6Tbl7b5LvTDjFK7M= +github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8= +github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= From 641b65b495a5895c41e54daadb91a4650c6ef1c7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Feb 2026 17:38:13 +0100 Subject: [PATCH 179/232] chore(deps): bump github.com/go-git/go-git/v5 from 5.16.2 to 5.16.5 in /operator/tools/operator-sdk (#18931) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- operator/tools/operator-sdk/go.mod | 2 +- operator/tools/operator-sdk/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/operator/tools/operator-sdk/go.mod b/operator/tools/operator-sdk/go.mod index ee5f992345639..a0d5e3f57c484 100644 --- a/operator/tools/operator-sdk/go.mod +++ b/operator/tools/operator-sdk/go.mod @@ -82,7 +82,7 @@ require ( github.com/go-errors/errors v1.4.2 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.7.0 // indirect - github.com/go-git/go-git/v5 v5.16.4 // indirect + github.com/go-git/go-git/v5 v5.16.5 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-jose/go-jose/v4 v4.1.3 // indirect github.com/go-logr/logr v1.4.3 // indirect diff --git a/operator/tools/operator-sdk/go.sum b/operator/tools/operator-sdk/go.sum index 30b6640e803fe..653bdc922a90e 100644 --- a/operator/tools/operator-sdk/go.sum +++ b/operator/tools/operator-sdk/go.sum @@ -160,8 +160,8 @@ github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66D github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod 
h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.7.0 h1:83lBUJhGWhYp0ngzCMSgllhUSuoHP1iEWYjsPl9nwqM= github.com/go-git/go-billy/v5 v5.7.0/go.mod h1:/1IUejTKH8xipsAcdfcSAlUlo2J7lkYV8GTKxAT/L3E= -github.com/go-git/go-git/v5 v5.16.4 h1:7ajIEZHZJULcyJebDLo99bGgS0jRrOxzZG4uCk2Yb2Y= -github.com/go-git/go-git/v5 v5.16.4/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8= +github.com/go-git/go-git/v5 v5.16.5 h1:mdkuqblwr57kVfXri5TTH+nMFLNUxIj9Z7F5ykFbw5s= +github.com/go-git/go-git/v5 v5.16.5/go.mod h1:QOMLpNf1qxuSY4StA/ArOdfFR2TrKEjJiye2kel2m+M= github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= From f02ea1a9364ce766d61a37b657b7d3a7b520098c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Feb 2026 17:38:34 +0100 Subject: [PATCH 180/232] chore(deps): bump github.com/golangci/golangci-lint/v2 from 2.8.0 to 2.9.0 in /tools/linters (#18960) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- tools/linters/go.mod | 43 ++++++++++---------- tools/linters/go.sum | 95 ++++++++++++++++++++------------------------ 2 files changed, 65 insertions(+), 73 deletions(-) diff --git a/tools/linters/go.mod b/tools/linters/go.mod index 19f254183dcf3..906f130488efb 100644 --- a/tools/linters/go.mod +++ b/tools/linters/go.mod @@ -1,9 +1,9 @@ module github.com/stackrox/stackrox/tools/linters -go 1.25 +go 1.25.0 require ( - github.com/golangci/golangci-lint/v2 v2.8.0 + github.com/golangci/golangci-lint/v2 v2.9.0 github.com/nilslice/protolock v0.17.0 golang.org/x/vuln v1.1.4 ) @@ -25,12 +25,12 @@ require ( github.com/BurntSushi/toml v1.6.0 // indirect github.com/Djarvur/go-err113 v0.1.1 // indirect github.com/Masterminds/semver/v3 v3.4.0 // indirect - github.com/MirrexOne/unqueryvet v1.4.0 // indirect + github.com/MirrexOne/unqueryvet v1.5.3 // indirect github.com/OpenPeeDeeP/depguard/v2 v2.2.1 // indirect - github.com/alecthomas/chroma/v2 v2.21.1 // indirect + github.com/alecthomas/chroma/v2 v2.23.1 // indirect github.com/alecthomas/go-check-sumtype v0.3.1 // indirect github.com/alexkohler/nakedret/v2 v2.0.6 // indirect - github.com/alexkohler/prealloc v1.0.1 // indirect + github.com/alexkohler/prealloc v1.0.2 // indirect github.com/alfatraining/structtag v1.0.0 // indirect github.com/alingse/asasalint v0.0.11 // indirect github.com/alingse/nilnesserr v0.2.0 // indirect @@ -41,7 +41,7 @@ require ( github.com/bkielbasa/cyclop v1.2.3 // indirect github.com/blizzy78/varnamelen v0.8.0 // indirect github.com/bombsimon/wsl/v4 v4.7.0 // indirect - github.com/bombsimon/wsl/v5 v5.3.0 // indirect + github.com/bombsimon/wsl/v5 v5.6.0 // indirect github.com/breml/bidichk v0.3.3 // indirect github.com/breml/errchkjson v0.4.1 // indirect github.com/butuzov/ireturn v0.4.0 // indirect @@ -52,7 +52,7 @@ require ( github.com/charithe/durationcheck v0.0.11 // indirect github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect github.com/charmbracelet/lipgloss v1.1.0 // indirect - github.com/charmbracelet/x/ansi v0.8.0 // indirect + github.com/charmbracelet/x/ansi v0.10.1 // indirect github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect github.com/charmbracelet/x/term v0.2.1 // indirect github.com/ckaznocha/intrange 
v0.3.1 // indirect @@ -69,7 +69,7 @@ require ( github.com/firefart/nonamedreturns v1.0.6 // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect github.com/fzipp/gocyclo v0.6.0 // indirect - github.com/ghostiam/protogetter v0.3.18 // indirect + github.com/ghostiam/protogetter v0.3.20 // indirect github.com/go-critic/go-critic v0.14.3 // indirect github.com/go-toolsmith/astcast v1.1.0 // indirect github.com/go-toolsmith/astcopy v1.1.0 // indirect @@ -78,7 +78,7 @@ require ( github.com/go-toolsmith/astp v1.1.0 // indirect github.com/go-toolsmith/strparse v1.1.0 // indirect github.com/go-toolsmith/typep v1.1.0 // indirect - github.com/go-viper/mapstructure/v2 v2.4.0 // indirect + github.com/go-viper/mapstructure/v2 v2.5.0 // indirect github.com/go-xmlfmt/xmlfmt v1.1.3 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/godoc-lint/godoc-lint v0.11.1 // indirect @@ -88,8 +88,8 @@ require ( github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 // indirect github.com/golangci/go-printf-func-name v0.1.1 // indirect github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d // indirect - github.com/golangci/golines v0.14.0 // indirect - github.com/golangci/misspell v0.7.0 // indirect + github.com/golangci/golines v0.15.0 // indirect + github.com/golangci/misspell v0.8.0 // indirect github.com/golangci/plugin-module-register v0.1.2 // indirect github.com/golangci/revgrep v0.8.0 // indirect github.com/golangci/swaggoswag v0.0.0-20250504205917-77f2aca3143e // indirect @@ -135,7 +135,7 @@ require ( github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - github.com/mgechev/revive v1.13.0 // indirect + github.com/mgechev/revive v1.14.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moricho/tparallel v0.3.2 // indirect @@ -143,7 +143,7 @@ require ( github.com/nakabonne/nestif v0.3.1 // indirect github.com/nishanths/exhaustive v0.12.0 // indirect github.com/nishanths/predeclared v0.2.2 // indirect - github.com/nunnatsa/ginkgolinter v0.21.2 // indirect + github.com/nunnatsa/ginkgolinter v0.22.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect @@ -166,7 +166,7 @@ require ( github.com/sashamelentyev/interfacebloat v1.1.0 // indirect github.com/sashamelentyev/usestdlibvars v1.29.0 // indirect github.com/securego/gosec/v2 v2.22.11 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect + github.com/sirupsen/logrus v1.9.4 // indirect github.com/sivchari/containedctx v1.0.3 // indirect github.com/sonatard/noctx v0.4.0 // indirect github.com/sourcegraph/go-diff v0.7.0 // indirect @@ -198,19 +198,18 @@ require ( gitlab.com/bosi/decorder v0.4.2 // indirect go-simpler.org/musttag v0.14.0 // indirect go-simpler.org/sloglint v0.11.1 // indirect - go.augendre.info/arangolint v0.3.1 // indirect + go.augendre.info/arangolint v0.4.0 // indirect go.augendre.info/fatcontext v0.9.0 // indirect - go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/multierr v1.10.0 // indirect go.uber.org/zap v1.27.0 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/exp/typeparams v0.0.0-20251023183803-a4bb9ffd2546 // indirect - golang.org/x/mod v0.31.0 // indirect + golang.org/x/exp/typeparams v0.0.0-20251125195548-87e1e737ad39 // indirect + golang.org/x/mod v0.33.0 // indirect golang.org/x/sync 
v0.19.0 // indirect - golang.org/x/sys v0.39.0 // indirect - golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc // indirect - golang.org/x/text v0.31.0 // indirect - golang.org/x/tools v0.40.0 // indirect + golang.org/x/sys v0.41.0 // indirect + golang.org/x/telemetry v0.0.0-20260209163413-e7419c687ee4 // indirect + golang.org/x/text v0.33.0 // indirect + golang.org/x/tools v0.42.0 // indirect google.golang.org/protobuf v1.36.8 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/tools/linters/go.sum b/tools/linters/go.sum index bce8d374812dc..f837d9d685e2d 100644 --- a/tools/linters/go.sum +++ b/tools/linters/go.sum @@ -65,14 +65,14 @@ github.com/Djarvur/go-err113 v0.1.1 h1:eHfopDqXRwAi+YmCUas75ZE0+hoBHJ2GQNLYRSxao github.com/Djarvur/go-err113 v0.1.1/go.mod h1:IaWJdYFLg76t2ihfflPZnM1LIQszWOsFDh2hhhAVF6k= github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= -github.com/MirrexOne/unqueryvet v1.4.0 h1:6KAkqqW2KUnkl9Z0VuTphC3IXRPoFqEkJEtyxxHj5eQ= -github.com/MirrexOne/unqueryvet v1.4.0/go.mod h1:IWwCwMQlSWjAIteW0t+28Q5vouyktfujzYznSIWiuOg= +github.com/MirrexOne/unqueryvet v1.5.3 h1:LpT3rsH+IY3cQddWF9bg4C7jsbASdGnrOSofY8IPEiw= +github.com/MirrexOne/unqueryvet v1.5.3/go.mod h1:fs9Zq6eh1LRIhsDIsxf9PONVUjYdFHdtkHIgZdJnyPU= github.com/OpenPeeDeeP/depguard/v2 v2.2.1 h1:vckeWVESWp6Qog7UZSARNqfu/cZqvki8zsuj3piCMx4= github.com/OpenPeeDeeP/depguard/v2 v2.2.1/go.mod h1:q4DKzC4UcVaAvcfd41CZh0PWpGgzrVxUYBlgKNGquUo= github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= -github.com/alecthomas/chroma/v2 v2.21.1 h1:FaSDrp6N+3pphkNKU6HPCiYLgm8dbe5UXIXcoBhZSWA= -github.com/alecthomas/chroma/v2 v2.21.1/go.mod h1:NqVhfBR0lte5Ouh3DcthuUCTUpDC9cxBOfyMbMQPs3o= +github.com/alecthomas/chroma/v2 v2.23.1 h1:nv2AVZdTyClGbVQkIzlDm/rnhk1E9bU9nXwmZ/Vk/iY= +github.com/alecthomas/chroma/v2 v2.23.1/go.mod h1:NqVhfBR0lte5Ouh3DcthuUCTUpDC9cxBOfyMbMQPs3o= github.com/alecthomas/go-check-sumtype v0.3.1 h1:u9aUvbGINJxLVXiFvHUlPEaD7VDULsrxJb4Aq31NLkU= github.com/alecthomas/go-check-sumtype v0.3.1/go.mod h1:A8TSiN3UPRw3laIgWEUOHHLPa6/r9MtoigdlP5h3K/E= github.com/alecthomas/repr v0.5.2 h1:SU73FTI9D1P5UNtvseffFSGmdNci/O6RsqzeXJtP0Qs= @@ -84,8 +84,8 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexkohler/nakedret/v2 v2.0.6 h1:ME3Qef1/KIKr3kWX3nti3hhgNxw6aqN5pZmQiFSsuzQ= github.com/alexkohler/nakedret/v2 v2.0.6/go.mod h1:l3RKju/IzOMQHmsEvXwkqMDzHHvurNQfAgE1eVmT40Q= -github.com/alexkohler/prealloc v1.0.1 h1:A9P1haqowqUxWvU9nk6tQ7YktXIHf+LQM9wPRhuteEE= -github.com/alexkohler/prealloc v1.0.1/go.mod h1:fT39Jge3bQrfA7nPMDngUfvUbQGQeJyGQnR+913SCig= +github.com/alexkohler/prealloc v1.0.2 h1:MPo8cIkGkZytq7WNH9UHv3DIX1mPz1RatPXnZb0zHWQ= +github.com/alexkohler/prealloc v1.0.2/go.mod h1:fT39Jge3bQrfA7nPMDngUfvUbQGQeJyGQnR+913SCig= github.com/alfatraining/structtag v1.0.0 h1:2qmcUqNcCoyVJ0up879K614L9PazjBSFruTB0GOFjCc= github.com/alfatraining/structtag v1.0.0/go.mod h1:p3Xi5SwzTi+Ryj64DqjLWz7XurHxbGsq6y3ubePJPus= github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= @@ -108,8 +108,8 @@ github.com/blizzy78/varnamelen v0.8.0 
h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= github.com/bombsimon/wsl/v4 v4.7.0 h1:1Ilm9JBPRczjyUs6hvOPKvd7VL1Q++PL8M0SXBDf+jQ= github.com/bombsimon/wsl/v4 v4.7.0/go.mod h1:uV/+6BkffuzSAVYD+yGyld1AChO7/EuLrCF/8xTiapg= -github.com/bombsimon/wsl/v5 v5.3.0 h1:nZWREJFL6U3vgW/B1lfDOigl+tEF6qgs6dGGbFeR0UM= -github.com/bombsimon/wsl/v5 v5.3.0/go.mod h1:Gp8lD04z27wm3FANIUPZycXp+8huVsn0oxc+n4qfV9I= +github.com/bombsimon/wsl/v5 v5.6.0 h1:4z+/sBqC5vUmSp1O0mS+czxwH9+LKXtCWtHH9rZGQL8= +github.com/bombsimon/wsl/v5 v5.6.0/go.mod h1:Uqt2EfrMj2NV8UGoN1f1Y3m0NpUVCsUdrNCdet+8LvU= github.com/breml/bidichk v0.3.3 h1:WSM67ztRusf1sMoqH6/c4OBCUlRVTKq+CbSeo0R17sE= github.com/breml/bidichk v0.3.3/go.mod h1:ISbsut8OnjB367j5NseXEGGgO/th206dVa427kR8YTE= github.com/breml/errchkjson v0.4.1 h1:keFSS8D7A2T0haP9kzZTi7o26r7kE3vymjZNeNDRDwg= @@ -133,8 +133,8 @@ github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4p github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk= github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY= github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30= -github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2llXn7xE= -github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q= +github.com/charmbracelet/x/ansi v0.10.1 h1:rL3Koar5XvX0pHGfovN03f5cxLbCF2YvLeyz7D2jVDQ= +github.com/charmbracelet/x/ansi v0.10.1/go.mod h1:3RQDQ6lDnROptfpWuUVIUG64bD2g2BgntdxH0Ya5TeE= github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd h1:vy0GVL4jeHEwG5YOXDmi86oYw2yuYUGqz6a8sLwg0X8= github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs= github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ= @@ -182,8 +182,8 @@ github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwV github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= -github.com/ghostiam/protogetter v0.3.18 h1:yEpghRGtP9PjKvVXtEzGpYfQj1Wl/ZehAfU6fr62Lfo= -github.com/ghostiam/protogetter v0.3.18/go.mod h1:FjIu5Yfs6FT391m+Fjp3fbAYJ6rkL/J6ySpZBfnODuI= +github.com/ghostiam/protogetter v0.3.20 h1:oW7OPFit2FxZOpmMRPP9FffU4uUpfeE/rEdE1f+MzD0= +github.com/ghostiam/protogetter v0.3.20/go.mod h1:FjIu5Yfs6FT391m+Fjp3fbAYJ6rkL/J6ySpZBfnODuI= github.com/go-critic/go-critic v0.14.3 h1:5R1qH2iFeo4I/RJU8vTezdqs08Egi4u5p6vOESA0pog= github.com/go-critic/go-critic v0.14.3/go.mod h1:xwntfW6SYAd7h1OqDzmN6hBX/JxsEKl5up/Y2bsxgVQ= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -221,8 +221,8 @@ github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQi github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= -github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= -github.com/go-viper/mapstructure/v2 
v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro= +github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/go-xmlfmt/xmlfmt v1.1.3 h1:t8Ey3Uy7jDSEisW2K3somuMKIpzktkWptA0iFCnRUWY= github.com/go-xmlfmt/xmlfmt v1.1.3/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= @@ -269,12 +269,12 @@ github.com/golangci/go-printf-func-name v0.1.1 h1:hIYTFJqAGp1iwoIfsNTpoq1xZAarog github.com/golangci/go-printf-func-name v0.1.1/go.mod h1:Es64MpWEZbh0UBtTAICOZiB+miW53w/K9Or/4QogJss= github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d h1:viFft9sS/dxoYY0aiOTsLKO2aZQAPT4nlQCsimGcSGE= github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d/go.mod h1:ivJ9QDg0XucIkmwhzCDsqcnxxlDStoTl89jDMIoNxKY= -github.com/golangci/golangci-lint/v2 v2.8.0 h1:wJnr3hJWY3eVzOUcfwbDc2qbi2RDEpvLmQeNFaPSNYA= -github.com/golangci/golangci-lint/v2 v2.8.0/go.mod h1:xl+HafQ9xoP8rzw0z5AwnO5kynxtb80e8u02Ej/47RI= -github.com/golangci/golines v0.14.0 h1:xt9d3RKBjhasA3qpoXs99J2xN2t6eBlpLHt0TrgyyXc= -github.com/golangci/golines v0.14.0/go.mod h1:gf555vPG2Ia7mmy2mzmhVQbVjuK8Orw0maR1G4vVAAQ= -github.com/golangci/misspell v0.7.0 h1:4GOHr/T1lTW0hhR4tgaaV1WS/lJ+ncvYCoFKmqJsj0c= -github.com/golangci/misspell v0.7.0/go.mod h1:WZyyI2P3hxPY2UVHs3cS8YcllAeyfquQcKfdeE9AFVg= +github.com/golangci/golangci-lint/v2 v2.9.0 h1:x5RRwa/jpNEjOnbSQSTam47QBsB50NMzCCUKe4GY/0U= +github.com/golangci/golangci-lint/v2 v2.9.0/go.mod h1:gAPaJitu6HsyiuwJYO+WtKT9WmCpQZHR+gkXlCw1bRY= +github.com/golangci/golines v0.15.0 h1:Qnph25g8Y1c5fdo1X7GaRDGgnMHgnxh4Gk4VfPTtRx0= +github.com/golangci/golines v0.15.0/go.mod h1:AZjXd23tbHMpowhtnGlj9KCNsysj72aeZVVHnVcZx10= +github.com/golangci/misspell v0.8.0 h1:qvxQhiE2/5z+BVRo1kwYA8yGz+lOlu5Jfvtx2b04Jbg= +github.com/golangci/misspell v0.8.0/go.mod h1:WZyyI2P3hxPY2UVHs3cS8YcllAeyfquQcKfdeE9AFVg= github.com/golangci/plugin-module-register v0.1.2 h1:e5WM6PO6NIAEcij3B053CohVp3HIYbzSuP53UAYgOpg= github.com/golangci/plugin-module-register v0.1.2/go.mod h1:1+QGTsKBvAIvPvoY/os+G5eoqxWn70HYDm2uvUyGuVw= github.com/golangci/revgrep v0.8.0 h1:EZBctwbVd0aMeRnNUsFogoyayvKHyxlV3CdUA46FX2s= @@ -428,8 +428,8 @@ github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6T github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mgechev/revive v1.13.0 h1:yFbEVliCVKRXY8UgwEO7EOYNopvjb1BFbmYqm9hZjBM= -github.com/mgechev/revive v1.13.0/go.mod h1:efJfeBVCX2JUumNQ7dtOLDja+QKj9mYGgEZA7rt5u+0= +github.com/mgechev/revive v1.14.0 h1:CC2Ulb3kV7JFYt+izwORoS3VT/+Plb8BvslI/l1yZsc= +github.com/mgechev/revive v1.14.0/go.mod h1:MvnujelCZBZCaoDv5B3foPo6WWgULSSFxvfxp7GsPfo= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -453,8 +453,8 @@ github.com/nishanths/exhaustive v0.12.0 h1:vIY9sALmw6T/yxiASewa4TQcFsVYZQQRUQJhK github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs= 
github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= -github.com/nunnatsa/ginkgolinter v0.21.2 h1:khzWfm2/Br8ZemX8QM1pl72LwM+rMeW6VUbQ4rzh0Po= -github.com/nunnatsa/ginkgolinter v0.21.2/go.mod h1:GItSI5fw7mCGLPmkvGYrr1kEetZe7B593jcyOpyabsY= +github.com/nunnatsa/ginkgolinter v0.22.0 h1:o9g7JN6efdBxAHhejvPkodEjWsOBze9zDnPePsvC/Qg= +github.com/nunnatsa/ginkgolinter v0.22.0/go.mod h1:zIFAk36fhcHQIiYOGXLbrGTXz7cvpufhRYem6ToCVnY= github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= @@ -475,8 +475,6 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= -github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= @@ -539,8 +537,8 @@ github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOms github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w= +github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g= github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE= github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= github.com/sonatard/noctx v0.4.0 h1:7MC/5Gg4SQ4lhLYR6mvOP6mQVSxCrdyiExo7atBs27o= @@ -572,7 +570,6 @@ github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= @@ -624,8 +621,8 @@ go-simpler.org/musttag v0.14.0 h1:XGySZATqQYSEV3/YTy+iX+aofbZZllJaqwFWs+RTtSo= go-simpler.org/musttag v0.14.0/go.mod h1:uP8EymctQjJ4Z1kUnjX0u2l60WfUdQxCwSNKzE1JEOE= go-simpler.org/sloglint v0.11.1 
h1:xRbPepLT/MHPTCA6TS/wNfZrDzkGvCCqUv4Bdwc3H7s= go-simpler.org/sloglint v0.11.1/go.mod h1:2PowwiCOK8mjiF+0KGifVOT8ZsCNiFzvfyJeJOIt8MQ= -go.augendre.info/arangolint v0.3.1 h1:n2E6p8f+zfXSFLa2e2WqFPp4bfvcuRdd50y6cT65pSo= -go.augendre.info/arangolint v0.3.1/go.mod h1:6ZKzEzIZuBQwoSvlKT+qpUfIbBfFCE5gbAoTg0/117g= +go.augendre.info/arangolint v0.4.0 h1:xSCZjRoS93nXazBSg5d0OGCi9APPLNMmmLrC995tR50= +go.augendre.info/arangolint v0.4.0/go.mod h1:l+f/b4plABuFISuKnTGD4RioXiCCgghv2xqst/xOvAA= go.augendre.info/fatcontext v0.9.0 h1:Gt5jGD4Zcj8CDMVzjOJITlSb9cEch54hjRRlN3qDojE= go.augendre.info/fatcontext v0.9.0/go.mod h1:L94brOAT1OOUNue6ph/2HnwxoNlds9aXDF2FcUntbNw= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -633,8 +630,6 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= -go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= @@ -662,12 +657,12 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= -golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= +golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= +golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/exp/typeparams v0.0.0-20251023183803-a4bb9ffd2546 h1:HDjDiATsGqvuqvkDvgJjD1IgPrVekcSXVVE21JwvzGE= -golang.org/x/exp/typeparams v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:4Mzdyp/6jzw9auFDJ3OMF5qksa7UvPnzKqTVGcb04ms= +golang.org/x/exp/typeparams v0.0.0-20251125195548-87e1e737ad39 h1:yzGKB4T4r1nFi65o7dQ96ERTfU2trk8Ige9aqqADqf4= +golang.org/x/exp/typeparams v0.0.0-20251125195548-87e1e737ad39/go.mod h1:4Mzdyp/6jzw9auFDJ3OMF5qksa7UvPnzKqTVGcb04ms= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -695,8 +690,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod 
v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= -golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= +golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= +golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -735,8 +730,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= -golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60= +golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -804,17 +799,16 @@ golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= -golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc h1:bH6xUXay0AIFMElXG2rQ4uiE+7ncwtiOdPfYK1NK2XA= -golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= +golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= +golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20260209163413-e7419c687ee4 h1:bTLqdHv7xrGlFbvf5/TXNxy/iUwwdkjhqQTJDjW7aj0= +golang.org/x/telemetry v0.0.0-20260209163413-e7419c687ee4/go.mod h1:g5NllXBEermZrmR51cJDQxmJUHUOfRAaNyWBM+R+548= 
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -831,8 +825,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= -golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= +golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -886,8 +880,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= -golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= -golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= +golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k= +golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0= golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= @@ -991,7 +985,6 @@ gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 617c6dbcc9141fe8296b2895c1b60b6b8e33ef0f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Feb 2026 17:38:55 +0100 Subject: [PATCH 181/232] chore(deps): bump sigs.k8s.io/kustomize/kustomize/v5 from 5.8.0 to 5.8.1 in /operator/tools/kustomize (#18962) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- operator/tools/kustomize/go.mod | 11 +++++------ operator/tools/kustomize/go.sum | 22 ++++++++++------------ 2 files changed, 15 insertions(+), 18 deletions(-) diff --git 
a/operator/tools/kustomize/go.mod b/operator/tools/kustomize/go.mod index 04e1e2cbdb304..0760be444bceb 100644 --- a/operator/tools/kustomize/go.mod +++ b/operator/tools/kustomize/go.mod @@ -2,7 +2,7 @@ module github.com/stackrox/rox/operator/tools/kustomize go 1.25 -require sigs.k8s.io/kustomize/kustomize/v5 v5.8.0 +require sigs.k8s.io/kustomize/kustomize/v5 v5.8.1 require ( github.com/blang/semver/v4 v4.0.0 // indirect @@ -16,7 +16,6 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect - github.com/pkg/errors v0.9.1 // indirect github.com/sergi/go-diff v1.4.0 // indirect github.com/spf13/cobra v1.9.1 // indirect github.com/spf13/pflag v1.0.6 // indirect @@ -26,12 +25,12 @@ require ( golang.org/x/sys v0.35.0 // indirect golang.org/x/text v0.28.0 // indirect google.golang.org/protobuf v1.36.5 // indirect - gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect - sigs.k8s.io/kustomize/api v0.21.0 // indirect - sigs.k8s.io/kustomize/cmd/config v0.21.0 // indirect - sigs.k8s.io/kustomize/kyaml v0.21.0 // indirect + sigs.k8s.io/kustomize/api v0.21.1 // indirect + sigs.k8s.io/kustomize/cmd/config v0.21.1 // indirect + sigs.k8s.io/kustomize/kyaml v0.21.1 // indirect sigs.k8s.io/yaml v1.5.0 // indirect ) diff --git a/operator/tools/kustomize/go.sum b/operator/tools/kustomize/go.sum index 570217d2f9094..ce3be4ca9c359 100644 --- a/operator/tools/kustomize/go.sum +++ b/operator/tools/kustomize/go.sum @@ -37,8 +37,6 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= @@ -80,8 +78,8 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= -gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= +gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 
v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -93,13 +91,13 @@ k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8X k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize/api v0.21.0 h1:I7nry5p8iDJbuRdYS7ez8MUvw7XVNPcIP5GkzzuXIIQ= -sigs.k8s.io/kustomize/api v0.21.0/go.mod h1:XGVQuR5n2pXKWbzXHweZU683pALGw/AMVO4zU4iS8SE= -sigs.k8s.io/kustomize/cmd/config v0.21.0 h1:ikLtzcNK9isBqSaXXhAg7LRCTNKdp70z5v/c4Y55DOw= -sigs.k8s.io/kustomize/cmd/config v0.21.0/go.mod h1:oxa6eRzeLWUcE7M3Rmio29Sfc4KpqGspHur3GjOYqNA= -sigs.k8s.io/kustomize/kustomize/v5 v5.8.0 h1:CCIJK7z/xJOlkXOaDOcL2jprV53a/eloiL02wg7oJJs= -sigs.k8s.io/kustomize/kustomize/v5 v5.8.0/go.mod h1:qewGAExYZK9LbPPbnJMPK5HQ8nsdxRzpclIg0qslzDo= -sigs.k8s.io/kustomize/kyaml v0.21.0 h1:7mQAf3dUwf0wBerWJd8rXhVcnkk5Tvn/q91cGkaP6HQ= -sigs.k8s.io/kustomize/kyaml v0.21.0/go.mod h1:hmxADesM3yUN2vbA5z1/YTBnzLJ1dajdqpQonwBL1FQ= +sigs.k8s.io/kustomize/api v0.21.1 h1:lzqbzvz2CSvsjIUZUBNFKtIMsEw7hVLJp0JeSIVmuJs= +sigs.k8s.io/kustomize/api v0.21.1/go.mod h1:f3wkKByTrgpgltLgySCntrYoq5d3q7aaxveSagwTlwI= +sigs.k8s.io/kustomize/cmd/config v0.21.1 h1:/gxf3J1rQD9nfuL8fHlrTLeUL+JHWbK44eOnXJDYx0M= +sigs.k8s.io/kustomize/cmd/config v0.21.1/go.mod h1:7yEFYBJyBJlpZQ50VaRGQRtFMn3Vzn9Fb2wts4TCok4= +sigs.k8s.io/kustomize/kustomize/v5 v5.8.1 h1:Pgsg5psubpVEy7Nf6S89PARg5VmmWUC1l9dC6Dl4PG0= +sigs.k8s.io/kustomize/kustomize/v5 v5.8.1/go.mod h1:0vFa5pQ/elNEQMyiAJuGku9rhAMzz7u9+61hRqFKiwY= +sigs.k8s.io/kustomize/kyaml v0.21.1 h1:IVlbmhC076nf6foyL6Taw4BkrLuEsXUXNpsE+ScX7fI= +sigs.k8s.io/kustomize/kyaml v0.21.1/go.mod h1:hmxADesM3yUN2vbA5z1/YTBnzLJ1dajdqpQonwBL1FQ= sigs.k8s.io/yaml v1.5.0 h1:M10b2U7aEUY6hRtU870n2VTPgR5RZiL/I6Lcc2F4NUQ= sigs.k8s.io/yaml v1.5.0/go.mod h1:wZs27Rbxoai4C0f8/9urLZtZtF3avA3gKvGyPdDqTO4= From 3998652a7abee1920256612f690cefe4c3206e34 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Feb 2026 17:39:10 +0100 Subject: [PATCH 182/232] chore(deps): bump google.golang.org/api from 0.265.0 to 0.266.0 (#18964) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 6 +++--- go.sum | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index 18bf2f5a603c8..6c1f9cbfc5b92 100644 --- a/go.mod +++ b/go.mod @@ -142,7 +142,7 @@ require ( golang.org/x/crypto v0.47.0 golang.org/x/mod v0.32.0 golang.org/x/net v0.49.0 - golang.org/x/oauth2 v0.34.0 + golang.org/x/oauth2 v0.35.0 golang.org/x/sync v0.19.0 golang.org/x/sys v0.40.0 golang.org/x/term v0.39.0 @@ -150,7 +150,7 @@ require ( golang.org/x/time v0.14.0 golang.org/x/tools v0.41.0 golang.stackrox.io/grpc-http1 v0.5.1 - google.golang.org/api v0.265.0 + google.golang.org/api v0.266.0 google.golang.org/genproto v0.0.0-20260128011058-8636f8732409 google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 google.golang.org/grpc v1.78.0 @@ -504,7 +504,7 @@ require ( go.yaml.in/yaml/v2 v2.4.3 // indirect golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 // indirect + 
google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20 // indirect gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.1 // indirect diff --git a/go.sum b/go.sum index 848371556b744..1e8adbcab9284 100644 --- a/go.sum +++ b/go.sum @@ -2177,8 +2177,8 @@ google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdr google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= -google.golang.org/api v0.265.0 h1:FZvfUdI8nfmuNrE34aOWFPmLC+qRBEiNm3JdivTvAAU= -google.golang.org/api v0.265.0/go.mod h1:uAvfEl3SLUj/7n6k+lJutcswVojHPp2Sp08jWCu8hLY= +google.golang.org/api v0.266.0 h1:hco+oNCf9y7DmLeAtHJi/uBAY7n/7XC9mZPxu1ROiyk= +google.golang.org/api v0.266.0/go.mod h1:Jzc0+ZfLnyvXma3UtaTl023TdhZu6OMBP9tJ+0EmFD0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2254,8 +2254,8 @@ google.golang.org/genproto v0.0.0-20260128011058-8636f8732409 h1:VQZ/yAbAtjkHgH8 google.golang.org/genproto v0.0.0-20260128011058-8636f8732409/go.mod h1:rxKD3IEILWEu3P44seeNOAwZN4SaoKaQ/2eTg4mM6EM= google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 h1:merA0rdPeUV3YIIfHHcH4qBkiQAc1nfCKSI7lB4cV2M= google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409/go.mod h1:fl8J1IvUjCilwZzQowmw2b7HQB2eAuYBabMXzWurF+I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 h1:H86B94AW+VfJWDqFeEbBPhEtHzJwJfTbgE2lZa54ZAQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20 h1:Jr5R2J6F6qWyzINc+4AM8t5pfUz6beZpHp678GNrMbE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= From bfcce03fe5cc8cc4600aa4e0b56fd05277732e33 Mon Sep 17 00:00:00 2001 From: Dmitrii Dolgov <9erthalion6@gmail.com> Date: Fri, 13 Feb 2026 17:43:04 +0100 Subject: [PATCH 183/232] ROX-33163: Fix default resources (#19021) The default fact resources has a duplicated resources key, which prevents helm from populating the resources section of the DaemonSet. Remove the duplication. 
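A minimal sketch of the two value shapes, assuming the DaemonSet template reads the requests/limits keys directly under famResources (the exact template reference is not part of this patch):

    # Before: the extra "resources" level hides requests/limits one key too deep,
    # so the template's lookup under famResources finds nothing to render.
    famResources:
      resources:
        requests:
          memory: "320Mi"
          cpu: "50m"

    # After: requests/limits sit directly under famResources, the shape the
    # template expects, so the DaemonSet resources section is populated again.
    famResources:
      requests:
        memory: "320Mi"
        cpu: "50m"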
--- .../internal/defaults/40-resources.yaml | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/image/templates/helm/stackrox-secured-cluster/internal/defaults/40-resources.yaml b/image/templates/helm/stackrox-secured-cluster/internal/defaults/40-resources.yaml index eab9f4a010ff6..1c7672394c9ce 100644 --- a/image/templates/helm/stackrox-secured-cluster/internal/defaults/40-resources.yaml +++ b/image/templates/helm/stackrox-secured-cluster/internal/defaults/40-resources.yaml @@ -44,10 +44,9 @@ collector: cpu: "1" famResources: - resources: - requests: - memory: "320Mi" - cpu: "50m" - limits: - memory: "1Gi" - cpu: "750m" + requests: + memory: "320Mi" + cpu: "50m" + limits: + memory: "1Gi" + cpu: "750m" From 4957c5be3eed036de692722d82dcde63ac774b72 Mon Sep 17 00:00:00 2001 From: Tomasz Janiszewski Date: Fri, 13 Feb 2026 19:12:04 +0000 Subject: [PATCH 184/232] fix: auto-merge and approve (#19030) --- .github/workflows/auto-merge.yml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/workflows/auto-merge.yml b/.github/workflows/auto-merge.yml index 3da8b53614d5f..5789969c2a8da 100644 --- a/.github/workflows/auto-merge.yml +++ b/.github/workflows/auto-merge.yml @@ -84,14 +84,13 @@ jobs: continue fi - # All conditions met - enable auto-merge + echo "::notice::Enabling auto-merge for PR #$PR_NUMBER" + gh pr merge --repo "${{ github.repository }}" \ + --auto --squash "$PR_NUMBER" echo "::notice::Approving PR #$PR_NUMBER" gh pr review --repo "${{ github.repository }}" \ --approve "$PR_NUMBER" || true - echo "::notice::Enabling auto-merge for PR #$PR_NUMBER" - gh pr merge --repo "${{ github.repository }}" \ - --auto --squash "$PR_NUMBER" echo "::notice::✓ Auto-merge enabled for PR #$PR_NUMBER" done From b53fea12c93ccf74a804a372b979db1833b71e46 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Feb 2026 12:47:49 -0700 Subject: [PATCH 185/232] chore(deps): bump the golang-org-x group across 1 directory with 7 updates (#18936) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 14 +++++++------- go.sum | 28 ++++++++++++++-------------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/go.mod b/go.mod index 6c1f9cbfc5b92..5d90a1b455cda 100644 --- a/go.mod +++ b/go.mod @@ -139,16 +139,16 @@ require ( go.uber.org/mock v0.6.0 go.uber.org/zap v1.27.1 go.yaml.in/yaml/v3 v3.0.4 - golang.org/x/crypto v0.47.0 - golang.org/x/mod v0.32.0 - golang.org/x/net v0.49.0 + golang.org/x/crypto v0.48.0 + golang.org/x/mod v0.33.0 + golang.org/x/net v0.50.0 golang.org/x/oauth2 v0.35.0 golang.org/x/sync v0.19.0 - golang.org/x/sys v0.40.0 - golang.org/x/term v0.39.0 - golang.org/x/text v0.33.0 + golang.org/x/sys v0.41.0 + golang.org/x/term v0.40.0 + golang.org/x/text v0.34.0 golang.org/x/time v0.14.0 - golang.org/x/tools v0.41.0 + golang.org/x/tools v0.42.0 golang.stackrox.io/grpc-http1 v0.5.1 google.golang.org/api v0.266.0 google.golang.org/genproto v0.0.0-20260128011058-8636f8732409 diff --git a/go.sum b/go.sum index 1e8adbcab9284..8714e98276639 100644 --- a/go.sum +++ b/go.sum @@ -1780,8 +1780,8 @@ golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZP golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/crypto v0.32.0/go.mod 
h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= -golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8= -golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A= +golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= +golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1826,8 +1826,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= -golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= +golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= +golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1893,8 +1893,8 @@ golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= -golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o= -golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8= +golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60= +golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2014,8 +2014,8 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= -golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= +golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -2029,8 +2029,8 @@ golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= -golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY= -golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww= +golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg= +golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2047,8 +2047,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= -golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= +golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= +golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2126,8 +2126,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= -golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= +golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k= +golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0= golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= From 6bea17e5b33de65d7c2302ff963530fd8ced4b0c Mon Sep 17 00:00:00 2001 From: David Caravello <119438707+dcaravel@users.noreply.github.com> Date: Fri, 13 Feb 2026 13:52:01 -0600 Subject: [PATCH 186/232] ROX-33051: Update `v2` bundle to use new timeout for `rhel-vex` (#18996) --- scanner/updater/version/VULNERABILITY_BUNDLE_VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scanner/updater/version/VULNERABILITY_BUNDLE_VERSION b/scanner/updater/version/VULNERABILITY_BUNDLE_VERSION index 
555de35e6e78f..6039352b691cd 100644 --- a/scanner/updater/version/VULNERABILITY_BUNDLE_VERSION +++ b/scanner/updater/version/VULNERABILITY_BUNDLE_VERSION @@ -6,4 +6,4 @@ # This process ensures that each version's vulnerability data is accurately captured and maintained in accordance with its respective version. dev heads/master v1 4.5.2 -v2 4.8.4 +v2 heads/2bcc59d86d479a560c8675e75b166aaa0cf3babc From e20c8a141d56d8e103b92bdbec23db05589e2c14 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Feb 2026 13:16:51 -0700 Subject: [PATCH 187/232] chore(deps): bump cloud.google.com/go/storage from 1.59.2 to 1.60.0 (#18965) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 8 ++++---- go.sum | 24 ++++++++++++------------ 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/go.mod b/go.mod index 5d90a1b455cda..1bec9f4181d8e 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( cloud.google.com/go/compute/metadata v0.9.0 cloud.google.com/go/containeranalysis v0.14.2 cloud.google.com/go/securitycenter v1.38.1 - cloud.google.com/go/storage v1.59.2 + cloud.google.com/go/storage v1.60.0 dario.cat/mergo v1.0.2 github.com/Azure/azure-sdk-for-go-extensions v0.5.1 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0 @@ -152,7 +152,7 @@ require ( golang.stackrox.io/grpc-http1 v0.5.1 google.golang.org/api v0.266.0 google.golang.org/genproto v0.0.0-20260128011058-8636f8732409 - google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 + google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20 google.golang.org/grpc v1.78.0 google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6 google.golang.org/protobuf v1.36.11 @@ -203,8 +203,8 @@ require ( github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 // indirect github.com/DataDog/zstd v1.5.7 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.54.0 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.54.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.55.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.4.0 // indirect diff --git a/go.sum b/go.sum index 8714e98276639..91378dd000cb7 100644 --- a/go.sum +++ b/go.sum @@ -73,8 +73,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.59.2 h1:gmOAuG1opU8YvycMNpP+DvHfT9BfzzK5Cy+arP+Nocw= -cloud.google.com/go/storage v1.59.2/go.mod h1:cMWbtM+anpC74gn6qjLh+exqYcfmB9Hqe5z6adx+CLI= +cloud.google.com/go/storage v1.60.0 h1:oBfZrSOCimggVNz9Y/bXY35uUcts7OViubeddTTVzQ8= +cloud.google.com/go/storage v1.60.0/go.mod h1:q+5196hXfejkctrnx+VYU8RKQr/L3c0cBIlrjmiAKE0= cloud.google.com/go/trace v1.11.7 h1:kDNDX8JkaAG3R2nq1lIdkb7FCSi1rCmsEtKVsty7p+U= 
cloud.google.com/go/trace v1.11.7/go.mod h1:TNn9d5V3fQVf6s4SCveVMIBS2LJUqo73GACmq/Tky0s= dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= @@ -158,12 +158,12 @@ github.com/DataDog/zstd v1.5.7 h1:ybO8RBeh29qrxIhCA9E8gKY6xfONU9T6G6aP9DTKfLE= github.com/DataDog/zstd v1.5.7/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.54.0 h1:lhhYARPUu3LmHysQ/igznQphfzynnqI3D75oUyw1HXk= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.54.0/go.mod h1:l9rva3ApbBpEJxSNYnwT9N4CDLrWgtq3u8736C5hyJw= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.54.0 h1:xfK3bbi6F2RDtaZFtUdKO3osOBIhNb+xTs8lFW6yx9o= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.54.0/go.mod h1:vB2GH9GAYYJTO3mEn8oYwzEdhlayZIdQz6zdzgUIRvA= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.54.0 h1:s0WlVbf9qpvkh1c/uDAPElam0WrL7fHRIidgZJ7UqZI= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.54.0/go.mod h1:Mf6O40IAyB9zR/1J8nGDDPirZQQPbYJni8Yisy7NTMc= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0 h1:UnDZ/zFfG1JhH/DqxIZYU/1CUAlTUScoXD/LcM2Ykk8= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0/go.mod h1:IA1C1U7jO/ENqm/vhi7V9YYpBsp+IMyqNrEN94N7tVc= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.55.0 h1:7t/qx5Ost0s0wbA/VDrByOooURhp+ikYwv20i9Y07TQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.55.0/go.mod h1:vB2GH9GAYYJTO3mEn8oYwzEdhlayZIdQz6zdzgUIRvA= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.55.0 h1:0s6TxfCu2KHkkZPnBfsQ2y5qia0jl3MMrmBhu3nCOYk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.55.0/go.mod h1:Mf6O40IAyB9zR/1J8nGDDPirZQQPbYJni8Yisy7NTMc= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= @@ -1704,8 +1704,8 @@ go.opentelemetry.io/otel/exporters/prometheus v0.60.0 h1:cGtQxGvZbnrWdC2GyjZi0PD go.opentelemetry.io/otel/exporters/prometheus v0.60.0/go.mod h1:hkd1EekxNo69PTV4OWFGZcKQiIqg0RfuWExcPKFvepk= go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU= go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0/go.mod h1:zKU4zUgKiaRxrdovSS2amdM5gOc59slmo/zJwGX+YBg= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0 h1:wm/Q0GAAykXv83wzcKzGGqAnnfLFyFe7RslekZuv+VI= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.38.0/go.mod h1:ra3Pa40+oKjvYh+ZD3EdxFZZB0xdMfuileHAm4nNN7w= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.39.0 h1:5gn2urDL/FBnK8OkCfD1j3/ER79rUuTYmCvlXBKeYL8= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.39.0/go.mod h1:0fBG6ZJxhqByfFZDwSwpZGzJU671HkwpWaNe2t4VUPI= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 
h1:cC2yDI3IQd0Udsux7Qmq8ToKAx1XCilTQECZ0KDZyTw= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s= go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= @@ -2252,8 +2252,8 @@ google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20260128011058-8636f8732409 h1:VQZ/yAbAtjkHgH80teYd2em3xtIkkHd7ZhqfH2N9CsM= google.golang.org/genproto v0.0.0-20260128011058-8636f8732409/go.mod h1:rxKD3IEILWEu3P44seeNOAwZN4SaoKaQ/2eTg4mM6EM= -google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 h1:merA0rdPeUV3YIIfHHcH4qBkiQAc1nfCKSI7lB4cV2M= -google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409/go.mod h1:fl8J1IvUjCilwZzQowmw2b7HQB2eAuYBabMXzWurF+I= +google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20 h1:7ei4lp52gK1uSejlA8AZl5AJjeLUOHBQscRQZUgAcu0= +google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20/go.mod h1:ZdbssH/1SOVnjnDlXzxDHK2MCidiqXtbYccJNzNYPEE= google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20 h1:Jr5R2J6F6qWyzINc+4AM8t5pfUz6beZpHp678GNrMbE= google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= From a72530e9083e5c7491d37d4d853dab2fffb1acc3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 13 Feb 2026 15:15:08 -0700 Subject: [PATCH 188/232] chore(deps): bump cloud.google.com/go/artifactregistry from 1.19.0 to 1.20.0 (#19012) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 1bec9f4181d8e..00591557777bb 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/stackrox/rox go 1.25.0 require ( - cloud.google.com/go/artifactregistry v1.19.0 + cloud.google.com/go/artifactregistry v1.20.0 cloud.google.com/go/compute/metadata v0.9.0 cloud.google.com/go/containeranalysis v0.14.2 cloud.google.com/go/securitycenter v1.38.1 diff --git a/go.sum b/go.sum index 91378dd000cb7..ee405d3aa5c1d 100644 --- a/go.sum +++ b/go.sum @@ -32,8 +32,8 @@ cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0c cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= -cloud.google.com/go/artifactregistry v1.19.0 h1:DaOHWeURq93K27/6Sa2fy3rJoftrVXKeT3tonM4fxtI= -cloud.google.com/go/artifactregistry v1.19.0/go.mod h1:UEAPCgHDFC1q+A8nnVxXHPEy9KCVOeavFBF1fEChQvU= +cloud.google.com/go/artifactregistry v1.20.0 h1:j/XQiQfaeTyQeNj3HNk4iDFREVnY/fxkHIjsxpaDs8A= +cloud.google.com/go/artifactregistry v1.20.0/go.mod h1:0G9wdbGyDFkvrYH+2AlQs9MuTJdbY8Vg45M8VjlI8rc= cloud.google.com/go/auth v0.18.1 h1:IwTEx92GFUo2pJ6Qea0EU3zYvKnTAeRCODxfA/G5UWs= cloud.google.com/go/auth v0.18.1/go.mod h1:GfTYoS9G3CWpRA3Va9doKN9mjPGRS+v41jmZAhBzbrA= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= From 
1dd5b7433fba07bb99d8dce704dc9f2ea6529986 Mon Sep 17 00:00:00 2001 From: David Caravello <119438707+dcaravel@users.noreply.github.com> Date: Fri, 13 Feb 2026 16:43:03 -0600 Subject: [PATCH 189/232] ROX-33051: Add 'dry-run' flag to scanner vuln workflow (#18995) --- .../scanner-versioned-definitions-update.yaml | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/.github/workflows/scanner-versioned-definitions-update.yaml b/.github/workflows/scanner-versioned-definitions-update.yaml index ede14a5fce80f..536a7301dbf64 100644 --- a/.github/workflows/scanner-versioned-definitions-update.yaml +++ b/.github/workflows/scanner-versioned-definitions-update.yaml @@ -15,7 +15,7 @@ on: required: false ga_ref: description: > - WARNING - THIS WLL UPDATE A GA BUNDLE STREAM: Sets the git reference + WARNING - THIS WLL UPDATE A GA BUNDLE STREAM if not a dry-run: Sets the git reference to build a bundle that will be uploaded to the GA version stream. required: false rc_ref: @@ -32,6 +32,10 @@ on: - nvd-feeds required: true default: nvd-feeds + dry_run: + description: "Dry-run" + default: false + type: boolean jobs: parse-versions: @@ -50,6 +54,7 @@ jobs: INPUT_GA_REF: ${{ github.event.inputs.ga_ref || '' }} INPUT_RC_REF: ${{ github.event.inputs.rc_ref || '' }} INPUT_STREAM: ${{ github.event.inputs.stream || '' }} + INPUT_DRY_RUN: ${{ github.event.inputs.dry_run || 'false' }} run: | set -o pipefail @@ -73,8 +78,11 @@ jobs: allow_rc=true fi if [ -z "$ref" ]; then - echo >&2 "abort: invalid input: could not determine the reference" - exit 1 + # Allow empty ref when dry_run is true (follow normal flow) + if [ "$INPUT_DRY_RUN" == "false" ]; then + echo >&2 "abort: invalid input: could not determine the reference" + exit 1 + fi fi # Configure environment to generate the matrix with versions and @@ -370,7 +378,7 @@ jobs: upload-definitions: needs: - build-and-run - if: ${{ (failure() || success()) && (github.event_name == 'schedule' || github.event_name == 'workflow_dispatch') }} + if: ${{ (failure() || success()) && (github.event_name == 'schedule' || (github.event_name == 'workflow_dispatch' && github.event.inputs.dry_run == 'false')) }} runs-on: ubuntu-latest steps: # Checkout to run ./.github/actions/download-artifact-with-retry @@ -484,7 +492,7 @@ jobs: - build-and-run - upload-definitions runs-on: ubuntu-latest - if: ${{ failure() && github.ref_name == 'master' }} + if: ${{ failure() && github.ref_name == 'master' && (github.event_name == 'schedule' || github.event.inputs.dry_run == 'false') }} steps: - name: Send Slack notification on workflow failure run: | From b325e78804ac7e96dd0ad1a2d0c6294ca842b81d Mon Sep 17 00:00:00 2001 From: David Caravello <119438707+dcaravel@users.noreply.github.com> Date: Fri, 13 Feb 2026 17:36:28 -0600 Subject: [PATCH 190/232] ROX-30569: Add SBOM Scanning REST API to Central (#18484) --- central/image/service/http_handler.go | 49 +- central/image/service/http_handler_test.go | 18 +- .../image/service/sbom_scan_http_handler.go | 149 +++ .../service/sbom_scan_http_handler_test.go | 170 +++ central/main.go | 8 +- generated/api/v1/sbom.pb.go | 228 ++++ generated/api/v1/sbom.swagger.json | 46 + generated/api/v1/sbom_vtproto.pb.go | 1017 +++++++++++++++++ pkg/env/image_scan.go | 3 + pkg/features/list.go | 3 + pkg/scanners/scannerv4/scannerv4.go | 139 +++ pkg/scanners/types/mocks/types.go | 32 + pkg/scanners/types/types.go | 8 +- proto/api/v1/sbom.proto | 31 + 14 files changed, 1869 insertions(+), 32 deletions(-) create mode 100644 
central/image/service/sbom_scan_http_handler.go create mode 100644 central/image/service/sbom_scan_http_handler_test.go create mode 100644 generated/api/v1/sbom.pb.go create mode 100644 generated/api/v1/sbom.swagger.json create mode 100644 generated/api/v1/sbom_vtproto.pb.go create mode 100644 proto/api/v1/sbom.proto diff --git a/central/image/service/http_handler.go b/central/image/service/http_handler.go index ddb13ab8da39c..367a9d50149b7 100644 --- a/central/image/service/http_handler.go +++ b/central/image/service/http_handler.go @@ -24,12 +24,13 @@ import ( "github.com/stackrox/rox/pkg/images/types" "github.com/stackrox/rox/pkg/images/utils" "github.com/stackrox/rox/pkg/logging" + "github.com/stackrox/rox/pkg/scanners" scannerTypes "github.com/stackrox/rox/pkg/scanners/types" "github.com/stackrox/rox/pkg/zip" "google.golang.org/grpc/codes" ) -type sbomHttpHandler struct { +type sbomGenHttpHandler struct { integration integration.Set enricher enricher.ImageEnricher enricherV2 enricher.ImageEnricherV2 @@ -37,11 +38,11 @@ type sbomHttpHandler struct { riskManager manager.Manager } -var _ http.Handler = (*sbomHttpHandler)(nil) +var _ http.Handler = (*sbomGenHttpHandler)(nil) // SBOMHandler returns a handler for get sbom http request. -func SBOMHandler(integration integration.Set, enricher enricher.ImageEnricher, enricherV2 enricher.ImageEnricherV2, clusterSACHelper sachelper.ClusterSacHelper, riskManager manager.Manager) http.Handler { - return sbomHttpHandler{ +func SBOMGenHandler(integration integration.Set, enricher enricher.ImageEnricher, enricherV2 enricher.ImageEnricherV2, clusterSACHelper sachelper.ClusterSacHelper, riskManager manager.Manager) http.Handler { + return sbomGenHttpHandler{ integration: integration, enricher: enricher, enricherV2: enricherV2, @@ -50,7 +51,7 @@ func SBOMHandler(integration integration.Set, enricher enricher.ImageEnricher, e } } -func (h sbomHttpHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { +func (h sbomGenHttpHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { w.WriteHeader(http.StatusMethodNotAllowed) return @@ -94,7 +95,7 @@ func (h sbomHttpHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } // enrichWithModelSwitch enriches an image by name, returning both V1 and V2 models based on the feature flag. -func (h sbomHttpHandler) enrichWithModelSwitch( +func (h sbomGenHttpHandler) enrichWithModelSwitch( ctx context.Context, enrichmentCtx enricher.EnrichmentContext, ci *storage.ContainerImage, @@ -121,7 +122,7 @@ func (h sbomHttpHandler) enrichWithModelSwitch( } // enrichImage enriches the image with the given name and based on the given enrichment context. -func (h sbomHttpHandler) enrichImage(ctx context.Context, enrichmentCtx enricher.EnrichmentContext, ci *storage.ContainerImage) (*storage.Image, bool, error) { +func (h sbomGenHttpHandler) enrichImage(ctx context.Context, enrichmentCtx enricher.EnrichmentContext, ci *storage.ContainerImage) (*storage.Image, bool, error) { // forcedEnrichment is set to true when enrichImage forces an enrichment. forcedEnrichment := false @@ -167,7 +168,7 @@ func errorOrNotScanned(enrichmentResult enricher.EnrichmentResult, err error) er } // getSBOM generates an SBOM for the specified parameters. 
-func (h sbomHttpHandler) getSBOM(ctx context.Context, params apiparams.SBOMRequestBody) ([]byte, error) { +func (h sbomGenHttpHandler) getSBOM(ctx context.Context, params apiparams.SBOMRequestBody) ([]byte, error) { enrichmentCtx := enricher.EnrichmentContext{ Delegable: true, FetchOpt: enricher.UseCachesIfPossible, @@ -268,25 +269,18 @@ func addForceToEnrichmentContext(enrichmentCtx *enricher.EnrichmentContext) { } // getScannerV4SBOMIntegration returns the SBOM interface of Scanner V4. -func (h sbomHttpHandler) getScannerV4SBOMIntegration() (scannerTypes.SBOMer, error) { - scanners := h.integration.ScannerSet() - for _, scanner := range scanners.GetAll() { - if scanner.GetScanner().Type() == scannerTypes.ScannerV4 { - if scannerv4, ok := scanner.GetScanner().(scannerTypes.SBOMer); ok { - return scannerv4, nil - } - } - } - return nil, errors.New("Scanner V4 integration not found") +func (h sbomGenHttpHandler) getScannerV4SBOMIntegration() (scannerTypes.SBOMer, error) { + sbomer, _, err := getScannerV4SBOMIntegration(h.integration.ScannerSet()) + return sbomer, err } // scannedByScannerV4 checks if image is scanned by Scanner V4. -func (h sbomHttpHandler) scannedByScannerV4(scan *storage.ImageScan) bool { +func (h sbomGenHttpHandler) scannedByScannerV4(scan *storage.ImageScan) bool { return scan.GetDataSource().GetId() == iiStore.DefaultScannerV4Integration.GetId() } // saveImage saves the image to Central's database. -func (h sbomHttpHandler) saveImage(img *storage.Image, imgV2 *storage.ImageV2) error { +func (h sbomGenHttpHandler) saveImage(img *storage.Image, imgV2 *storage.ImageV2) error { if features.FlattenImageData.Enabled() { return h.saveImageV2(imgV2) } @@ -294,7 +288,7 @@ func (h sbomHttpHandler) saveImage(img *storage.Image, imgV2 *storage.ImageV2) e } // saveImageV1 saves an Image V1 to Central's database. -func (h sbomHttpHandler) saveImageV1(img *storage.Image) error { +func (h sbomGenHttpHandler) saveImageV1(img *storage.Image) error { img.Id = utils.GetSHA(img) if img.GetId() == "" { return nil @@ -308,7 +302,7 @@ func (h sbomHttpHandler) saveImageV1(img *storage.Image) error { } // saveImageV2 saves an Image V2 to Central's database. 
-func (h sbomHttpHandler) saveImageV2(imgV2 *storage.ImageV2) error { +func (h sbomGenHttpHandler) saveImageV2(imgV2 *storage.ImageV2) error { if imgV2 == nil { return errors.New("nil images cannot be saved") } @@ -328,3 +322,14 @@ func (h sbomHttpHandler) saveImageV2(imgV2 *storage.ImageV2) error { } return nil } + +func getScannerV4SBOMIntegration(scanners scanners.Set) (scannerTypes.SBOMer, *storage.DataSource, error) { + for _, scanner := range scanners.GetAll() { + if scanner.GetScanner().Type() == scannerTypes.ScannerV4 { + if scannerv4, ok := scanner.GetScanner().(scannerTypes.SBOMer); ok { + return scannerv4, scanner.DataSource(), nil + } + } + } + return nil, nil, errors.New("Scanner V4 integration not found") +} diff --git a/central/image/service/http_handler_test.go b/central/image/service/http_handler_test.go index aa3664cf315d9..76ebabcdc1ba0 100644 --- a/central/image/service/http_handler_test.go +++ b/central/image/service/http_handler_test.go @@ -56,7 +56,7 @@ func TestHttpHandler_ServeHTTP(t *testing.T) { req := httptest.NewRequest(http.MethodGet, "/sbom", nil) recorder := httptest.NewRecorder() - handler := SBOMHandler(imageintegration.Set(), nil, nil, nil, nil) + handler := SBOMGenHandler(imageintegration.Set(), nil, nil, nil, nil) handler.ServeHTTP(recorder, req) res := recorder.Result() @@ -99,7 +99,7 @@ func TestHttpHandler_ServeHTTP(t *testing.T) { req := httptest.NewRequest(http.MethodPost, "/sbom", bytes.NewReader(reqJson)) recorder := httptest.NewRecorder() - handler := SBOMHandler(imageintegration.Set(), mockEnricher, mockEnricherV2, nil, nil) + handler := SBOMGenHandler(imageintegration.Set(), mockEnricher, mockEnricherV2, nil, nil) handler.ServeHTTP(recorder, req) res := recorder.Result() @@ -142,6 +142,7 @@ func TestHttpHandler_ServeHTTP(t *testing.T) { scanner.EXPECT().GetSBOM(gomock.Any()).DoAndReturn(getFakeSBOM).AnyTimes() set.EXPECT().ScannerSet().Return(scannerSet).AnyTimes() fsr.EXPECT().GetScanner().Return(scanner).AnyTimes() + fsr.EXPECT().DataSource().Return(nil).AnyTimes() scannerSet.EXPECT().GetAll().Return([]scannerTypes.ImageScannerWithDataSource{fsr}).AnyTimes() reqBody := &apiparams.SBOMRequestBody{ @@ -154,7 +155,7 @@ func TestHttpHandler_ServeHTTP(t *testing.T) { req := httptest.NewRequest(http.MethodPost, "/sbom", bytes.NewReader(reqJson)) recorder := httptest.NewRecorder() - handler := SBOMHandler(set, mockEnricher, mockEnricherV2, nil, nil) + handler := SBOMGenHandler(set, mockEnricher, mockEnricherV2, nil, nil) handler.ServeHTTP(recorder, req) res := recorder.Result() @@ -172,7 +173,7 @@ func TestHttpHandler_ServeHTTP(t *testing.T) { req := httptest.NewRequest(http.MethodPost, "/sbom", bytes.NewBufferString(invalidJson)) recorder := httptest.NewRecorder() - handler := SBOMHandler(imageintegration.Set(), nil, nil, nil, nil) + handler := SBOMGenHandler(imageintegration.Set(), nil, nil, nil, nil) handler.ServeHTTP(recorder, req) res := recorder.Result() @@ -189,7 +190,7 @@ func TestHttpHandler_ServeHTTP(t *testing.T) { req := httptest.NewRequest(http.MethodPost, "/sbom", bytes.NewReader(reqBody)) recorder := httptest.NewRecorder() - handler := SBOMHandler(imageintegration.Set(), nil, nil, nil, nil) + handler := SBOMGenHandler(imageintegration.Set(), nil, nil, nil, nil) handler.ServeHTTP(recorder, req) res := recorder.Result() @@ -205,7 +206,7 @@ func TestHttpHandler_ServeHTTP(t *testing.T) { req := httptest.NewRequest(http.MethodPost, "/sbom", nil) recorder := httptest.NewRecorder() - handler := SBOMHandler(imageintegration.Set(), nil, nil, 
nil, nil) + handler := SBOMGenHandler(imageintegration.Set(), nil, nil, nil, nil) handler.ServeHTTP(recorder, req) res := recorder.Result() @@ -222,7 +223,7 @@ func TestHttpHandler_ServeHTTP(t *testing.T) { req := httptest.NewRequest(http.MethodPost, "/sbom", bytes.NewReader(largeRequestBody)) recorder := httptest.NewRecorder() - handler := SBOMHandler(imageintegration.Set(), nil, nil, nil, nil) + handler := SBOMGenHandler(imageintegration.Set(), nil, nil, nil, nil) handler.ServeHTTP(recorder, req) res := recorder.Result() @@ -280,6 +281,7 @@ func TestHttpHandler_ServeHTTP(t *testing.T) { mockImageScannerWithDS := scannerTypesMocks.NewMockImageScannerWithDataSource(ctrl) mockImageScannerWithDS.EXPECT().GetScanner().Return(mockScanner).AnyTimes() + mockImageScannerWithDS.EXPECT().DataSource().Return(nil).AnyTimes() mockScannerSet := scannerMocks.NewMockSet(ctrl) mockScannerSet.EXPECT().GetAll().Return([]scannerTypes.ImageScannerWithDataSource{mockImageScannerWithDS}).AnyTimes() @@ -312,7 +314,7 @@ func TestHttpHandler_ServeHTTP(t *testing.T) { assert.NoError(t, err) req := httptest.NewRequest(http.MethodPost, "/sbom", bytes.NewReader(reqJson)) recorder := httptest.NewRecorder() - handler := SBOMHandler(mockIntegrationSet, mockEnricher, mockEnricherV2, nil, mockRiskManager) + handler := SBOMGenHandler(mockIntegrationSet, mockEnricher, mockEnricherV2, nil, mockRiskManager) // Make the SBOM generation request. handler.ServeHTTP(recorder, req) diff --git a/central/image/service/sbom_scan_http_handler.go b/central/image/service/sbom_scan_http_handler.go new file mode 100644 index 0000000000000..4402acd0cde3f --- /dev/null +++ b/central/image/service/sbom_scan_http_handler.go @@ -0,0 +1,149 @@ +package service + +import ( + "context" + "fmt" + "io" + "net/http" + "strings" + + "github.com/pkg/errors" + v1 "github.com/stackrox/rox/generated/api/v1" + "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/env" + "github.com/stackrox/rox/pkg/features" + "github.com/stackrox/rox/pkg/httputil" + "github.com/stackrox/rox/pkg/images/integration" + "github.com/stackrox/rox/pkg/ioutils" + scannerTypes "github.com/stackrox/rox/pkg/scanners/types" + "github.com/stackrox/rox/pkg/set" + "google.golang.org/grpc/codes" + "google.golang.org/protobuf/encoding/protojson" +) + +var ( + supportedMediaTypes = set.NewFrozenStringSet( + "text/spdx+json", // Used by Sigstore/Cosign, not IANA registered. + "application/spdx+json", // IANA registered type for SPDX JSON. + ) +) + +type sbomScanHttpHandler struct { + integrations integration.Set +} + +var _ http.Handler = (*sbomScanHttpHandler)(nil) + +func SBOMScanHandler(integrations integration.Set) http.Handler { + return sbomScanHttpHandler{ + integrations: integrations, + } +} + +func (s sbomScanHttpHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // Verify Scanner V4 is enabled. + if !features.ScannerV4.Enabled() { + httputil.WriteGRPCStyleError(w, codes.Unimplemented, errors.New("Scanner V4 is disabled.")) + return + } + + if !features.SBOMScanning.Enabled() { + httputil.WriteGRPCStyleError(w, codes.Unimplemented, errors.New("SBOM Scanning is disabled.")) + return + } + + // Only POST requests are supported. + if r.Method != http.MethodPost { + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + // Validate the media type is supported. 
+ contentType := r.Header.Get("Content-Type") + err := s.validateMediaType(contentType) + if err != nil { + httputil.WriteGRPCStyleError(w, codes.InvalidArgument, fmt.Errorf("validating media type: %w", err)) + return + } + + // Enforce maximum uncompressed request size to prevent excessive memory usage. + // MaxBytesReader returns an error if the request body exceeds the limit. + maxReqSizeBytes := env.SBOMScanMaxReqSizeBytes.IntegerSetting() + limitedBody := http.MaxBytesReader(w, r.Body, int64(maxReqSizeBytes)) + + // Add cancellation safety to prevent partial/corrupted data on interruption. + // InterruptibleReader: Ensures clean termination without partial reads. + body, interrupt := ioutils.NewInterruptibleReader(limitedBody) + defer interrupt() + + // ContextBoundReader: Ensures reads fail fast when request context is canceled. + // This prevents hanging reads on connection interruption + readCtx, cancel := context.WithCancel(r.Context()) + defer cancel() + body = ioutils.NewContextBoundReader(readCtx, body) + + sbomScanResponse, err := s.scanSBOM(readCtx, body, contentType) + if err != nil { + // Check if error is due to request body exceeding size limit. + var maxBytesErr *http.MaxBytesError + if errors.As(err, &maxBytesErr) { + httputil.WriteGRPCStyleError(w, codes.InvalidArgument, fmt.Errorf("request body exceeds maximum size of %d bytes", maxBytesErr.Limit)) + return + } + httputil.WriteGRPCStyleError(w, codes.Internal, fmt.Errorf("scanning SBOM: %w", err)) + return + } + + // Serialize the scan result to JSON using protojson for proper protobuf handling. + // protojson handles protobuf-specific types (enums, oneof, etc.) correctly. + jsonBytes, err := protojson.MarshalOptions{Multiline: true}.Marshal(sbomScanResponse) + if err != nil { + httputil.WriteGRPCStyleError(w, codes.Internal, fmt.Errorf("serializing SBOM scan response: %w", err)) + return + } + + // Set response headers and write JSON response. + w.Header().Set("Content-Type", "application/json") + if _, err := w.Write(jsonBytes); err != nil { + log.Warnw("writing SBOM scan response: %v", err) + return + } +} + +// scanSBOM will request a scan of the SBOM from Scanner V4. +func (s sbomScanHttpHandler) scanSBOM(ctx context.Context, limitedReader io.Reader, contentType string) (*v1.SBOMScanResponse, error) { + // Get reference to Scanner V4. + scannerV4, dataSource, err := s.getScannerV4Integration() + if err != nil { + return nil, fmt.Errorf("getting Scanner V4 integration: %w", err) + } + + // Scan the SBOM. + sbomScanResponse, err := scannerV4.ScanSBOM(ctx, limitedReader, contentType) + if err != nil { + return nil, fmt.Errorf("scanning sbom: %w", err) + } + // Set the scan DataSource used to do the scan. + if sbomScanResponse.GetScan() != nil { + sbomScanResponse.GetScan().DataSource = dataSource + } + + return sbomScanResponse, nil +} + +// getScannerV4Integration returns the SBOM interface of Scanner V4. +func (s sbomScanHttpHandler) getScannerV4Integration() (scannerTypes.SBOMer, *storage.DataSource, error) { + sbomer, dataSource, err := getScannerV4SBOMIntegration(s.integrations.ScannerSet()) + return sbomer, dataSource, err +} + +// validateMediaType validates the media type from the content type header is supported. 
+func (s sbomScanHttpHandler) validateMediaType(contentType string) error { + // Strip any parameters (e.g., charset) from the media type + mediaType := strings.TrimSpace(strings.Split(contentType, ";")[0]) + if !supportedMediaTypes.Contains(mediaType) { + return fmt.Errorf("unsupported media type %q, supported types %v", mediaType, supportedMediaTypes.AsSlice()) + } + + return nil +} diff --git a/central/image/service/sbom_scan_http_handler_test.go b/central/image/service/sbom_scan_http_handler_test.go new file mode 100644 index 0000000000000..f895204ff9a36 --- /dev/null +++ b/central/image/service/sbom_scan_http_handler_test.go @@ -0,0 +1,170 @@ +package service + +import ( + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/pkg/errors" + "github.com/stackrox/rox/central/imageintegration" + v1 "github.com/stackrox/rox/generated/api/v1" + "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/features" + intergrationMocks "github.com/stackrox/rox/pkg/images/integration/mocks" + scannerMocks "github.com/stackrox/rox/pkg/scanners/mocks" + scannerTypes "github.com/stackrox/rox/pkg/scanners/types" + scannerTypesMocks "github.com/stackrox/rox/pkg/scanners/types/mocks" + "github.com/stackrox/rox/pkg/testutils" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +func TestScanSBOMHttpHandler_ServeHTTP(t *testing.T) { + t.Run("scanner v4 disabled", func(t *testing.T) { + testutils.MustUpdateFeature(t, features.ScannerV4, false) + + req := httptest.NewRequest(http.MethodGet, "/sbom", nil) + recorder := httptest.NewRecorder() + + handler := SBOMScanHandler(imageintegration.Set()) + handler.ServeHTTP(recorder, req) + + res := recorder.Result() + body, err := io.ReadAll(res.Body) + require.NoError(t, err) + err = res.Body.Close() + assert.NoError(t, err) + assert.Equal(t, http.StatusNotImplemented, res.StatusCode) + assert.Contains(t, string(body), "Scanner V4 is disabled") + }) + + t.Run("invalid request method", func(t *testing.T) { + testutils.MustUpdateFeature(t, features.ScannerV4, true) + + req := httptest.NewRequest(http.MethodGet, "/sbom", nil) + recorder := httptest.NewRecorder() + + handler := SBOMScanHandler(imageintegration.Set()) + handler.ServeHTTP(recorder, req) + + res := recorder.Result() + body, err := io.ReadAll(res.Body) + require.NoError(t, err) + err = res.Body.Close() + assert.NoError(t, err) + assert.Equal(t, http.StatusNotImplemented, res.StatusCode) + assert.Contains(t, string(body), "SBOM Scanning is disabled") + }) + + t.Run("invalid media type", func(t *testing.T) { + testutils.MustUpdateFeature(t, features.ScannerV4, true) + testutils.MustUpdateFeature(t, features.SBOMScanning, true) + + req := httptest.NewRequest(http.MethodPost, "/sbom", nil) + req.Header.Add("Content-Type", "wrong") + recorder := httptest.NewRecorder() + + handler := SBOMScanHandler(imageintegration.Set()) + handler.ServeHTTP(recorder, req) + + res := recorder.Result() + err := res.Body.Close() + assert.NoError(t, err) + assert.Equal(t, http.StatusBadRequest, res.StatusCode) + }) + + t.Run("scanner v4 integration missing", func(t *testing.T) { + testutils.MustUpdateFeature(t, features.ScannerV4, true) + testutils.MustUpdateFeature(t, features.SBOMScanning, true) + + req := httptest.NewRequest(http.MethodPost, "/sbom", nil) + req.Header.Add("Content-Type", supportedMediaTypes.AsSlice()[0]) + recorder := httptest.NewRecorder() + + handler := SBOMScanHandler(imageintegration.Set()) + 
handler.ServeHTTP(recorder, req) + + res := recorder.Result() + body, err := io.ReadAll(res.Body) + require.NoError(t, err) + err = res.Body.Close() + assert.NoError(t, err) + assert.Equal(t, http.StatusInternalServerError, res.StatusCode) + assert.Contains(t, string(body), "integration") + }) + + t.Run("scanner v4 scan error", func(t *testing.T) { + testutils.MustUpdateFeature(t, features.ScannerV4, true) + testutils.MustUpdateFeature(t, features.SBOMScanning, true) + + req := httptest.NewRequest(http.MethodPost, "/sbom", nil) + req.Header.Add("Content-Type", supportedMediaTypes.AsSlice()[0]) + recorder := httptest.NewRecorder() + + ctrl := gomock.NewController(t) + + mockSBOMScanner := scannerTypesMocks.NewMockScannerSBOMer(ctrl) + mockSBOMScanner.EXPECT().ScanSBOM(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("fake error")) + mockSBOMScanner.EXPECT().Type().Return(scannerTypes.ScannerV4) + + mockImageScannerWithDS := scannerTypesMocks.NewMockImageScannerWithDataSource(ctrl) + mockImageScannerWithDS.EXPECT().GetScanner().Return(mockSBOMScanner).AnyTimes() + mockImageScannerWithDS.EXPECT().DataSource().Return(&storage.DataSource{}).AnyTimes() + + mockScannerSet := scannerMocks.NewMockSet(ctrl) + mockScannerSet.EXPECT().GetAll().Return([]scannerTypes.ImageScannerWithDataSource{mockImageScannerWithDS}) + + mockIntegrationSet := intergrationMocks.NewMockSet(ctrl) + mockIntegrationSet.EXPECT().ScannerSet().Return(mockScannerSet) + + handler := SBOMScanHandler(mockIntegrationSet) + handler.ServeHTTP(recorder, req) + + res := recorder.Result() + body, err := io.ReadAll(res.Body) + require.NoError(t, err) + err = res.Body.Close() + assert.NoError(t, err) + assert.Equal(t, http.StatusInternalServerError, res.StatusCode) + assert.Contains(t, string(body), "scanning sbom") + }) + + t.Run("valid scan", func(t *testing.T) { + testutils.MustUpdateFeature(t, features.ScannerV4, true) + testutils.MustUpdateFeature(t, features.SBOMScanning, true) + + req := httptest.NewRequest(http.MethodPost, "/sbom", nil) + req.Header.Add("Content-Type", supportedMediaTypes.AsSlice()[0]) + recorder := httptest.NewRecorder() + + ctrl := gomock.NewController(t) + + mockSBOMScanner := scannerTypesMocks.NewMockScannerSBOMer(ctrl) + mockSBOMScanner.EXPECT().ScanSBOM(gomock.Any(), gomock.Any(), gomock.Any()).Return(&v1.SBOMScanResponse{Id: "fake-sbom-id"}, nil) + mockSBOMScanner.EXPECT().Type().Return(scannerTypes.ScannerV4) + + mockImageScannerWithDS := scannerTypesMocks.NewMockImageScannerWithDataSource(ctrl) + mockImageScannerWithDS.EXPECT().GetScanner().Return(mockSBOMScanner).AnyTimes() + mockImageScannerWithDS.EXPECT().DataSource().Return(&storage.DataSource{}).AnyTimes() + + mockScannerSet := scannerMocks.NewMockSet(ctrl) + mockScannerSet.EXPECT().GetAll().Return([]scannerTypes.ImageScannerWithDataSource{mockImageScannerWithDS}) + + mockIntegrationSet := intergrationMocks.NewMockSet(ctrl) + mockIntegrationSet.EXPECT().ScannerSet().Return(mockScannerSet) + + handler := SBOMScanHandler(mockIntegrationSet) + handler.ServeHTTP(recorder, req) + + res := recorder.Result() + body, err := io.ReadAll(res.Body) + require.NoError(t, err) + err = res.Body.Close() + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, res.StatusCode) + assert.Contains(t, string(body), "fake-sbom-id") + }) + +} diff --git a/central/main.go b/central/main.go index 0a9e85f9e1886..50f62b8a20211 100644 --- a/central/main.go +++ b/central/main.go @@ -807,7 +807,13 @@ func customRoutes() (customRoutes []routes.CustomRoute) { { 
Route: "/api/v1/images/sbom", Authorizer: user.With(permissions.Modify(resources.Image)), - ServerHandler: imageService.SBOMHandler(imageintegration.Set(), enrichment.ImageEnricherSingleton(), enrichment.ImageEnricherV2Singleton(), sachelper.NewClusterSacHelper(clusterDataStore.Singleton()), riskManager.Singleton()), + ServerHandler: imageService.SBOMGenHandler(imageintegration.Set(), enrichment.ImageEnricherSingleton(), enrichment.ImageEnricherV2Singleton(), sachelper.NewClusterSacHelper(clusterDataStore.Singleton()), riskManager.Singleton()), + Compression: true, + }, + { + Route: "/api/v1/sboms/scan", + Authorizer: user.With(permissions.Modify(resources.Image)), + ServerHandler: imageService.SBOMScanHandler(imageintegration.Set()), Compression: true, }, { diff --git a/generated/api/v1/sbom.pb.go b/generated/api/v1/sbom.pb.go new file mode 100644 index 0000000000000..5857f099a3d19 --- /dev/null +++ b/generated/api/v1/sbom.pb.go @@ -0,0 +1,228 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.11 +// protoc v6.32.1 +// source: api/v1/sbom.proto + +package v1 + +import ( + storage "github.com/stackrox/rox/generated/storage" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// SBOMScanResponse wraps metadata and scan results for an SBOM. +// +// Components must be JSON marshal/unmarshall compatible with `storage.Image` +// so that output formatting via roxctl is consistent. +// +// There is no service defined for this response as it is handled +// by a custom route (non-gRPC). +// +// next available tag: 3 +type SBOMScanResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Scan *SBOMScanResponse_SBOMScan `protobuf:"bytes,2,opt,name=scan,proto3" json:"scan,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SBOMScanResponse) Reset() { + *x = SBOMScanResponse{} + mi := &file_api_v1_sbom_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SBOMScanResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SBOMScanResponse) ProtoMessage() {} + +func (x *SBOMScanResponse) ProtoReflect() protoreflect.Message { + mi := &file_api_v1_sbom_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SBOMScanResponse.ProtoReflect.Descriptor instead. 
+func (*SBOMScanResponse) Descriptor() ([]byte, []int) { + return file_api_v1_sbom_proto_rawDescGZIP(), []int{0} +} + +func (x *SBOMScanResponse) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *SBOMScanResponse) GetScan() *SBOMScanResponse_SBOMScan { + if x != nil { + return x.Scan + } + return nil +} + +// next available tag: 5 +type SBOMScanResponse_SBOMScan struct { + state protoimpl.MessageState `protogen:"open.v1"` + ScannerVersion string `protobuf:"bytes,1,opt,name=scanner_version,json=scannerVersion,proto3" json:"scanner_version,omitempty"` + ScanTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=scan_time,json=scanTime,proto3" json:"scan_time,omitempty"` + Components []*storage.EmbeddedImageScanComponent `protobuf:"bytes,3,rep,name=components,proto3" json:"components,omitempty"` + DataSource *storage.DataSource `protobuf:"bytes,4,opt,name=data_source,json=dataSource,proto3" json:"data_source,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SBOMScanResponse_SBOMScan) Reset() { + *x = SBOMScanResponse_SBOMScan{} + mi := &file_api_v1_sbom_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SBOMScanResponse_SBOMScan) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SBOMScanResponse_SBOMScan) ProtoMessage() {} + +func (x *SBOMScanResponse_SBOMScan) ProtoReflect() protoreflect.Message { + mi := &file_api_v1_sbom_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SBOMScanResponse_SBOMScan.ProtoReflect.Descriptor instead. 
+func (*SBOMScanResponse_SBOMScan) Descriptor() ([]byte, []int) { + return file_api_v1_sbom_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *SBOMScanResponse_SBOMScan) GetScannerVersion() string { + if x != nil { + return x.ScannerVersion + } + return "" +} + +func (x *SBOMScanResponse_SBOMScan) GetScanTime() *timestamppb.Timestamp { + if x != nil { + return x.ScanTime + } + return nil +} + +func (x *SBOMScanResponse_SBOMScan) GetComponents() []*storage.EmbeddedImageScanComponent { + if x != nil { + return x.Components + } + return nil +} + +func (x *SBOMScanResponse_SBOMScan) GetDataSource() *storage.DataSource { + if x != nil { + return x.DataSource + } + return nil +} + +var File_api_v1_sbom_proto protoreflect.FileDescriptor + +const file_api_v1_sbom_proto_rawDesc = "" + + "\n" + + "\x11api/v1/sbom.proto\x12\x02v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x13storage/image.proto\"\xbf\x02\n" + + "\x10SBOMScanResponse\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x121\n" + + "\x04scan\x18\x02 \x01(\v2\x1d.v1.SBOMScanResponse.SBOMScanR\x04scan\x1a\xe7\x01\n" + + "\bSBOMScan\x12'\n" + + "\x0fscanner_version\x18\x01 \x01(\tR\x0escannerVersion\x127\n" + + "\tscan_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\bscanTime\x12C\n" + + "\n" + + "components\x18\x03 \x03(\v2#.storage.EmbeddedImageScanComponentR\n" + + "components\x124\n" + + "\vdata_source\x18\x04 \x01(\v2\x13.storage.DataSourceR\n" + + "dataSourceB'\n" + + "\x18io.stackrox.proto.api.v1Z\v./api/v1;v1b\x06proto3" + +var ( + file_api_v1_sbom_proto_rawDescOnce sync.Once + file_api_v1_sbom_proto_rawDescData []byte +) + +func file_api_v1_sbom_proto_rawDescGZIP() []byte { + file_api_v1_sbom_proto_rawDescOnce.Do(func() { + file_api_v1_sbom_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_api_v1_sbom_proto_rawDesc), len(file_api_v1_sbom_proto_rawDesc))) + }) + return file_api_v1_sbom_proto_rawDescData +} + +var file_api_v1_sbom_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_api_v1_sbom_proto_goTypes = []any{ + (*SBOMScanResponse)(nil), // 0: v1.SBOMScanResponse + (*SBOMScanResponse_SBOMScan)(nil), // 1: v1.SBOMScanResponse.SBOMScan + (*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp + (*storage.EmbeddedImageScanComponent)(nil), // 3: storage.EmbeddedImageScanComponent + (*storage.DataSource)(nil), // 4: storage.DataSource +} +var file_api_v1_sbom_proto_depIdxs = []int32{ + 1, // 0: v1.SBOMScanResponse.scan:type_name -> v1.SBOMScanResponse.SBOMScan + 2, // 1: v1.SBOMScanResponse.SBOMScan.scan_time:type_name -> google.protobuf.Timestamp + 3, // 2: v1.SBOMScanResponse.SBOMScan.components:type_name -> storage.EmbeddedImageScanComponent + 4, // 3: v1.SBOMScanResponse.SBOMScan.data_source:type_name -> storage.DataSource + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_api_v1_sbom_proto_init() } +func file_api_v1_sbom_proto_init() { + if File_api_v1_sbom_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_api_v1_sbom_proto_rawDesc), len(file_api_v1_sbom_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_api_v1_sbom_proto_goTypes, + 
DependencyIndexes: file_api_v1_sbom_proto_depIdxs, + MessageInfos: file_api_v1_sbom_proto_msgTypes, + }.Build() + File_api_v1_sbom_proto = out.File + file_api_v1_sbom_proto_goTypes = nil + file_api_v1_sbom_proto_depIdxs = nil +} diff --git a/generated/api/v1/sbom.swagger.json b/generated/api/v1/sbom.swagger.json new file mode 100644 index 0000000000000..7010259cff871 --- /dev/null +++ b/generated/api/v1/sbom.swagger.json @@ -0,0 +1,46 @@ +{ + "swagger": "2.0", + "info": { + "title": "api/v1/sbom.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": {}, + "definitions": { + "googlerpcStatus": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + } + } + } + }, + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com. As of May 2023, there are no widely used type server\nimplementations and no plans to implement one.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." 
+ } + }, + "additionalProperties": {}, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(&foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n // or ...\n if (any.isSameTypeAs(Foo.getDefaultInstance())) {\n foo = any.unpack(Foo.getDefaultInstance());\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := &pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := &pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": ,\n \"lastName\": \n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" + } + } +} diff --git a/generated/api/v1/sbom_vtproto.pb.go b/generated/api/v1/sbom_vtproto.pb.go new file mode 100644 index 0000000000000..e09cbbdab5119 --- /dev/null +++ b/generated/api/v1/sbom_vtproto.pb.go @@ -0,0 +1,1017 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.6.1-0.20240409071808-615f978279ca +// source: api/v1/sbom.proto + +package v1 + +import ( + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + timestamppb1 "github.com/planetscale/vtprotobuf/types/known/timestamppb" + storage "github.com/stackrox/rox/generated/storage" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + io "io" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *SBOMScanResponse_SBOMScan) CloneVT() *SBOMScanResponse_SBOMScan { + if m == nil { + return (*SBOMScanResponse_SBOMScan)(nil) + } + r := new(SBOMScanResponse_SBOMScan) + r.ScannerVersion = m.ScannerVersion + r.ScanTime = (*timestamppb.Timestamp)((*timestamppb1.Timestamp)(m.ScanTime).CloneVT()) + if rhs := m.Components; rhs != nil { + tmpContainer := make([]*storage.EmbeddedImageScanComponent, len(rhs)) + for k, v := range rhs { + if vtpb, ok := interface{}(v).(interface { + CloneVT() *storage.EmbeddedImageScanComponent + }); ok { + tmpContainer[k] = vtpb.CloneVT() + } else { + tmpContainer[k] = proto.Clone(v).(*storage.EmbeddedImageScanComponent) + } + } + r.Components = tmpContainer + } + if rhs := m.DataSource; rhs != nil { + if vtpb, ok := interface{}(rhs).(interface{ CloneVT() *storage.DataSource }); ok { + r.DataSource = vtpb.CloneVT() + } else { + r.DataSource = proto.Clone(rhs).(*storage.DataSource) + } + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SBOMScanResponse_SBOMScan) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SBOMScanResponse) CloneVT() *SBOMScanResponse { + if m == nil { + return (*SBOMScanResponse)(nil) + } + r := new(SBOMScanResponse) + r.Id = m.Id + r.Scan = m.Scan.CloneVT() + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SBOMScanResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (this *SBOMScanResponse_SBOMScan) EqualVT(that *SBOMScanResponse_SBOMScan) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.ScannerVersion != that.ScannerVersion { + return false + } + if !(*timestamppb1.Timestamp)(this.ScanTime).EqualVT((*timestamppb1.Timestamp)(that.ScanTime)) { + return false + } + if len(this.Components) != len(that.Components) { + return false + } + for i, vx := range this.Components { + vy := that.Components[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &storage.EmbeddedImageScanComponent{} + } + if q == nil { + q = &storage.EmbeddedImageScanComponent{} + } + if equal, ok := interface{}(p).(interface { + EqualVT(*storage.EmbeddedImageScanComponent) bool + }); ok { + if !equal.EqualVT(q) { + return false + } + } else if !proto.Equal(p, q) { + return false + } + } + } + if equal, ok := interface{}(this.DataSource).(interface { + EqualVT(*storage.DataSource) bool + }); ok { + if !equal.EqualVT(that.DataSource) { + return false + } + } else if !proto.Equal(this.DataSource, that.DataSource) { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *SBOMScanResponse_SBOMScan) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*SBOMScanResponse_SBOMScan) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *SBOMScanResponse) EqualVT(that *SBOMScanResponse) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Id != that.Id { + return false + } + if !this.Scan.EqualVT(that.Scan) { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *SBOMScanResponse) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*SBOMScanResponse) + if !ok { + return false + } + return 
this.EqualVT(that) +} +func (m *SBOMScanResponse_SBOMScan) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SBOMScanResponse_SBOMScan) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SBOMScanResponse_SBOMScan) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DataSource != nil { + if vtmsg, ok := interface{}(m.DataSource).(interface { + MarshalToSizedBufferVT([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.DataSource) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Components) > 0 { + for iNdEx := len(m.Components) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Components[iNdEx]).(interface { + MarshalToSizedBufferVT([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Components[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x1a + } + } + if m.ScanTime != nil { + size, err := (*timestamppb1.Timestamp)(m.ScanTime).MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.ScannerVersion) > 0 { + i -= len(m.ScannerVersion) + copy(dAtA[i:], m.ScannerVersion) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ScannerVersion))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SBOMScanResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SBOMScanResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SBOMScanResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Scan != nil { + size, err := m.Scan.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SBOMScanResponse_SBOMScan) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ScannerVersion) + if l > 0 { + n += 1 + 
l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.ScanTime != nil { + l = (*timestamppb1.Timestamp)(m.ScanTime).SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.Components) > 0 { + for _, e := range m.Components { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.DataSource != nil { + if size, ok := interface{}(m.DataSource).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.DataSource) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SBOMScanResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Scan != nil { + l = m.Scan.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SBOMScanResponse_SBOMScan) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SBOMScanResponse_SBOMScan: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SBOMScanResponse_SBOMScan: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScannerVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ScannerVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScanTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScanTime == nil { + m.ScanTime = ×tamppb.Timestamp{} + } + if err := (*timestamppb1.Timestamp)(m.ScanTime).UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Components", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Components = append(m.Components, &storage.EmbeddedImageScanComponent{}) + if unmarshal, ok := interface{}(m.Components[len(m.Components)-1]).(interface { + UnmarshalVT([]byte) error + }); ok { + if err := unmarshal.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + if err := proto.Unmarshal(dAtA[iNdEx:postIndex], m.Components[len(m.Components)-1]); err != nil { + return err + } + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DataSource == nil { + m.DataSource = &storage.DataSource{} + } + if unmarshal, ok := interface{}(m.DataSource).(interface { + UnmarshalVT([]byte) error + }); ok { + if err := unmarshal.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + if err := proto.Unmarshal(dAtA[iNdEx:postIndex], m.DataSource); err != nil { + return err + } + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SBOMScanResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SBOMScanResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SBOMScanResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scan", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scan == nil { + m.Scan = &SBOMScanResponse_SBOMScan{} + } + if err := m.Scan.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SBOMScanResponse_SBOMScan) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SBOMScanResponse_SBOMScan: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SBOMScanResponse_SBOMScan: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScannerVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var stringValue string + if intStringLen > 0 { + stringValue = unsafe.String(&dAtA[iNdEx], intStringLen) + } + m.ScannerVersion = stringValue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScanTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScanTime == nil { + m.ScanTime = ×tamppb.Timestamp{} + } + if err := (*timestamppb1.Timestamp)(m.ScanTime).UnmarshalVTUnsafe(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Components", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Components = append(m.Components, &storage.EmbeddedImageScanComponent{}) + if unmarshal, ok := interface{}(m.Components[len(m.Components)-1]).(interface { + UnmarshalVTUnsafe([]byte) error + }); ok { + if err := unmarshal.UnmarshalVTUnsafe(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + if err := proto.Unmarshal(dAtA[iNdEx:postIndex], m.Components[len(m.Components)-1]); err != nil { + return err + } + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
DataSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DataSource == nil { + m.DataSource = &storage.DataSource{} + } + if unmarshal, ok := interface{}(m.DataSource).(interface { + UnmarshalVTUnsafe([]byte) error + }); ok { + if err := unmarshal.UnmarshalVTUnsafe(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + if err := proto.Unmarshal(dAtA[iNdEx:postIndex], m.DataSource); err != nil { + return err + } + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SBOMScanResponse) UnmarshalVTUnsafe(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SBOMScanResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SBOMScanResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var stringValue string + if intStringLen > 0 { + stringValue = unsafe.String(&dAtA[iNdEx], intStringLen) + } + m.Id = stringValue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scan", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scan == nil { + m.Scan = &SBOMScanResponse_SBOMScan{} + } + if err := m.Scan.UnmarshalVTUnsafe(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/pkg/env/image_scan.go b/pkg/env/image_scan.go index e6d23f355cc88..8b4dc02d52e05 100644 --- a/pkg/env/image_scan.go +++ b/pkg/env/image_scan.go @@ -8,4 +8,7 @@ var ( // SBOMGenerationMaxReqSizeBytes defines the maximum allowed size of an SBOM generation API request. SBOMGenerationMaxReqSizeBytes = RegisterIntegerSetting("ROX_SBOM_GEN_MAX_REQ_SIZE_BYTES", 100*1024) + + // SBOMScanMaxReqSizeBytes defines the maximum allowed size of an SBOM scan API request (100 MB). + SBOMScanMaxReqSizeBytes = RegisterIntegerSetting("ROX_SBOM_SCAN_MAX_REQ_SIZE_BYTES", 100*1024*1024) ) diff --git a/pkg/features/list.go b/pkg/features/list.go index 3cfa019b415a8..a0024d7a3bbba 100644 --- a/pkg/features/list.go +++ b/pkg/features/list.go @@ -170,4 +170,7 @@ var ( // ScannerV4StoreExternalIndexReports enables storing index reports from delegated scans to Central's Scanner V4 Indexer. ScannerV4StoreExternalIndexReports = registerFeature("Enables storing index reports from delegated scans to Central's Scanner V4 Indexer", "ROX_SCANNER_V4_STORE_EXTERNAL_INDEX_REPORTS", enabled) + + // SBOMScanning enables matching vulnerabilities to components found in Red Hat produced SBOMs. + SBOMScanning = registerFeature("Enables matching vulnerabilities to components found in Red Hat produced SBOMs", "ROX_SBOM_SCANNING") ) diff --git a/pkg/scanners/scannerv4/scannerv4.go b/pkg/scanners/scannerv4/scannerv4.go index b970e5c0b43f0..7e8cdc12c8880 100644 --- a/pkg/scanners/scannerv4/scannerv4.go +++ b/pkg/scanners/scannerv4/scannerv4.go @@ -3,6 +3,7 @@ package scannerv4 import ( "context" "fmt" + "io" "time" "github.com/google/go-containerregistry/pkg/authn" @@ -132,6 +133,144 @@ func (s *scannerv4) GetSBOM(image *storage.Image) ([]byte, bool, error) { return sbom, found, err } +// ScanSBOM scans an SBOM, the contentType (which would include media type, optionally version, etc.) +// will be passed to the scanner to assist in parsing. 
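+// The response wraps the report's hash ID plus the scanner version, scan time, and
+// matched components; each component vulnerability is marked UNKNOWN_VULNERABILITY
+// because an SBOM does not always identify what a component represents.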
+func (s *scannerv4) ScanSBOM(ctx context.Context, sbomReader io.Reader, contentType string) (*v1.SBOMScanResponse, error) { + ctx, cancel := context.WithTimeout(ctx, scanTimeout) + defer cancel() + + var scannerVersion pkgscanner.Version + scannerVersion.Matcher = "v7" + + // TODO(ROX-30570): START Remove + // Read all data from the SBOM reader and throw it away (testing purposes only) + _ = ctx + dataB, err := io.ReadAll(sbomReader) + if err != nil { + return nil, fmt.Errorf("reading sbom data: %w", err) + } + log.Debugf("Scanned SBOM: %s", dataB) + // Create a fake vuln report for testing purposes + vr := fakeVulnReport() + // TODO(ROX-30570): END Remove + + // TODO(ROX-30570): Replace with actual scanner client call + // vr, err := s.scannerClient.ScanSBOM(ctx, sbomReader, contentType, client.Version(&scannerVersion)) + // if err != nil { + // return nil, fmt.Errorf("scanning sbom: %w", err) + // } + + scannerVersionStr, err := scannerVersion.Encode() + if err != nil { + log.Warnf("Failed to encode Scanner version: %v", err) + } + + return &v1.SBOMScanResponse{ + Id: vr.GetHashId(), + Scan: sbomScan(vr, scannerVersionStr), + }, nil +} + +// fakeVulnReport generates a fake vuln report for testing purposes. +// +// TODO(ROX-30570): REMOVE +func fakeVulnReport() *v4.VulnerabilityReport { + return &v4.VulnerabilityReport{ + HashId: "fake HashId", + Vulnerabilities: map[string]*v4.VulnerabilityReport_Vulnerability{ + "v1": { + Name: "Fake Vuln #1", + NormalizedSeverity: v4.VulnerabilityReport_Vulnerability_SEVERITY_CRITICAL, + CvssMetrics: []*v4.VulnerabilityReport_Vulnerability_CVSS{ + { + V3: &v4.VulnerabilityReport_Vulnerability_CVSS_V3{ + Vector: "CVSS:3.1/AV:L/AC:H/PR:L/UI:R/S:U/C:L/I:L/A:N", + }, + Source: v4.VulnerabilityReport_Vulnerability_CVSS_SOURCE_NVD, + Url: "https://nvd.nist.gov/vuln/detail/CVE-5678-1234", + }, + }, + }, + "v2": { + Name: "Fake Vuln #2", + NormalizedSeverity: v4.VulnerabilityReport_Vulnerability_SEVERITY_IMPORTANT, + CvssMetrics: []*v4.VulnerabilityReport_Vulnerability_CVSS{ + { + V3: &v4.VulnerabilityReport_Vulnerability_CVSS_V3{ + Vector: "CVSS:3.1/AV:L/AC:H/PR:L/UI:R/S:U/C:L/I:L/A:N", + }, + Source: v4.VulnerabilityReport_Vulnerability_CVSS_SOURCE_NVD, + Url: "https://nvd.nist.gov/vuln/detail/CVE-5678-1234", + }, + }, + }, + "v3": { + Name: "Fake Vuln #3", + NormalizedSeverity: v4.VulnerabilityReport_Vulnerability_SEVERITY_MODERATE, + + CvssMetrics: []*v4.VulnerabilityReport_Vulnerability_CVSS{ + { + V3: &v4.VulnerabilityReport_Vulnerability_CVSS_V3{ + BaseScore: 8.2, + Vector: "CVSS:3.1/AV:A/AC:L/PR:N/UI:N/S:C/C:L/I:N/A:H", + }, + Source: v4.VulnerabilityReport_Vulnerability_CVSS_SOURCE_RED_HAT, + Url: "https://access.redhat.com/security/cve/CVE-1234-567", + }, + { + V2: &v4.VulnerabilityReport_Vulnerability_CVSS_V2{ + BaseScore: 6.4, + Vector: "AV:N/AC:M/Au:M/C:C/I:N/A:P", + }, + Source: v4.VulnerabilityReport_Vulnerability_CVSS_SOURCE_NVD, + Url: "https://nvd.nist.gov/vuln/detail/CVE-1234-567", + }, + }, + }, + "v4": { + Name: "Fake Vuln #4", + NormalizedSeverity: v4.VulnerabilityReport_Vulnerability_SEVERITY_LOW, + CvssMetrics: []*v4.VulnerabilityReport_Vulnerability_CVSS{ + { + V3: &v4.VulnerabilityReport_Vulnerability_CVSS_V3{ + Vector: "CVSS:3.1/AV:L/AC:H/PR:L/UI:R/S:U/C:L/I:L/A:N", + }, + Source: v4.VulnerabilityReport_Vulnerability_CVSS_SOURCE_NVD, + Url: "https://nvd.nist.gov/vuln/detail/CVE-5678-1234", + }, + }, + }, + }, + PackageVulnerabilities: map[string]*v4.StringList{ + "p1": {Values: []string{"v1", "v2", "v3", "v4"}}, + "p2": {Values: 
[]string{"v3", "v4"}}, + }, + Contents: &v4.Contents{ + Packages: map[string]*v4.Package{ + "p1": {Name: "Fake Package #1", Version: "v1.0.0"}, + "p2": {Name: "Fake Package #2", Version: "v2.3.4"}, + }, + }, + } +} + +func sbomScan(vr *v4.VulnerabilityReport, scannerVersionStr string) *v1.SBOMScanResponse_SBOMScan { + imageScan := imageScan(nil, vr, scannerVersionStr) + + for _, c := range imageScan.GetComponents() { + for _, v := range c.GetVulns() { + // With SBOMs we will not always know what the component represents. + v.VulnerabilityType = storage.EmbeddedVulnerability_UNKNOWN_VULNERABILITY + } + } + + return &v1.SBOMScanResponse_SBOMScan{ + ScannerVersion: imageScan.GetScannerVersion(), + ScanTime: imageScan.GetScanTime(), + Components: imageScan.GetComponents(), + } +} + func (s *scannerv4) GetScan(image *storage.Image) (*storage.ImageScan, error) { if image.GetMetadata() == nil { return nil, nil diff --git a/pkg/scanners/types/mocks/types.go b/pkg/scanners/types/mocks/types.go index c10fe4d80ff4b..96bf2d432c494 100644 --- a/pkg/scanners/types/mocks/types.go +++ b/pkg/scanners/types/mocks/types.go @@ -10,6 +10,8 @@ package mocks import ( + context "context" + io "io" reflect "reflect" v1 "github.com/stackrox/rox/generated/api/v1" @@ -185,6 +187,21 @@ func (mr *MockSBOMerMockRecorder) GetSBOM(image any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSBOM", reflect.TypeOf((*MockSBOMer)(nil).GetSBOM), image) } +// ScanSBOM mocks base method. +func (m *MockSBOMer) ScanSBOM(ctx context.Context, reader io.Reader, mediatype string) (*v1.SBOMScanResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ScanSBOM", ctx, reader, mediatype) + ret0, _ := ret[0].(*v1.SBOMScanResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ScanSBOM indicates an expected call of ScanSBOM. +func (mr *MockSBOMerMockRecorder) ScanSBOM(ctx, reader, mediatype any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ScanSBOM", reflect.TypeOf((*MockSBOMer)(nil).ScanSBOM), ctx, reader, mediatype) +} + // MockScannerSBOMer is a mock of ScannerSBOMer interface. type MockScannerSBOMer struct { ctrl *gomock.Controller @@ -297,6 +314,21 @@ func (mr *MockScannerSBOMerMockRecorder) Name() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Name", reflect.TypeOf((*MockScannerSBOMer)(nil).Name)) } +// ScanSBOM mocks base method. +func (m *MockScannerSBOMer) ScanSBOM(ctx context.Context, reader io.Reader, mediatype string) (*v1.SBOMScanResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ScanSBOM", ctx, reader, mediatype) + ret0, _ := ret[0].(*v1.SBOMScanResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ScanSBOM indicates an expected call of ScanSBOM. +func (mr *MockScannerSBOMerMockRecorder) ScanSBOM(ctx, reader, mediatype any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ScanSBOM", reflect.TypeOf((*MockScannerSBOMer)(nil).ScanSBOM), ctx, reader, mediatype) +} + // Test mocks base method. 
func (m *MockScannerSBOMer) Test() error { m.ctrl.T.Helper() diff --git a/pkg/scanners/types/types.go b/pkg/scanners/types/types.go index cbf4ea2566eaa..a023445d0f8e5 100644 --- a/pkg/scanners/types/types.go +++ b/pkg/scanners/types/types.go @@ -1,6 +1,9 @@ package types import ( + "context" + "io" + v1 "github.com/stackrox/rox/generated/api/v1" v4 "github.com/stackrox/rox/generated/internalapi/scanner/v4" "github.com/stackrox/rox/generated/storage" @@ -34,8 +37,11 @@ type Scanner interface { // SBOM is the interface that contains the StackRox SBOM methods type SBOMer interface { - // GetSBOM to get sbom for an image + // GetSBOM to get SBOM for an image. GetSBOM(image *storage.Image) ([]byte, bool, error) + + // ScanSBOM to match vulnerabilities to components found in an SBOM. + ScanSBOM(ctx context.Context, reader io.Reader, mediatype string) (*v1.SBOMScanResponse, error) } // ScannerSBOMer represents a Scanner with SBOM generation capabilities. This diff --git a/proto/api/v1/sbom.proto b/proto/api/v1/sbom.proto new file mode 100644 index 0000000000000..70d833612db49 --- /dev/null +++ b/proto/api/v1/sbom.proto @@ -0,0 +1,31 @@ +syntax = "proto3"; + +package v1; + +import "google/protobuf/timestamp.proto"; +import "storage/image.proto"; + +option go_package = "./api/v1;v1"; +option java_package = "io.stackrox.proto.api.v1"; + +// SBOMScanResponse wraps metadata and scan results for an SBOM. +// +// Components must be JSON marshal/unmarshall compatible with `storage.Image` +// so that output formatting via roxctl is consistent. +// +// There is no service defined for this response as it is handled +// by a custom route (non-gRPC). +// +// next available tag: 3 +message SBOMScanResponse { + string id = 1; + + // next available tag: 5 + message SBOMScan { + string scanner_version = 1; + google.protobuf.Timestamp scan_time = 2; + repeated storage.EmbeddedImageScanComponent components = 3; + storage.DataSource data_source = 4; + } + SBOMScan scan = 2; +} From 7d45b72b40ebcdfc3a14bc2f256e5cd6bb834d76 Mon Sep 17 00:00:00 2001 From: Mark Pedrotti Date: Fri, 13 Feb 2026 22:47:15 -0500 Subject: [PATCH 191/232] ROX-33147: Add SearchFieldLabel as frontend counterpart to backend (#19002) --- .../attributes/complianceScan.ts | 2 +- .../Components/CompoundSearchFilter/types.ts | 3 +- .../Containers/Clusters/searchFilterConfig.ts | 53 ++- ui/apps/platform/src/types/searchOptions.ts | 428 ++++++++++++++++++ 4 files changed, 469 insertions(+), 17 deletions(-) create mode 100644 ui/apps/platform/src/types/searchOptions.ts diff --git a/ui/apps/platform/src/Components/CompoundSearchFilter/attributes/complianceScan.ts b/ui/apps/platform/src/Components/CompoundSearchFilter/attributes/complianceScan.ts index 9f57f2f6edb47..35090441b3070 100644 --- a/ui/apps/platform/src/Components/CompoundSearchFilter/attributes/complianceScan.ts +++ b/ui/apps/platform/src/Components/CompoundSearchFilter/attributes/complianceScan.ts @@ -5,7 +5,7 @@ import type { CompoundSearchFilterAttribute } from '../types'; export const ConfigID: CompoundSearchFilterAttribute = { displayName: 'Config ID', filterChipLabel: 'Compliance scan config ID', - searchTerm: 'Compliance Scan Config Id', + searchTerm: 'Compliance Scan Config ID', inputType: 'text', }; diff --git a/ui/apps/platform/src/Components/CompoundSearchFilter/types.ts b/ui/apps/platform/src/Components/CompoundSearchFilter/types.ts index 877af980ba7e7..c211b58a0b2aa 100644 --- a/ui/apps/platform/src/Components/CompoundSearchFilter/types.ts +++ 
b/ui/apps/platform/src/Components/CompoundSearchFilter/types.ts @@ -1,5 +1,6 @@ import type { SearchCategory } from 'services/SearchService'; import type { FeatureFlagEnvVar } from 'types/featureFlag'; +import type { SearchFieldLabel } from 'types/searchOptions'; import type { NonEmptyArray } from 'utils/type.utils'; import type { ConditionTextInputProps } from './components/SearchFilterConditionText'; @@ -31,7 +32,7 @@ export type SelectSearchFilterGroupedOptions = { type BaseSearchFilterAttribute = { displayName: string; filterChipLabel: string; - searchTerm: string; + searchTerm: SearchFieldLabel; inputType: InputType; featureFlagDependency?: FeatureFlagEnvVar[]; }; diff --git a/ui/apps/platform/src/Containers/Clusters/searchFilterConfig.ts b/ui/apps/platform/src/Containers/Clusters/searchFilterConfig.ts index b5daf274e2bd4..93a5b87066fab 100644 --- a/ui/apps/platform/src/Containers/Clusters/searchFilterConfig.ts +++ b/ui/apps/platform/src/Containers/Clusters/searchFilterConfig.ts @@ -20,26 +20,49 @@ export const statusSelectOptions: SelectSearchFilterOptions['options'] = [ { label: 'Uninitialized', value: 'UNINITIALIZED' }, ]; -function createStatusAttribute(entity: string): CompoundSearchFilterAttribute { - return { - displayName: 'Status', - filterChipLabel: `${entity} status`, - searchTerm: `${entity} status`, - inputType: 'select', - inputProps: { options: statusSelectOptions }, - }; -} +const admissionControlStatusAttribute: CompoundSearchFilterAttribute = { + displayName: 'Status', + filterChipLabel: 'Admission control status', + searchTerm: 'Admission Control Status', + inputType: 'select', + inputProps: { options: statusSelectOptions }, +}; +const clusterStatusAttribute: CompoundSearchFilterAttribute = { + displayName: 'Status', + filterChipLabel: 'Cluster status', + searchTerm: 'Cluster Status', + inputType: 'select', + inputProps: { options: statusSelectOptions }, +}; + +const collectorStatusAttribute: CompoundSearchFilterAttribute = { + displayName: 'Status', + filterChipLabel: 'Collector status', + searchTerm: 'Collector Status', + inputType: 'select', + inputProps: { options: statusSelectOptions }, +}; -const admissionControlStatusAttribute = createStatusAttribute('Admission control'); -const clusterStatusAttribute = createStatusAttribute('Cluster'); -const collectorStatusAttribute = createStatusAttribute('Collector'); -const scannerStatusAttribute = createStatusAttribute('Scanner'); -const sensorStatusAttribute = createStatusAttribute('Sensor'); +const scannerStatusAttribute: CompoundSearchFilterAttribute = { + displayName: 'Status', + filterChipLabel: 'Scanner status', + searchTerm: 'Scanner Status', + inputType: 'select', + inputProps: { options: statusSelectOptions }, +}; + +const sensorStatusAttribute: CompoundSearchFilterAttribute = { + displayName: 'Status', + filterChipLabel: 'Sensor status', + searchTerm: 'Sensor Status', + inputType: 'select', + inputProps: { options: statusSelectOptions }, +}; const lastContactAttributes: CompoundSearchFilterAttribute = { displayName: 'Date', filterChipLabel: 'Last contact', - searchTerm: 'Last contact', + searchTerm: 'Last Contact', inputType: 'date-picker', }; diff --git a/ui/apps/platform/src/types/searchOptions.ts b/ui/apps/platform/src/types/searchOptions.ts new file mode 100644 index 0000000000000..63d8015a04a50 --- /dev/null +++ b/ui/apps/platform/src/types/searchOptions.ts @@ -0,0 +1,428 @@ +// Frontend counterpart to backend source of truth: +// https://github.com/stackrox/stackrox/blob/master/pkg/search/options.go + 
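+// Typing searchTerm as SearchFieldLabel makes a mismatched label (for example
+// 'Compliance Scan Config Id' instead of 'Compliance Scan Config ID') a compile-time error.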
+// Minimal conversion: original order with empty comments in place of empty lines. +export type SearchFieldLabel = + | 'Cluster' + | 'Cluster ID' + | 'Cluster Label' + | 'Cluster Scope' + | 'Cluster Type' + | 'Cluster Discovered Time' + | 'Cluster Platform Type' + | 'Cluster Kubernetes Version' + // cluster health search fields + | 'Cluster Status' + | 'Sensor Status' + | 'Collector Status' + | 'Admission Control Status' + | 'Scanner Status' + | 'Last Contact' + // + | 'Policy ID' + | 'Enforcement' + | 'Policy' + | 'Policy Category' + | 'Policy Category ID' + // + | 'Lifecycle Stage' + | 'Description' + | 'Category' + | 'Severity' + | 'SEVERITY' // TODO replace UPPERCASE with Title Case + | 'Disabled' + // + | 'CVE ID' + | 'CVE' + | 'CVE Type' + | 'CVE Published On' + | 'CVE Fix Available Timestamp' + | 'CVE Created Time' + | 'CVE Snoozed' + | 'CVE Snooze Expiry' + | 'CVSS' + | 'NVD CVSS' + | 'Impact Score' + | 'Vulnerability State' + | 'CVE Orphaned' + | 'CVE Orphaned Time' + | 'EPSS Probability' + | 'Known Exploit' // frontend only pending backend implementation see obsolete #16887 + | 'Known Ransomware Campaign' // frontend only pending backend implementation see obsolete #16887 + | 'Advisory Name' + | 'Advisory Link' + // + | 'CVE Info' + // + | 'Component' + | 'Component ID' + | 'Component Version' + | 'Component Source' + | 'Component Location' + | 'Component Top CVSS' + | 'Dockerfile Instruction Keyword' + | 'Dockerfile Instruction Value' + | 'First Image Occurrence Timestamp' + | 'First System Occurrence Timestamp' + | 'Host IPC' + | 'Host Network' + | 'Host PID' + | 'Image Created Time' + | 'Image' + | 'Image Sha' + | 'Image Signature Fetched Time' + | 'Image Signature Verified By' + | 'Image Registry' + | 'Image Remote' + | 'Image Scan Time' + | 'Node Scan Time' + | 'Image OS' + | 'Image Tag' + | 'Image User' + | 'Image Command' + | 'Image CVE Count' + | 'Image Entrypoint' + | 'Image Label' + | 'Image Volumes' + | 'Fixable' + | 'FIXABLE' // TODO replace UPPERCASE with Title Case + | 'Fixed By' + | 'Cluster CVE Fixed By' + | 'Cluster CVE Fixable' + | 'CLUSTER CVE FIXABLE' // TODO replace UPPERCASE with Title Case + | 'Fixable CVE Count' + | 'Last Updated' + | 'Image Top CVSS' + | 'Node Top CVSS' + | 'Image ID' + | 'Unknown CVE Count' + | 'Fixable Unknown CVE Count' + | 'Critical CVE Count' + | 'Fixable Critical CVE Count' + | 'Important CVE Count' + | 'Fixable Important CVE Count' + | 'Moderate CVE Count' + | 'Fixable Moderate CVE Count' + | 'Low CVE Count' + | 'Fixable Low CVE Count' + // + // Base Image + | 'Base Image Id' + | 'Base Image Repository' + | 'Base Image Tag' + | 'Base Image Active' + | 'Base Image Manifest Digest' + | 'Base Image First Layer Digest' + | 'Base Image Layer Digest' + | 'Base Image Index' + | 'Base Image Discovered At' + // + // Deployment related fields + | 'Add Capabilities' + | 'Allow Privilege Escalation' + | 'AppArmor Profile' + | 'Automount Service Account Token' + | 'Deployment Annotation' + | 'CPU Cores Limit' + | 'CPU Cores Request' + | 'Container ID' + | 'Container Image Digest' + | 'Container Name' + | 'Deployment ID' + | 'Deployment' + | 'Deployment Label' + | 'Deployment Type' + | 'Drop Capabilities' + | 'Environment Key' + | 'Environment Value' + | 'Environment Variable Source' + | 'Exposed Node Port' + | 'Exposing Service' + | 'Exposing Service Port' + | 'Exposure Level' + | 'External IP' + | 'External Hostname' + | 'Image Pull Secret' + | 'Liveness Probe Defined' + | 'Max Exposure Level' + | 'Memory Limit (MB)' + | 'Memory Request (MB)' 
+ | 'Mount Propagation' + | 'Orchestrator Component' + | 'Platform Component' + // PolicyViolated is a fake search field to filter deployments that have violation. + // This is handled/supported only by deployments sub-resolver of policy resolver. + // Note that 'Policy Violated=false' is not yet supported. + | 'Policy Violated' + | 'Port' + | 'Port Protocol' + // Priority is used in risk datastore internally. + | 'Priority' + | 'Administration Usage Timestamp' + | 'Administration Usage Nodes' + | 'Administration Usage CPU Units' + | 'Cluster Risk Priority' + | 'Namespace Risk Priority' + | 'Privileged' + | 'Process Tag' + | 'Read Only Root Filesystem' + | 'Replicas' + | 'Readiness Probe Defined' + | 'Secret ID' + | 'Secret' + | 'Secret Path' + | 'Seccomp Profile Type' + | 'Service Account' + | 'Service Account Permission Level' + | 'Service Account Label' + | 'Service Account Annotation' + | 'Created' + | 'Volume Name' + | 'Volume Source' + | 'Volume Destination' + | 'Volume ReadOnly' + | 'Volume Type' + | 'Taint Key' + | 'Taint Value' + | 'Toleration Key' + | 'Toleration Value' + | 'Taint Effect' + // + | 'Alert ID' + | 'Violation' + | 'Violation State' + | 'Violation Time' + | 'Tag' + | 'Entity Type' + // + // Pod Search fields + | 'Pod UID' + | 'Pod ID' + | 'Pod Name' + | 'Pod Label' + // + // ProcessIndicator Search fields + | 'Process ID' + | 'Process Path' + | 'Process Name' + | 'Process Arguments' + | 'Process Ancestor' + | 'Process UID' + | 'Process Creation Time' + | 'Process Container Start Time' + // + // FileActivity Search fields + | 'Effective Path' + | 'Actual Path' + | 'File Operation' + // + // ProcessListeningOnPort Search fields + | 'Closed' + | 'Closed Time' + // + // Secret search fields + | 'Secret Type' + | 'Cert Expiration' + | 'Image Pull Secret Registry' + // + // Compliance search fields + | 'Standard' + | 'Standard ID' + // + | 'Control Group ID' + | 'Control Group' + // + | 'Control ID' + | 'Control' + // + | 'Compliance Operator Integration ID' + | 'Compliance Operator Version' + | 'Compliance Scan Name' + | 'Compliance Operator Installed' + | 'Compliance Rule Severity' + | 'Compliance Operator Status' + | 'Compliance Check Status' + | 'Compliance Rule Name' + | 'Compliance Profile ID' + | 'Compliance Profile Name' + | 'Compliance Config Profile Name' + | 'Compliance Profile Product Type' + | 'Compliance Profile Version' + | 'Compliance Standard' + | 'Compliance Control' + | 'Compliance Scan Config ID' + | 'Compliance Scan Config Name' + | 'Compliance Check ID' + | 'Compliance Check UID' + | 'Compliance Check Name' + | 'Compliance Check Rationale' + | 'Compliance Check Last Started Time' + | 'Compliance Scan Config Last Updated Time' + | 'Compliance Check Result Created Time' + | 'Compliance Scan Last Executed Time' + | 'Compliance Scan Last Started Time' + | 'Compliance Rule Type' + | 'Compliance Scan Setting Binding Name' + | 'Compliance Suite Name' + | 'Compliance Scan Result' + | 'Profile Ref ID' + | 'Scan Ref ID' + | 'Rule Ref ID' + | 'Compliance Remediation Name' + | 'Compliance Benchmark Name' + | 'Compliance Benchmark Short Name' + | 'Compliance Benchmark Version' + | 'Compliance Report Name' + | 'Compliance Report State' + | 'Compliance Report Started Time' + | 'Compliance Report Completed Time' + | 'Compliance Report Request Type' + | 'Compliance Report Notification Method' + // + // Node search fields + | 'Node' + | 'Node ID' + | 'Operating System' + | 'Container Runtime' + | 'Node Join Time' + | 'Node Label' + | 'Node Annotation' + // + // 
Namespace Search Fields + | 'Namespace ID' + | 'Namespace' + | 'Namespace Annotation' + | 'Namespace Label' + // + // Role Search Fields + | 'Role ID' + | 'Role' + | 'Role Label' + | 'Role Annotation' + | 'Cluster Role' + // + // Role Binding Search Fields + | 'Role Binding ID' + | 'Role Binding' + | 'Role Binding Label' + | 'Role Binding Annotation' + // + // Subject search fields + | 'Subject Kind' + | 'Subject' + // + // General + | 'Created Time' + // + // Inactive Deployment + | 'Inactive Deployment' + // + // Risk Search Fields + | 'Risk Score' + | 'Node Risk Score' + | 'Deployment Risk Score' + | 'Image Risk Score' + | 'Component Risk Score' + | 'Risk Subject Type' + | 'Component Layer Type' + // + | 'Policy Last Updated' + // + // Following are helper fields used for sorting + // For example, "SORTPolicyName" field should be used to sort policies when the query sort field is "PolicyName" + | 'SORT_Policy' + | 'SORT_Lifecycle Stage' + | 'SORT_Enforcement' + // + // Omit derived fields + // + // External network sources fields + | 'Default External Source' + | 'Discovered External Source' + | 'External Source Address' + // + // Report configurations search fields + | 'Report Name' + | 'Report Type' + | 'Report Configuration ID' + // View Based report search fields + | 'Area Of Concern' + // + // Resource alerts search fields + | 'Resource' + | 'Resource Type' + // + // Vulnerability Watch Request fields + | 'Request Name' + | 'Request Status' + | 'Expired Request' + | 'Expiry Type' + | 'Request Expiry Time' + | 'Request Expires When Fixed' + | 'Requested Vulnerability State' + | 'User ID' + | 'User Name' + | 'Image Registry Scope' + | 'Image Remote Scope' + | 'Image Tag Scope' + | 'Requester User ID' + | 'Requester User Name' + | 'Approver User ID' + | 'Approver User Name' + | 'Deferral Update CVEs' + | 'False Positive Update CVEs' + // + | 'Compliance Domain ID' + | 'Compliance Run ID' + | 'Compliance Run Finished Timestamp' + // + // Resource Collection fields + | 'Collection ID' + | 'Collection Name' + | 'Embedded Collection ID' + // + // Group fields + | 'Group Auth Provider' + | 'Group Key' + | 'Group Value' + // + // API Token fields + | 'Expiration' + | 'Revoked' + // + // Version fields + | 'Version' + | 'Minimum Sequence Number' + | 'Current Sequence Number' + | 'Last Persisted' + // + // Blob store fields + | 'Blob Name' + | 'Blob Length' + | 'Blob Modified On' + // + // Report Metadata fields + | 'Report State' + | 'Report Init Time' + | 'Report Completion Time' + | 'Report Request Type' + | 'Report Notification Method' + // + // Event fields. + | 'Event Domain' + | 'Event Type' + | 'Event Level' + | 'Event Occurrence' + // + // Integration fields. + | 'Integration ID' + | 'Integration Name' + | 'Integration Type' + // + // AuthProvider fields. + | 'AuthProvider Name' + // + // Virtual Machine fields. 
+ | 'Virtual Machine ID' + | 'Virtual Machine Name' + | 'SCANNABLE'; // frontend only pending backend TODO replace SCANNABLE with Scan Status From b603bbc187de5bb046f9784c24466462287f5657 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 14 Feb 2026 03:28:28 -0700 Subject: [PATCH 192/232] chore(deps): bump the k8s-io group with 8 updates (#18963) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 22 +++++++++++----------- go.sum | 44 ++++++++++++++++++++++---------------------- 2 files changed, 33 insertions(+), 33 deletions(-) diff --git a/go.mod b/go.mod index 00591557777bb..91d15471e2f48 100644 --- a/go.mod +++ b/go.mod @@ -163,14 +163,14 @@ require ( gorm.io/driver/postgres v1.6.0 gorm.io/gorm v1.31.1 helm.sh/helm/v3 v3.19.4 - k8s.io/api v0.35.0 - k8s.io/apiextensions-apiserver v0.35.0 - k8s.io/apimachinery v0.35.0 - k8s.io/apiserver v0.35.0 - k8s.io/cli-runtime v0.34.3 - k8s.io/client-go v0.35.0 - k8s.io/kubectl v0.34.3 - k8s.io/kubelet v0.32.11 + k8s.io/api v0.35.1 + k8s.io/apiextensions-apiserver v0.35.1 + k8s.io/apimachinery v0.35.1 + k8s.io/apiserver v0.35.1 + k8s.io/cli-runtime v0.34.4 + k8s.io/client-go v0.35.1 + k8s.io/kubectl v0.34.4 + k8s.io/kubelet v0.32.12 k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 kubevirt.io/api v1.7.0 sigs.k8s.io/controller-runtime v0.23.1 @@ -511,9 +511,9 @@ require ( gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/code-generator v0.35.0 // indirect - k8s.io/component-base v0.35.0 // indirect - k8s.io/component-helpers v0.34.3 // indirect + k8s.io/code-generator v0.35.1 // indirect + k8s.io/component-base v0.35.1 // indirect + k8s.io/component-helpers v0.34.4 // indirect k8s.io/gengo/v2 v2.0.0-20250922181213-ec3ebc5fd46b // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect diff --git a/go.sum b/go.sum index ee405d3aa5c1d..24800d30efcaa 100644 --- a/go.sum +++ b/go.sum @@ -2349,26 +2349,26 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= -k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY= -k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA= -k8s.io/apiextensions-apiserver v0.35.0 h1:3xHk2rTOdWXXJM+RDQZJvdx0yEOgC0FgQ1PlJatA5T4= -k8s.io/apiextensions-apiserver v0.35.0/go.mod h1:E1Ahk9SADaLQ4qtzYFkwUqusXTcaV2uw3l14aqpL2LU= +k8s.io/api v0.35.1 h1:0PO/1FhlK/EQNVK5+txc4FuhQibV25VLSdLMmGpDE/Q= +k8s.io/api v0.35.1/go.mod h1:28uR9xlXWml9eT0uaGo6y71xK86JBELShLy4wR1XtxM= +k8s.io/apiextensions-apiserver v0.35.1 h1:p5vvALkknlOcAqARwjS20kJffgzHqwyQRM8vHLwgU7w= +k8s.io/apiextensions-apiserver v0.35.1/go.mod h1:2CN4fe1GZ3HMe4wBr25qXyJnJyZaquy4nNlNmb3R7AQ= k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= -k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8= -k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= -k8s.io/apiserver v0.35.0 h1:CUGo5o+7hW9GcAEF3x3usT3fX4f9r8xmgQeCBDaOgX4= -k8s.io/apiserver v0.35.0/go.mod h1:QUy1U4+PrzbJaM3XGu2tQ7U9A4udRRo5cyxkFX0GEds= 
-k8s.io/cli-runtime v0.34.3 h1:YRyMhiwX0dT9lmG0AtZDaeG33Nkxgt9OlCTZhRXj9SI= -k8s.io/cli-runtime v0.34.3/go.mod h1:GVwL1L5uaGEgM7eGeKjaTG2j3u134JgG4dAI6jQKhMc= -k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE= -k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o= +k8s.io/apimachinery v0.35.1 h1:yxO6gV555P1YV0SANtnTjXYfiivaTPvCTKX6w6qdDsU= +k8s.io/apimachinery v0.35.1/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= +k8s.io/apiserver v0.35.1 h1:potxdhhTL4i6AYAa2QCwtlhtB1eCdWQFvJV6fXgJzxs= +k8s.io/apiserver v0.35.1/go.mod h1:BiL6Dd3A2I/0lBnteXfWmCFobHM39vt5+hJQd7Lbpi4= +k8s.io/cli-runtime v0.34.4 h1:QdGWDtJENTskib2Ab304Xwklv+lk4mxz+fd2ng36lZY= +k8s.io/cli-runtime v0.34.4/go.mod h1:PED/aZzYDUv6nPRGYXCFUnNOVBWlUDlVITu0Q3djDus= +k8s.io/client-go v0.35.1 h1:+eSfZHwuo/I19PaSxqumjqZ9l5XiTEKbIaJ+j1wLcLM= +k8s.io/client-go v0.35.1/go.mod h1:1p1KxDt3a0ruRfc/pG4qT/3oHmUj1AhSHEcxNSGg+OA= k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= -k8s.io/code-generator v0.35.0 h1:TvrtfKYZTm9oDF2z+veFKSCcgZE3Igv0svY+ehCmjHQ= -k8s.io/code-generator v0.35.0/go.mod h1:iS1gvVf3c/T71N5DOGYO+Gt3PdJ6B9LYSvIyQ4FHzgc= -k8s.io/component-base v0.35.0 h1:+yBrOhzri2S1BVqyVSvcM3PtPyx5GUxCK2tinZz1G94= -k8s.io/component-base v0.35.0/go.mod h1:85SCX4UCa6SCFt6p3IKAPej7jSnF3L8EbfSyMZayJR0= -k8s.io/component-helpers v0.34.3 h1:Iws1GQfM89Lxo7IZITGmVdFOW0Bmyd7SVwwIu1/CCkE= -k8s.io/component-helpers v0.34.3/go.mod h1:S8HjjMTrUDVMVPo2EdNYRtQx9uIEIueQYdPMOe9UxJs= +k8s.io/code-generator v0.35.1 h1:yLKR2la7Z9cWT5qmk67ayx8xXLM4RRKQMnC8YPvTWRI= +k8s.io/code-generator v0.35.1/go.mod h1:F2Fhm7aA69tC/VkMXLDokdovltXEF026Tb9yfQXQWKg= +k8s.io/component-base v0.35.1 h1:XgvpRf4srp037QWfGBLFsYMUQJkE5yMa94UsJU7pmcE= +k8s.io/component-base v0.35.1/go.mod h1:HI/6jXlwkiOL5zL9bqA3en1Ygv60F03oEpnuU1G56Bs= +k8s.io/component-helpers v0.34.4 h1:NsYzF6cDjmACfNLhPuInNSeUhCOERZWITvWb4sQPpmE= +k8s.io/component-helpers v0.34.4/go.mod h1:LRO0sHo5LAGIh0jrZKngorJC1W54oJrk90q9pQDHM/4= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo/v2 v2.0.0-20250922181213-ec3ebc5fd46b h1:gMplByicHV/TJBizHd9aVEsTYoJBnnUAT5MHlTkbjhQ= @@ -2383,10 +2383,10 @@ k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lV k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= -k8s.io/kubectl v0.34.3 h1:vpM6//153gh5gvsYHXWHVJ4l4xmN5QFwTSmlfd8icm8= -k8s.io/kubectl v0.34.3/go.mod h1:zZQHtIZoUqTP1bAnPzq/3W1jfc0NeOeunFgcswrfg1c= -k8s.io/kubelet v0.32.11 h1:pC2gWKhoODaiMAATeE32l2rh0ndOb46WTNio25AMXiM= -k8s.io/kubelet v0.32.11/go.mod h1:44b+NoN8TKFF0SxohEgmYNBMu0HiA17Z8f0pblM3oTY= +k8s.io/kubectl v0.34.4 h1:60NkmD2prPpAJIl81CO6QkQXJ2UlhH5LGIpFxlqK9D8= +k8s.io/kubectl v0.34.4/go.mod h1:Yqa6hDnryvuHFWA/NwJExnSATXMdPeMtOZstdTXeeIM= +k8s.io/kubelet v0.32.12 h1:IwokHQB9fMbeXq5IeTuswrp6JApn7BvnrAINzt7n4n0= +k8s.io/kubelet v0.32.12/go.mod h1:UCIXOO4MUE6gdTYN8qGS6lxS2d10ebz2bkG/q2EmW1M= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod 
h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= From 3749e29c22e76f4afe1e2e0de6d041315cf5e52a Mon Sep 17 00:00:00 2001 From: Vlad Bologa Date: Sat, 14 Feb 2026 14:57:25 +0100 Subject: [PATCH 193/232] ROX-33084: fix local scan during CA rotation in co-located installs (#18975) --- .../central/extensions/common_test.go | 6 +- .../central/extensions/reconcile_tls.go | 82 +++++++++++++------ pkg/certgen/ca.go | 6 ++ pkg/scannerv4/client/client.go | 13 ++- pkg/scannerv4/client/options.go | 9 ++ .../centralcabundle/central_ca_bundle.go | 41 ++++++++++ .../centralcabundle/central_ca_bundle_test.go | 38 +++++++++ sensor/common/scannerclient/grpc_client.go | 11 ++- .../certrefresh/tls_challenge_cert_loader.go | 5 +- 9 files changed, 179 insertions(+), 32 deletions(-) create mode 100644 sensor/common/centralcabundle/central_ca_bundle.go create mode 100644 sensor/common/centralcabundle/central_ca_bundle_test.go diff --git a/operator/internal/central/extensions/common_test.go b/operator/internal/central/extensions/common_test.go index d2745ec052111..cbbfc5cb21310 100644 --- a/operator/internal/central/extensions/common_test.go +++ b/operator/internal/central/extensions/common_test.go @@ -287,9 +287,13 @@ func verifyCertMatchesCAHash(t *testing.T, secretData types.SecretDataMap, cache return } + if secondaryCAPEM, hasSecondaryCA := secretData[mtls.SecondaryCACertFileName]; hasSecondaryCA { + caPEM = append(caPEM, secondaryCAPEM...) + } + secretCAHash := confighash.ComputeCAHash(caPEM) assert.Equal(t, secretCAHash, cachedCAHash, - "CA hash in RenderCache should match the CA certificate in secret %s", secretName) + "CA hash in RenderCache should match the CA certificate(s) in secret %s", secretName) } func verifySecretsMatch( diff --git a/operator/internal/central/extensions/reconcile_tls.go b/operator/internal/central/extensions/reconcile_tls.go index 7fde1b2a93a7f..ed383ac1754b9 100644 --- a/operator/internal/central/extensions/reconcile_tls.go +++ b/operator/internal/central/extensions/reconcile_tls.go @@ -1,6 +1,7 @@ package extensions import ( + "bytes" "context" "crypto/x509" "fmt" @@ -62,6 +63,7 @@ type createCentralTLSExtensionRun struct { *commonExtensions.SecretReconciliator ca mtls.CA // primary CA, used to issue Central-services certificates + secondaryCA mtls.CA // secondary CA, for CA rotation support caRotationAction carotation.Action centralObj *platform.Central currentTime time.Time @@ -108,8 +110,13 @@ func (r *createCentralTLSExtensionRun) Execute(ctx context.Context) error { } if r.ca != nil { - // Add the hash of the CA to the render cache for the pod template annotation post renderer - r.renderCache.SetCAHash(r.centralObj, confighash.ComputeCAHash(r.ca.CertPEM())) + // Add the hash of the CA(s) to the render cache for the pod template annotation post renderer. + caPEM := r.ca.CertPEM() + if r.secondaryCA != nil { + // Include secondary CA if present so that pods restart when it's added during CA rotation. + caPEM = append(caPEM, r.secondaryCA.CertPEM()...) + } + r.renderCache.SetCAHash(r.centralObj, confighash.ComputeCAHash(caPEM)) } return nil // reconcileInitBundleSecrets not called due to ROX-9023. TODO(ROX-9969): call after the init-bundle cert rotation stabilization. 
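The hunk above widens the render-cache CA hash to cover the optional secondary CA, so that a secondary CA appearing or disappearing during rotation changes the pod-template annotation and restarts the managed pods. A minimal standalone sketch of that idea, using sha256 as a stand-in for confighash.ComputeCAHash (whose implementation is not shown in this patch) and plain PEM byte slices in place of mtls.CA:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// caBundleHash hashes the primary CA PEM followed by the optional secondary CA PEM.
// Any change to the bundle (including a secondary CA being added or removed)
// yields a different hash, which the operator writes into the pod-template
// annotation to force a rollout.
func caBundleHash(primaryPEM, secondaryPEM []byte) string {
	bundle := append([]byte{}, primaryPEM...)
	bundle = append(bundle, secondaryPEM...) // no-op when there is no secondary CA
	sum := sha256.Sum256(bundle)
	return hex.EncodeToString(sum[:])
}

func main() {
	before := caBundleHash([]byte("primary-ca-pem"), nil)
	after := caBundleHash([]byte("primary-ca-pem"), []byte("secondary-ca-pem"))
	fmt.Println(before != after) // true: the rollout-triggering annotation changes
}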
@@ -170,11 +177,22 @@ func (r *createCentralTLSExtensionRun) validateAndConsumeCentralTLSData(fileMap return errors.Wrap(err, "primary CA is not valid at the present time") } - rotationAction, err := r.determineCARotationAction(fileMap) - if err != nil { - return err + // Load secondary CA (its presence is optional). + r.secondaryCA, err = certgen.LoadSecondaryCAFromFileMap(fileMap) + if err != nil && !errors.Is(err, certgen.ErrNoCACert) { + return errors.Wrap(err, "loading secondary CA failed") + } + if r.secondaryCA != nil { + if err := r.secondaryCA.CheckProperties(); err != nil { + return errors.Wrap(err, "loaded secondary CA is invalid") + } + } + + var secondaryCACert *x509.Certificate + if r.secondaryCA != nil { + secondaryCACert = r.secondaryCA.Certificate() } - r.caRotationAction = rotationAction + r.caRotationAction = carotation.DetermineAction(r.ca.Certificate(), secondaryCACert, r.currentTime) if r.caRotationAction != carotation.NoAction { return errors.New("CA rotation action needed") } @@ -186,23 +204,6 @@ func (r *createCentralTLSExtensionRun) validateAndConsumeCentralTLSData(fileMap return nil } -func (r *createCentralTLSExtensionRun) determineCARotationAction(fileMap types.SecretDataMap) (carotation.Action, error) { - secondaryCA, err := certgen.LoadSecondaryCAFromFileMap(fileMap) - var secondaryCACert *x509.Certificate - // the presence of a secondary CA certificate is optional - if err != nil && !errors.Is(err, certgen.ErrNoCACert) { - return carotation.NoAction, errors.Wrap(err, "loading secondary CA failed") - } - if secondaryCA != nil { - if err := secondaryCA.CheckProperties(); err != nil { - return carotation.NoAction, errors.Wrap(err, "loaded secondary service CA certificate is invalid") - } - secondaryCACert = secondaryCA.Certificate() - } - - return carotation.DetermineAction(r.ca.Certificate(), secondaryCACert, r.currentTime), nil -} - func (r *createCentralTLSExtensionRun) generateCentralTLSData(old types.SecretDataMap) (types.SecretDataMap, error) { var ( err error @@ -223,11 +224,15 @@ func (r *createCentralTLSExtensionRun) generateCentralTLSData(old types.SecretDa } if r.caRotationAction != carotation.NoAction { - primaryCA, err := certgen.LoadCAFromFileMap(newFileMap) + r.ca, err = certgen.LoadCAFromFileMap(newFileMap) if err != nil { return nil, errors.Wrap(err, "reloading new primary CA failed") } - r.ca = primaryCA + + r.secondaryCA, err = certgen.LoadSecondaryCAFromFileMap(newFileMap) + if err != nil && !errors.Is(err, certgen.ErrNoCACert) { + return nil, errors.Wrap(err, "loading secondary CA after rotation action failed") + } } } @@ -385,6 +390,29 @@ func (r *createCentralTLSExtensionRun) validateServiceTLSData(serviceType storag if err := certgen.VerifyCACertInFileMap(fileMap, r.ca); err != nil { return err } + + if centralCARotationEnabled.BooleanSetting() { + if err := r.verifySecondaryCACertInFileMap(fileMap); err != nil { + return err + } + } + return nil +} + +func (r *createCentralTLSExtensionRun) verifySecondaryCACertInFileMap(fileMap types.SecretDataMap) error { + secondaryCACertPEM := fileMap[mtls.SecondaryCACertFileName] + if r.secondaryCA == nil { + if len(secondaryCACertPEM) > 0 { + return errors.New("unexpected secondary CA certificate in file map") + } + return nil + } + if len(secondaryCACertPEM) == 0 { + return errors.New("missing secondary CA certificate in file map") + } + if !bytes.Equal(secondaryCACertPEM, r.secondaryCA.CertPEM()) { + return errors.New("mismatching secondary CA certificate in file map") + } return nil } @@ -410,6 
+438,10 @@ func (r *createCentralTLSExtensionRun) generateServiceTLSData(subj mtls.Subject, return err } certgen.AddCACertToFileMap(fileMap, r.ca) + + if centralCARotationEnabled.BooleanSetting() && r.secondaryCA != nil { + certgen.AddSecondaryCACertToFileMap(fileMap, r.secondaryCA) + } return nil } diff --git a/pkg/certgen/ca.go b/pkg/certgen/ca.go index 01a82ab1a2b94..a9c719338d4fa 100644 --- a/pkg/certgen/ca.go +++ b/pkg/certgen/ca.go @@ -80,6 +80,12 @@ func AddCACertToFileMap(fileMap map[string][]byte, ca mtls.CA) { fileMap[mtls.CACertFileName] = ca.CertPEM() } +// AddSecondaryCACertToFileMap adds the public secondary CA certificate to the file map. +// This is used for service TLS secrets that need to trust both CAs but should not have signing capability. +func AddSecondaryCACertToFileMap(fileMap map[string][]byte, ca mtls.CA) { + fileMap[mtls.SecondaryCACertFileName] = ca.CertPEM() +} + // VerifyCACertInFileMap verifies that the public CA certificate stored in the given file // map is the same as the given one. func VerifyCACertInFileMap(fileMap map[string][]byte, ca mtls.CA) error { diff --git a/pkg/scannerv4/client/client.go b/pkg/scannerv4/client/client.go index 8f28691514f86..3ce05d42a14ed 100644 --- a/pkg/scannerv4/client/client.go +++ b/pkg/scannerv4/client/client.go @@ -2,6 +2,7 @@ package client import ( "context" + "crypto/x509" "errors" "fmt" "net/url" @@ -118,7 +119,7 @@ func NewGRPCScanner(ctx context.Context, opts ...Option) (Scanner, error) { if o.comboMode { // Both o.indexerOpts and o.matcherOpts are the same, so just choose one. - conn, err := createGRPCConn(ctx, o.indexerOpts) + conn, err := createGRPCConn(ctx, o.indexerOpts, o.rootCAs) if err != nil { return nil, err } @@ -141,7 +142,7 @@ func NewGRPCScanner(ctx context.Context, opts ...Option) (Scanner, error) { var indexerClient v4.IndexerClient if o.indexerOpts.address != "" { - conn, err := createGRPCConn(ctx, o.indexerOpts) + conn, err := createGRPCConn(ctx, o.indexerOpts, o.rootCAs) if err != nil { return nil, err } @@ -151,7 +152,7 @@ func NewGRPCScanner(ctx context.Context, opts ...Option) (Scanner, error) { var matcherClient v4.MatcherClient if o.matcherOpts.address != "" { - conn, err := createGRPCConn(ctx, o.matcherOpts) + conn, err := createGRPCConn(ctx, o.matcherOpts, o.rootCAs) if err != nil { return nil, err } @@ -176,7 +177,7 @@ func (c *gRPCScanner) Close() error { return errList.ToError() } -func createGRPCConn(ctx context.Context, o connOptions) (*grpc.ClientConn, error) { +func createGRPCConn(ctx context.Context, o connOptions, rootCAs []*x509.Certificate) (*grpc.ClientConn, error) { // Prefix address with dns:/// to use the DNS name resolver. address := "dns:///" + o.address @@ -219,6 +220,10 @@ func createGRPCConn(ctx context.Context, o connOptions) (*grpc.ClientConn, error clientconn.MaxMsgReceiveSize(maxRespMsgSize), clientconn.WithDialOptions(dialOpts...), } + + if len(rootCAs) > 0 { + connOpts = append(connOpts, clientconn.AddRootCAs(rootCAs...)) + } return clientconn.AuthenticatedGRPCConnection(ctx, address, o.mTLSSubject, connOpts...) 
} diff --git a/pkg/scannerv4/client/options.go b/pkg/scannerv4/client/options.go index 6133365b3c332..9141b2e02ea44 100644 --- a/pkg/scannerv4/client/options.go +++ b/pkg/scannerv4/client/options.go @@ -1,6 +1,7 @@ package client import ( + "crypto/x509" "errors" "fmt" "net" @@ -41,6 +42,7 @@ type options struct { indexerOpts connOptions matcherOpts connOptions comboMode bool + rootCAs []*x509.Certificate } type ImageRegistryOpt struct { @@ -141,6 +143,13 @@ func WithMatcherAddress(address string) Option { } } +// WithRootCAs specifies additional root CAs to trust when verifying the server's certificate. +func WithRootCAs(certs ...*x509.Certificate) Option { + return func(o *options) { + o.rootCAs = append(o.rootCAs, certs...) + } +} + func makeOptions(opts ...Option) (options, error) { o := defaultOptions for _, opt := range opts { diff --git a/sensor/common/centralcabundle/central_ca_bundle.go b/sensor/common/centralcabundle/central_ca_bundle.go new file mode 100644 index 0000000000000..e0a9d025cb46a --- /dev/null +++ b/sensor/common/centralcabundle/central_ca_bundle.go @@ -0,0 +1,41 @@ +// Package centralcabundle provides a global cache for Central CA certificates obtained via TLSChallenge. +// +// Used by Scanner V4 client to trust both CAs during CA rotation. Global state is used because +// storing secondary-ca.pem in tls-cert-* secrets wouldn't work for Helm deployments (pods don't +// restart to pick up CA changes). +// +// This could be replaced by loading certs from secrets if ROX-29506 (certificate hot reloading) +// is implemented, or if Helm-managed Secured Clusters are deprecated. +package centralcabundle + +import ( + "crypto/x509" + + "github.com/stackrox/rox/pkg/logging" + "github.com/stackrox/rox/pkg/sync" +) + +var ( + log = logging.LoggerForModule() + + caCerts []*x509.Certificate + caCertsMutex sync.RWMutex +) + +// Set stores the Central CA certificates. +func Set(cas []*x509.Certificate) { + caCertsMutex.Lock() + defer caCertsMutex.Unlock() + caCerts = append([]*x509.Certificate(nil), cas...) + if len(cas) > 0 { + log.Infof("Stored %d Central CA certificate(s)", len(cas)) + } +} + +// Get returns a copy of the stored Central CA certificates. +// Returns nil if no CAs have been stored. +func Get() []*x509.Certificate { + caCertsMutex.RLock() + defer caCertsMutex.RUnlock() + return append([]*x509.Certificate(nil), caCerts...) 
+} diff --git a/sensor/common/centralcabundle/central_ca_bundle_test.go b/sensor/common/centralcabundle/central_ca_bundle_test.go new file mode 100644 index 0000000000000..489232ff222ff --- /dev/null +++ b/sensor/common/centralcabundle/central_ca_bundle_test.go @@ -0,0 +1,38 @@ +package centralcabundle + +import ( + "crypto/x509" + "crypto/x509/pkix" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSetAndGet(t *testing.T) { + Set(nil) + + result := Get() + assert.Empty(t, result) + + certs := []*x509.Certificate{ + {Subject: pkix.Name{CommonName: "CA1"}}, + {Subject: pkix.Name{CommonName: "CA2"}}, + } + Set(certs) + + result = Get() + assert.Len(t, result, 2) + assert.Equal(t, "CA1", result[0].Subject.CommonName) + assert.Equal(t, "CA2", result[1].Subject.CommonName) +} + +func TestSetNilClearsStore(t *testing.T) { + Set([]*x509.Certificate{ + {Subject: pkix.Name{CommonName: "CA1"}}, + }) + + assert.Len(t, Get(), 1) + Set(nil) + + assert.Empty(t, Get()) +} diff --git a/sensor/common/scannerclient/grpc_client.go b/sensor/common/scannerclient/grpc_client.go index 44491ceacb14e..1dd8853914d86 100644 --- a/sensor/common/scannerclient/grpc_client.go +++ b/sensor/common/scannerclient/grpc_client.go @@ -17,6 +17,7 @@ import ( "github.com/stackrox/rox/pkg/registries/types" pkgscanner "github.com/stackrox/rox/pkg/scannerv4" "github.com/stackrox/rox/pkg/scannerv4/client" + "github.com/stackrox/rox/sensor/common/centralcabundle" scannerV1 "github.com/stackrox/scanner/generated/scanner/api/v1" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -156,7 +157,15 @@ func dialV2() (ScannerClient, error) { // dialV4 connect to scanner V4 gRPC and return a new ScannerClient. func dialV4() (ScannerClient, error) { ctx := context.Background() - c, err := client.NewGRPCScanner(ctx, client.WithIndexerAddress(env.ScannerV4IndexerEndpoint.Setting())) + opts := []client.Option{client.WithIndexerAddress(env.ScannerV4IndexerEndpoint.Setting())} + + // Make sure the Scanner V4 client trusts all internal CAs. + if cas := centralcabundle.Get(); len(cas) > 0 { + log.Infof("Adding %d Central CA certificate(s) to Scanner V4 client", len(cas)) + opts = append(opts, client.WithRootCAs(cas...)) + } + + c, err := client.NewGRPCScanner(ctx, opts...) if err != nil { return nil, errors.Wrap(err, "dialing scanner V4 gRPC client") } diff --git a/sensor/kubernetes/certrefresh/tls_challenge_cert_loader.go b/sensor/kubernetes/certrefresh/tls_challenge_cert_loader.go index 2b9843a7627f9..ab5d32df71966 100644 --- a/sensor/kubernetes/certrefresh/tls_challenge_cert_loader.go +++ b/sensor/kubernetes/certrefresh/tls_challenge_cert_loader.go @@ -6,6 +6,7 @@ import ( "os" "github.com/stackrox/rox/pkg/pods" + "github.com/stackrox/rox/sensor/common/centralcabundle" "github.com/stackrox/rox/sensor/common/centralclient" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" @@ -13,7 +14,8 @@ import ( // TLSChallengeCertLoader returns a centralclient.CertLoader that: // - performs the TLS challenge with Central and retrieves its trusted certificates -// - ceates/updates CA bundle ConfigMap for Admission Control's ValidatingWebhookConfiguration +// - creates/updates CA bundle ConfigMap for Admission Control's ValidatingWebhookConfiguration +// - stores Central CAs for use by the Scanner V4 client (Scanner V4 might be using a different CA than the one used by Sensor) // This is used for Operator managed clusters to enable CA rotation. 
func TLSChallengeCertLoader(centralClient *centralclient.Client, k8sClient kubernetes.Interface) centralclient.CertLoader { return func() []*x509.Certificate { @@ -27,6 +29,7 @@ func TLSChallengeCertLoader(centralClient *centralclient.Client, k8sClient kuber } else if len(centralCAs) > 0 { log.Debug("Updating TLS CA bundle ConfigMap from TLSChallenge") handleCABundleConfigMapUpdate(ctx, centralCAs, k8sClient) + centralcabundle.Set(centralCAs) } return certs } From 3fd3908e9be9ba100ff9d0de3f5488210c966c75 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Feb 2026 02:05:36 -0700 Subject: [PATCH 194/232] chore(deps): bump google.golang.org/grpc from 1.78.0 to 1.79.1 (#19039) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 10 +++++----- go.sum | 24 ++++++++++++------------ 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/go.mod b/go.mod index 91d15471e2f48..859b846ebe412 100644 --- a/go.mod +++ b/go.mod @@ -153,7 +153,7 @@ require ( google.golang.org/api v0.266.0 google.golang.org/genproto v0.0.0-20260128011058-8636f8732409 google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20 - google.golang.org/grpc v1.78.0 + google.golang.org/grpc v1.79.1 google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6 google.golang.org/protobuf v1.36.11 gopkg.in/mcuadros/go-syslog.v2 v2.3.0 @@ -261,7 +261,7 @@ require ( github.com/clipperhouse/stringish v0.1.1 // indirect github.com/clipperhouse/uax29/v2 v2.3.0 // indirect github.com/cloudflare/circl v1.6.1 // indirect - github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f // indirect + github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 // indirect github.com/cockroachdb/crlib v0.0.0-20250617202621-0794c595bbe6 // indirect github.com/cockroachdb/errors v1.12.0 // indirect github.com/cockroachdb/logtags v0.0.0-20241215232642-bb51bb14a506 // indirect @@ -291,8 +291,8 @@ require ( github.com/dustin/go-humanize v1.0.1 // indirect github.com/emicklei/go-restful/v3 v3.13.0 // indirect github.com/emirpasic/gods v1.18.1 // indirect - github.com/envoyproxy/go-control-plane/envoy v1.35.0 // indirect - github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect + github.com/envoyproxy/go-control-plane/envoy v1.36.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.3.0 // indirect github.com/evanphx/json-patch v5.9.11+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect @@ -488,7 +488,7 @@ require ( go.etcd.io/bbolt v1.4.3 // indirect go.mongodb.org/mongo-driver v1.17.6 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/contrib/detectors/gcp v1.38.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.39.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 // indirect go.opentelemetry.io/otel v1.40.0 // indirect diff --git a/go.sum b/go.sum index 24800d30efcaa..88a053e83c9eb 100644 --- a/go.sum +++ b/go.sum @@ -404,8 +404,8 @@ github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f h1:Y8xYupdHxryycyPlc9Y+bSQAYZnetRJ70VMVKm5CKI0= -github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f/go.mod h1:HlzOvOjVBOfTGSRXRyY0OiCS/3J1akRGQQpRO/7zyF4= +github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 h1:6xNmx7iTtyBRev0+D/Tv1FZd4SCg8axKApyNyRsAt/w= +github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/cockroach-go/v2 v2.2.0 h1:/5znzg5n373N/3ESjHF5SMLxiW4RKB05Ql//KWfeTFs= github.com/cockroachdb/cockroach-go/v2 v2.2.0/go.mod h1:u3MiKYGupPPjkn3ozknpMUpxPaNLTFWAya419/zv6eI= @@ -535,16 +535,16 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= -github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329 h1:K+fnvUM0VZ7ZFJf0n4L/BRlnsb9pL/GuDG6FqaH+PwM= -github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329/go.mod h1:Alz8LEClvR7xKsrq3qzoc4N0guvVNSS8KmSChGYr9hs= -github.com/envoyproxy/go-control-plane/envoy v1.35.0 h1:ixjkELDE+ru6idPxcHLj8LBVc2bFP7iBytj353BoHUo= -github.com/envoyproxy/go-control-plane/envoy v1.35.0/go.mod h1:09qwbGVuSWWAyN5t/b3iyVfz5+z8QWGrzkoqm/8SbEs= +github.com/envoyproxy/go-control-plane v0.14.0 h1:hbG2kr4RuFj222B6+7T83thSPqLjwBIfQawTkC++2HA= +github.com/envoyproxy/go-control-plane v0.14.0/go.mod h1:NcS5X47pLl/hfqxU70yPwL9ZMkUlwlKxtAohpi2wBEU= +github.com/envoyproxy/go-control-plane/envoy v1.36.0 h1:yg/JjO5E7ubRyKX3m07GF3reDNEnfOboJ0QySbH736g= +github.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9OtKeEkQLTb+Lkz0k8v9W0Oxsv98= github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= -github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= -github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= +github.com/envoyproxy/protoc-gen-validate v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg5VPuZ0uONDT6eb4= +github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8= github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -1675,8 +1675,8 @@ go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ go.opentelemetry.io/auto/sdk v1.2.1/go.mod 
h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w= go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk= -go.opentelemetry.io/contrib/detectors/gcp v1.38.0 h1:ZoYbqX7OaA/TAikspPl3ozPI6iY6LiIY9I8cUfm+pJs= -go.opentelemetry.io/contrib/detectors/gcp v1.38.0/go.mod h1:SU+iU7nu5ud4oCb3LQOhIZ3nRLj6FNVrKgtflbaf2ts= +go.opentelemetry.io/contrib/detectors/gcp v1.39.0 h1:kWRNZMsfBHZ+uHjiH4y7Etn2FK26LAGkNFw7RHv1DhE= +go.opentelemetry.io/contrib/detectors/gcp v1.39.0/go.mod h1:t/OGqzHBa5v6RHZwrDBJ2OirWc+4q/w2fTbLZwAKjTk= go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4= go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= @@ -2284,8 +2284,8 @@ google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= -google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= +google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY= +google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6 h1:ExN12ndbJ608cboPYflpTny6mXSzPrDLh0iTaVrRrds= google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6/go.mod h1:6ytKWczdvnpnO+m+JiG9NjEDzR1FJfsnmJdG7B8QVZ8= From fdf6c36fd74892ac2408b9b083ac86083d88d07d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Feb 2026 02:05:52 -0700 Subject: [PATCH 195/232] chore(deps): bump sigs.k8s.io/controller-tools from 0.20.0 to 0.20.1 (#19038) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 859b846ebe412..d54158bc462e9 100644 --- a/go.mod +++ b/go.mod @@ -174,7 +174,7 @@ require ( k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 kubevirt.io/api v1.7.0 sigs.k8s.io/controller-runtime v0.23.1 - sigs.k8s.io/controller-tools v0.20.0 + sigs.k8s.io/controller-tools v0.20.1 sigs.k8s.io/e2e-framework v0.6.0 sigs.k8s.io/yaml v1.6.0 ) diff --git a/go.sum b/go.sum index 88a053e83c9eb..5c40354744e20 100644 --- a/go.sum +++ b/go.sum @@ -2432,8 +2432,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUo sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/controller-runtime v0.23.1 h1:TjJSM80Nf43Mg21+RCy3J70aj/W6KyvDtOlpKf+PupE= sigs.k8s.io/controller-runtime v0.23.1/go.mod h1:B6COOxKptp+YaUT5q4l6LqUJTRpizbgf9KSRNdQGns0= -sigs.k8s.io/controller-tools v0.20.0 h1:VWZF71pwSQ2lZZCt7hFGJsOfDc5dVG28/IysjjMWXL8= -sigs.k8s.io/controller-tools v0.20.0/go.mod 
h1:b4qPmjGU3iZwqn34alUU5tILhNa9+VXK+J3QV0fT/uU= +sigs.k8s.io/controller-tools v0.20.1 h1:gkfMt9YodI0K85oT8rVi80NTXO/kDmabKR5Ajn5GYxs= +sigs.k8s.io/controller-tools v0.20.1/go.mod h1:b4qPmjGU3iZwqn34alUU5tILhNa9+VXK+J3QV0fT/uU= sigs.k8s.io/e2e-framework v0.6.0 h1:p7hFzHnLKO7eNsWGI2AbC1Mo2IYxidg49BiT4njxkrM= sigs.k8s.io/e2e-framework v0.6.0/go.mod h1:IREnCHnKgRCioLRmNi0hxSJ1kJ+aAdjEKK/gokcZu4k= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= From d02a252718b02fcf930ab3835593822e9c22d474 Mon Sep 17 00:00:00 2001 From: rhacs-bot <148914812+rhacs-bot@users.noreply.github.com> Date: Mon, 16 Feb 2026 02:30:42 -0700 Subject: [PATCH 196/232] chore(fact): Update FACT_VERSION (#18912) --- FACT_VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/FACT_VERSION b/FACT_VERSION index ee8685c26aa9f..60e8e248c535b 100644 --- a/FACT_VERSION +++ b/FACT_VERSION @@ -1 +1 @@ -0.2.x-5-gcf5e7419fa +0.2.x-65-gba9b8a9ecd From 31c4a8d80ee0fd214de0eba90969d238589a1729 Mon Sep 17 00:00:00 2001 From: rhacs-bot <148914812+rhacs-bot@users.noreply.github.com> Date: Mon, 16 Feb 2026 02:31:06 -0700 Subject: [PATCH 197/232] chore(collector): Update COLLECTOR_VERSION (#18712) --- COLLECTOR_VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/COLLECTOR_VERSION b/COLLECTOR_VERSION index b94c548573151..ab78672dc0a0d 100644 --- a/COLLECTOR_VERSION +++ b/COLLECTOR_VERSION @@ -1 +1 @@ -3.23.x-117-g24f41bdc67 +3.24.0-26-gc247140c1a From 64714332b2c9635521bf21abe71b1d90bf4516a5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Feb 2026 03:33:25 -0700 Subject: [PATCH 198/232] chore(deps): bump io.rest-assured:rest-assured from 5.5.6 to 6.0.0 in /qa-tests-backend (#18209) Signed-off-by: dependabot[bot] Signed-off-by: Tomasz Janiszewski Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Tomasz Janiszewski Co-authored-by: Claude Sonnet 4.5 --- qa-tests-backend/gradle/libs.versions.toml | 8 +- .../groovy/objects/ImageIntegration.groovy | 124 ++++++++++++++++-- .../src/test/groovy/AuthServiceTest.groovy | 20 +-- .../src/test/groovy/BaseSpecification.groovy | 4 +- .../src/test/groovy/ImageScanningTest.groovy | 6 +- 5 files changed, 131 insertions(+), 31 deletions(-) diff --git a/qa-tests-backend/gradle/libs.versions.toml b/qa-tests-backend/gradle/libs.versions.toml index fc1aed7c0c875..f14494dd3e5a8 100644 --- a/qa-tests-backend/gradle/libs.versions.toml +++ b/qa-tests-backend/gradle/libs.versions.toml @@ -4,7 +4,7 @@ protobuf = "4.33.2" # If the proto versions are changed, be sure it is also chan netty-tcnative = "2.0.61.Final" fabric8 = "7.4.0" jackson = "2.14.2" -groovy = "4.0.28" +groovy = "5.0.3" [libraries] gson = { module = "com.google.code.gson:gson", version = "2.13.2" } @@ -15,12 +15,12 @@ grpc-protobuf = { module = "io.grpc:grpc-protobuf", version.ref = "grpc" } grpc-stub = { module = "io.grpc:grpc-stub", version.ref = "grpc" } grpc-auth = { module = "io.grpc:grpc-auth", version.ref = "grpc" } netty-tcnative-boringssl-static = { module = "io.netty:netty-tcnative-boringssl-static", version.ref = "netty-tcnative" } -groovy-bom = { module = "org.apache.groovy:groovy-bom", version = "4.0.28" } +groovy-bom = { module = "org.apache.groovy:groovy-bom", version = "groovy" } groovy = { module = "org.apache.groovy:groovy", version.ref = "groovy" } -spock-bom = { module = "org.spockframework:spock-bom", version = "2.3-groovy-4.0" 
} +spock-bom = { module = "org.spockframework:spock-bom", version = "2.4-M7-groovy-5.0" } spock-core = { module = "org.spockframework:spock-core" } spock-junit4 = { module = "org.spockframework:spock-junit4" } -rest-assured = { module = "io.rest-assured:rest-assured", version = "5.5.6" } +rest-assured = { module = "io.rest-assured:rest-assured", version = "6.0.0" } snakeyaml = { module = "org.yaml:snakeyaml", version = "2.5" } logback-classic = { module = "ch.qos.logback:logback-classic", version = "1.5.22" } jackson-core = { module = "com.fasterxml.jackson.core:jackson-core", version.ref = "jackson" } diff --git a/qa-tests-backend/src/main/groovy/objects/ImageIntegration.groovy b/qa-tests-backend/src/main/groovy/objects/ImageIntegration.groovy index 7c4cf1d3e771f..ff55932b96314 100644 --- a/qa-tests-backend/src/main/groovy/objects/ImageIntegration.groovy +++ b/qa-tests-backend/src/main/groovy/objects/ImageIntegration.groovy @@ -7,14 +7,19 @@ import util.Env trait ImageIntegration { abstract static ImageIntegrationOuterClass.ImageIntegration.Builder getCustomBuilder(Map customArgs) +} + +class StackroxScannerIntegration implements ImageIntegration { - static ImageIntegrationOuterClass.ImageIntegration.Builder getDefaultBuilder() { - getCustomBuilder() + static String name() { Constants.AUTO_REGISTERED_STACKROX_SCANNER_INTEGRATION } + + static Boolean isTestable() { + return true } static String createDefaultIntegration() { ImageIntegrationService.createImageIntegration( - getDefaultBuilder().build() + getCustomBuilder([:]).build() ) } @@ -24,15 +29,6 @@ trait ImageIntegration { customArgs.containsKey("skipTestIntegration") && customArgs.skipTestIntegration ) } -} - -class StackroxScannerIntegration implements ImageIntegration { - - static String name() { Constants.AUTO_REGISTERED_STACKROX_SCANNER_INTEGRATION } - - static Boolean isTestable() { - return true - } static ImageIntegrationOuterClass.ImageIntegration.Builder getCustomBuilder(Map customArgs = [:]) { Map defaultArgs = [ @@ -67,6 +63,19 @@ class ClairScannerIntegration implements ImageIntegration { return Env.get("CLAIR_ENDPOINT") != null } + static String createDefaultIntegration() { + ImageIntegrationService.createImageIntegration( + getCustomBuilder([:]).build() + ) + } + + static String createCustomIntegration(Map customArgs = [:]) { + ImageIntegrationService.createImageIntegration( + getCustomBuilder(customArgs).build(), + customArgs.containsKey("skipTestIntegration") && customArgs.skipTestIntegration + ) + } + static ImageIntegrationOuterClass.ImageIntegration.Builder getCustomBuilder(Map customArgs = [:]) { Map defaultArgs = [ name: "clair", @@ -95,6 +104,19 @@ class ClairV4ScannerIntegration implements ImageIntegration { return Env.get("CLAIR_V4_ENDPOINT") != null } + static String createDefaultIntegration() { + ImageIntegrationService.createImageIntegration( + getCustomBuilder([:]).build() + ) + } + + static String createCustomIntegration(Map customArgs = [:]) { + ImageIntegrationService.createImageIntegration( + getCustomBuilder(customArgs).build(), + customArgs.containsKey("skipTestIntegration") && customArgs.skipTestIntegration + ) + } + static ImageIntegrationOuterClass.ImageIntegration.Builder getCustomBuilder(Map customArgs = [:]) { Map defaultArgs = [ name: "clairv4", @@ -125,6 +147,19 @@ class ECRRegistryIntegration implements ImageIntegration { return true } + static String createDefaultIntegration() { + ImageIntegrationService.createImageIntegration( + getCustomBuilder([:]).build() + ) + } + + static String 
createCustomIntegration(Map customArgs = [:]) { + ImageIntegrationService.createImageIntegration( + getCustomBuilder(customArgs).build(), + customArgs.containsKey("skipTestIntegration") && customArgs.skipTestIntegration + ) + } + static ImageIntegrationOuterClass.ImageIntegration.Builder getCustomBuilder(Map customArgs = [:]) { Map defaultArgs = [ name: "ecr", @@ -188,6 +223,19 @@ class AzureRegistryIntegration implements ImageIntegration { return true } + static String createDefaultIntegration() { + ImageIntegrationService.createImageIntegration( + getCustomBuilder([:]).build() + ) + } + + static String createCustomIntegration(Map customArgs = [:]) { + ImageIntegrationService.createImageIntegration( + getCustomBuilder(customArgs).build(), + customArgs.containsKey("skipTestIntegration") && customArgs.skipTestIntegration + ) + } + static ImageIntegrationOuterClass.ImageIntegration.Builder getCustomBuilder(Map customArgs = [:]) { Map defaultArgs = [ configSchema: "AzureConfig", @@ -236,6 +284,19 @@ class QuayImageIntegration implements ImageIntegration { return true } + static String createDefaultIntegration() { + ImageIntegrationService.createImageIntegration( + getCustomBuilder([:]).build() + ) + } + + static String createCustomIntegration(Map customArgs = [:]) { + ImageIntegrationService.createImageIntegration( + getCustomBuilder(customArgs).build(), + customArgs.containsKey("skipTestIntegration") && customArgs.skipTestIntegration + ) + } + static ImageIntegrationOuterClass.ImageIntegration.Builder getCustomBuilder(Map customArgs = [:]) { Map defaultArgs = [ name: "quay", @@ -277,6 +338,19 @@ class GHCRImageIntegration implements ImageIntegration { return true } + static String createDefaultIntegration() { + ImageIntegrationService.createImageIntegration( + getCustomBuilder([:]).build() + ) + } + + static String createCustomIntegration(Map customArgs = [:]) { + ImageIntegrationService.createImageIntegration( + getCustomBuilder(customArgs).build(), + customArgs.containsKey("skipTestIntegration") && customArgs.skipTestIntegration + ) + } + static ImageIntegrationOuterClass.ImageIntegration.Builder getCustomBuilder(Map customArgs = [:]) { Map defaultArgs = [ name: "ghcr", @@ -309,6 +383,19 @@ class GoogleArtifactRegistry implements ImageIntegration { return true } + static String createDefaultIntegration() { + ImageIntegrationService.createImageIntegration( + getCustomBuilder([:]).build() + ) + } + + static String createCustomIntegration(Map customArgs = [:]) { + ImageIntegrationService.createImageIntegration( + getCustomBuilder(customArgs).build(), + customArgs.containsKey("skipTestIntegration") && customArgs.skipTestIntegration + ) + } + static ImageIntegrationOuterClass.ImageIntegration.Builder getCustomBuilder(Map customArgs = [:]) { Map defaultArgs = [ name: "google-artifact-registry", @@ -349,6 +436,19 @@ class GCRImageIntegration implements ImageIntegration { return true } + static String createDefaultIntegration() { + ImageIntegrationService.createImageIntegration( + getCustomBuilder([:]).build() + ) + } + + static String createCustomIntegration(Map customArgs = [:]) { + ImageIntegrationService.createImageIntegration( + getCustomBuilder(customArgs).build(), + customArgs.containsKey("skipTestIntegration") && customArgs.skipTestIntegration + ) + } + static ImageIntegrationOuterClass.ImageIntegration.Builder getCustomBuilder(Map customArgs = [:]) { Map defaultArgs = [ name: "gcr", diff --git a/qa-tests-backend/src/test/groovy/AuthServiceTest.groovy 
b/qa-tests-backend/src/test/groovy/AuthServiceTest.groovy index a9352175ab004..5967badcc2abe 100644 --- a/qa-tests-backend/src/test/groovy/AuthServiceTest.groovy +++ b/qa-tests-backend/src/test/groovy/AuthServiceTest.groovy @@ -26,19 +26,19 @@ class AuthServiceTest extends BaseSpecification { assert status assert status.userId == "admin" - status.authProvider.with { - assert name == "Login with username/password" - assert id == "4df1b98c-24ed-4073-a9ad-356aec6bb62d" - assert type == "basic" + verifyAll(status.authProvider) { + name == "Login with username/password" + id == "4df1b98c-24ed-4073-a9ad-356aec6bb62d" + type == "basic" } - status.userInfo.with { - assert permissions.resourceToAccessCount > 0 + verifyAll(status.userInfo) { + permissions.resourceToAccessCount > 0 permissions.resourceToAccessMap.each { assert it.value == RoleOuterClass.Access.READ_WRITE_ACCESS } def adminRole = rolesList.find { it.name == "Admin" } - assert adminRole + adminRole } def attrMap = getAttrMap(status.userAttributesList) @@ -57,14 +57,14 @@ class AuthServiceTest extends BaseSpecification { assert status.userId.startsWith("auth-token:") assert !status.authProvider.id - status.userInfo.with { - assert permissions.resourceToAccessCount > 0 + verifyAll(status.userInfo) { + permissions.resourceToAccessCount > 0 permissions.resourceToAccessMap.each { assert it.value == RoleOuterClass.Access.READ_WRITE_ACCESS } def tokenRole = rolesList.find { it.name.startsWith("Test Automation Role - ") } - assert tokenRole + tokenRole } def attrMap = getAttrMap(status.userAttributesList) diff --git a/qa-tests-backend/src/test/groovy/BaseSpecification.groovy b/qa-tests-backend/src/test/groovy/BaseSpecification.groovy index 1c69b47d0c2ce..096d455ae6b90 100644 --- a/qa-tests-backend/src/test/groovy/BaseSpecification.groovy +++ b/qa-tests-backend/src/test/groovy/BaseSpecification.groovy @@ -209,7 +209,7 @@ class BaseSpecification extends Specification { TimeUnit.SECONDS ) @Rule - TestName name = new TestName() + protected final TestName currentTestName = new TestName() @Shared Logger log = LoggerFactory.getLogger("test." + this.getClass().getSimpleName()) @@ -290,7 +290,7 @@ class BaseSpecification extends Specification { // These .puts() have to be repeated here or else the key is cleared. MDC.put("logFileName", this.class.getSimpleName()) MDC.put("specification", this.class.getSimpleName()) - log.info("Starting testcase: ${name.getMethodName()}") + log.info("Starting testcase: ${currentTestName.getMethodName()}") // Make sure to use or revert back to the desired central gRPC auth // before each test. 
diff --git a/qa-tests-backend/src/test/groovy/ImageScanningTest.groovy b/qa-tests-backend/src/test/groovy/ImageScanningTest.groovy index 257f4833c2308..2d699ab711174 100644 --- a/qa-tests-backend/src/test/groovy/ImageScanningTest.groovy +++ b/qa-tests-backend/src/test/groovy/ImageScanningTest.groovy @@ -312,9 +312,9 @@ class ImageScanningTest extends BaseSpecification { where: "Data inputs:" - testName | integration | - addIntegrationClosure | - components | totalCves | fixable + // testName | integration | + // addIntegrationClosure | + // components | totalCves | fixable // ROX-9448 - disable Quay until scanning is fixed // "quay-keep-autogenerated" | "quay" | From 733041a36ea013cdf27711a821afeafea3f9eb6d Mon Sep 17 00:00:00 2001 From: Giles Hutton Date: Mon, 16 Feb 2026 11:02:55 +0000 Subject: [PATCH 199/232] ROX-33098: combines file path criteria (#18916) Actual Path and Effective Path criteria dropped in favor of a singular File Path criterion. --- CHANGELOG.md | 1 + central/policy/service/validator_test.go | 68 +++++----- pkg/booleanpolicy/augmentedobjs/construct.go | 23 +++- .../augmentedobjs/custom_types.go | 10 ++ pkg/booleanpolicy/augmentedobjs/meta.go | 4 +- pkg/booleanpolicy/deployment_policies_test.go | 92 +++++++------- pkg/booleanpolicy/field_metadata.go | 15 +-- pkg/booleanpolicy/fieldnames/list.go | 3 +- pkg/booleanpolicy/node_policies_test.go | 20 ++- pkg/booleanpolicy/validate.go | 5 +- pkg/booleanpolicy/validate_test.go | 42 +------ .../violationmessages/printer.go | 2 +- .../Step3/policyCriteriaDescriptors.tsx | 26 +--- .../Step3/policyCriteriaValidators.test.ts | 117 ++++++------------ .../Wizard/Step3/policyCriteriaValidators.ts | 25 +--- 15 files changed, 185 insertions(+), 268 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0221ddb19121b..3154bfc3ef974 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ Changes should still be described appropriately in JIRA/doc input pages, for inc ### Added Features - ROX-24311: Detection and enforcement for pods/attach Kubernetes event +- ROX-33098 (Tech Preview): Effective path and Actual Path have been combined into a single File Path policy criterion. 
### Removed Features diff --git a/central/policy/service/validator_test.go b/central/policy/service/validator_test.go index 1c515c7247f54..14bfb67474e5c 100644 --- a/central/policy/service/validator_test.go +++ b/central/policy/service/validator_test.go @@ -992,37 +992,29 @@ func (s *PolicyValidatorTestSuite) TestValidateEnforcement() { } } -func (s *PolicyValidatorTestSuite) TestValidateEffectivePathEventSource() { +func (s *PolicyValidatorTestSuite) TestValidateDeploymentFileActivityEventSource() { testCases := []struct { description string p *storage.Policy errExpected bool }{ { - description: "Deployment policy with valid Effective Path field", + description: "Deployment policy with valid File Path field", p: booleanPolicyWithFields(storage.LifecycleStage_RUNTIME, storage.EventSource_DEPLOYMENT_EVENT, map[string]string{ - fieldnames.EffectivePath: "/etc/passwd", + fieldnames.FilePath: "/etc/passwd", }), }, { - description: "Deployment policy with Effective Path and FileOperation", + description: "Deployment policy with File Path and File Operation", p: booleanPolicyWithFields(storage.LifecycleStage_RUNTIME, storage.EventSource_DEPLOYMENT_EVENT, map[string]string{ - fieldnames.EffectivePath: "/etc/shadow", + fieldnames.FilePath: "/etc/shadow", fieldnames.FileOperation: "open", }), }, { - description: "Node policy with Effective Path (should be invalid)", - p: booleanPolicyWithFields(storage.LifecycleStage_RUNTIME, storage.EventSource_NODE_EVENT, - map[string]string{ - fieldnames.EffectivePath: "/etc/passwd", - }), - errExpected: true, - }, - { - description: "Deployment policy with FileOperation but no file path", + description: "Deployment policy with File Operation but no file path", p: booleanPolicyWithFields(storage.LifecycleStage_RUNTIME, storage.EventSource_DEPLOYMENT_EVENT, map[string]string{ fieldnames.FileOperation: "open", @@ -1030,26 +1022,26 @@ func (s *PolicyValidatorTestSuite) TestValidateEffectivePathEventSource() { errExpected: true, }, { - description: "Deployment policy with invalid Effective Path", + description: "Deployment policy with invalid File Path", p: booleanPolicyWithFields(storage.LifecycleStage_RUNTIME, storage.EventSource_DEPLOYMENT_EVENT, map[string]string{ - fieldnames.EffectivePath: "relative/path.sh", + fieldnames.FilePath: "relative/path.sh", }), errExpected: true, }, { - description: "Deployment policy with Effective Path in wrong lifecycle stage (build)", + description: "Deployment policy with File Path in wrong lifecycle stage (build)", p: booleanPolicyWithFields(storage.LifecycleStage_BUILD, storage.EventSource_DEPLOYMENT_EVENT, map[string]string{ - fieldnames.EffectivePath: "/etc/hosts", + fieldnames.FilePath: "/etc/hosts", }), errExpected: true, }, { - description: "Deployment policy with Effective Path in wrong lifecycle stage (deploy)", + description: "Deployment policy with File Path in wrong lifecycle stage (deploy)", p: booleanPolicyWithFields(storage.LifecycleStage_DEPLOY, storage.EventSource_DEPLOYMENT_EVENT, map[string]string{ - fieldnames.EffectivePath: "/etc/passwd", + fieldnames.FilePath: "/etc/passwd", }), errExpected: true, }, @@ -1084,10 +1076,10 @@ func (s *PolicyValidatorTestSuite) TestValidateNodeEventSource() { errExpected bool }{ { - description: "Node policy with valid Actual Path field", + description: "Node policy with valid File Path field", p: booleanPolicyWithFields(storage.LifecycleStage_RUNTIME, storage.EventSource_NODE_EVENT, map[string]string{ - fieldnames.ActualPath: "/etc/passwd", + fieldnames.FilePath: 
"/etc/passwd", }), }, { @@ -1123,49 +1115,49 @@ func (s *PolicyValidatorTestSuite) TestValidateNodeEventSource() { errExpected: true, }, { - description: "Node policy with Actual Path and invalid process fields", + description: "Node policy with File Path and invalid process fields", p: booleanPolicyWithFields(storage.LifecycleStage_RUNTIME, storage.EventSource_NODE_EVENT, map[string]string{ - fieldnames.ActualPath: "/var/log/audit.log", + fieldnames.FilePath: "/var/log/audit.log", fieldnames.ProcessName: "suspicious-binary", }), errExpected: true, }, { - description: "Node policy with Actual Path and invalid container fields", + description: "Node policy with File Path and invalid container fields", p: booleanPolicyWithFields(storage.LifecycleStage_RUNTIME, storage.EventSource_NODE_EVENT, map[string]string{ - fieldnames.ActualPath: "/etc/shadow", + fieldnames.FilePath: "/etc/shadow", fieldnames.ContainerName: "malicious-container", }), errExpected: true, }, { - description: "Node policy with Actual Path in wrong lifecycle stage (build)", + description: "Node policy with File Path in wrong lifecycle stage (build)", p: booleanPolicyWithFields(storage.LifecycleStage_BUILD, storage.EventSource_NODE_EVENT, map[string]string{ - fieldnames.ActualPath: "/etc/hosts", + fieldnames.FilePath: "/etc/hosts", }), errExpected: true, }, { - description: "Node policy with Actual Path in wrong lifecycle stage (deploy)", + description: "Node policy with File Path in wrong lifecycle stage (deploy)", p: booleanPolicyWithFields(storage.LifecycleStage_DEPLOY, storage.EventSource_NODE_EVENT, map[string]string{ - fieldnames.ActualPath: "/tmp/malicious.sh", + fieldnames.FilePath: "/tmp/malicious.sh", }), errExpected: true, }, { - description: "Node policy invalid Actual Path", + description: "Node policy invalid File Path", p: booleanPolicyWithFields(storage.LifecycleStage_RUNTIME, storage.EventSource_NODE_EVENT, map[string]string{ - fieldnames.ActualPath: "relative/path.sh", + fieldnames.FilePath: "relative/path.sh", }), errExpected: true, }, { - description: "Node policy with valid FileOperation field but no file path", + description: "Node policy with valid File Operation field but no File Path", p: booleanPolicyWithFields(storage.LifecycleStage_RUNTIME, storage.EventSource_NODE_EVENT, map[string]string{ fieldnames.FileOperation: "open", @@ -1173,15 +1165,15 @@ func (s *PolicyValidatorTestSuite) TestValidateNodeEventSource() { errExpected: true, }, { - description: "Node policy with FileOperation and valid Actual Path field", + description: "Node policy with File Operation and valid File Path field", p: booleanPolicyWithFields(storage.LifecycleStage_RUNTIME, storage.EventSource_NODE_EVENT, map[string]string{ fieldnames.FileOperation: "open", - fieldnames.ActualPath: "/etc/passwd", + fieldnames.FilePath: "/etc/passwd", }), }, { - description: "Node policy with invalid FileOperation", + description: "Node policy with invalid File Operation", p: booleanPolicyWithFields(storage.LifecycleStage_RUNTIME, storage.EventSource_NODE_EVENT, map[string]string{ fieldnames.FileOperation: "execute", @@ -1189,7 +1181,7 @@ func (s *PolicyValidatorTestSuite) TestValidateNodeEventSource() { errExpected: true, }, { - description: "Node policy with FileOperation in wrong lifecycle stage (build)", + description: "Node policy with File Operation in wrong lifecycle stage (build)", p: booleanPolicyWithFields(storage.LifecycleStage_BUILD, storage.EventSource_NODE_EVENT, map[string]string{ fieldnames.FileOperation: "open", @@ -1197,7 
+1189,7 @@ func (s *PolicyValidatorTestSuite) TestValidateNodeEventSource() { errExpected: true, }, { - description: "Node policy with FileOperation in wrong lifecycle stage (deploy)", + description: "Node policy with File Operation in wrong lifecycle stage (deploy)", p: booleanPolicyWithFields(storage.LifecycleStage_DEPLOY, storage.EventSource_NODE_EVENT, map[string]string{ fieldnames.FileOperation: "open", diff --git a/pkg/booleanpolicy/augmentedobjs/construct.go b/pkg/booleanpolicy/augmentedobjs/construct.go index 3bd6bb3a4ddc3..1a4a55d600ec0 100644 --- a/pkg/booleanpolicy/augmentedobjs/construct.go +++ b/pkg/booleanpolicy/augmentedobjs/construct.go @@ -132,7 +132,7 @@ func ConstructNodeWithFileAccess(node *storage.Node, fileAccess *storage.FileAcc } err = nodeObj.AddAugmentedObjAt( - pathutil.NewAugmentedObj(fileAccess), + ConstructFileAccess(fileAccess), pathutil.FieldStep(fileAccessKey), ) @@ -161,11 +161,30 @@ func ConstructDeploymentWithFileAccess( if err != nil { return nil, err } + return obj, nil } func ConstructFileAccess(fileAccess *storage.FileAccess) *pathutil.AugmentedObj { - return pathutil.NewAugmentedObj(fileAccess) + obj := pathutil.NewAugmentedObj(fileAccess) + + // By combining the actual and effective paths into a single + // slice, we allow logical disjunction for a single policy + // criterion, avoiding the need for a path criterion for each + // type of path. + fileAccessPaths := &fileAccessPath{ + Path: []string{ + fileAccess.GetFile().GetActualPath(), + fileAccess.GetFile().GetEffectivePath(), + }, + } + + err := obj.AddPlainObjAt(fileAccessPaths, pathutil.FieldStep(fileAccessPathKey)) + if err != nil { + return nil + } + + return obj } // ConstructDeploymentWithNetworkFlowInfo constructs an augmented object with deployment and network flow. diff --git a/pkg/booleanpolicy/augmentedobjs/custom_types.go b/pkg/booleanpolicy/augmentedobjs/custom_types.go index 7208b3d99c39e..412d363a7a02d 100644 --- a/pkg/booleanpolicy/augmentedobjs/custom_types.go +++ b/pkg/booleanpolicy/augmentedobjs/custom_types.go @@ -26,6 +26,7 @@ const ( KubernetesSourceIPAddressCustomTag = "Source IP Address" KubernetesUserAgentCustomTag = "User Agent" KubernetesIsImpersonatedCustomTag = "Is Impersonated User" + FileAccessPathCustomTag = "File Path" RuntimeClassCustomTag = "Runtime Class" ) @@ -91,3 +92,12 @@ type NodeDetails struct { ClusterId string `search:"Cluster Id"` ClusterName string `search:"Cluster Name"` } + +// fileAccessPath is a struct to contain ALL file paths +// for a given file activity event. This allows us to +// compare them all against a single criteria instead +// of requiring a criteria for each "kind" of path +// (effective or actual) +type fileAccessPath struct { + Path []string `search:"File Path"` +} diff --git a/pkg/booleanpolicy/augmentedobjs/meta.go b/pkg/booleanpolicy/augmentedobjs/meta.go index 3fa3d61a21544..07a5df80becdf 100644 --- a/pkg/booleanpolicy/augmentedobjs/meta.go +++ b/pkg/booleanpolicy/augmentedobjs/meta.go @@ -20,6 +20,7 @@ const ( baselineResultAugmentKey = "BaselineResult" envVarAugmentKey = "EnvironmentVariable" impersonatedEventResultKey = "ImpersonatedEventResult" + fileAccessPathKey = "FilePath" ) // This block enumerates metadata about the augmented objects we use in policies. @@ -63,7 +64,8 @@ var ( NetworkPoliciesAppliedMeta = pathutil.NewAugmentedObjMeta((*NetworkPoliciesApplied)(nil)) - FileAccessMeta = pathutil.NewAugmentedObjMeta((*storage.FileAccess)(nil)) + FileAccessMeta = pathutil.NewAugmentedObjMeta((*storage.FileAccess)(nil)). 
+ AddPlainObjectAt([]string{fileAccessPathKey}, (*fileAccessPath)(nil)) NodeMeta = pathutil.NewAugmentedObjMeta((*NodeDetails)(nil)). AddAugmentedObjectAt([]string{fileAccessKey}, FileAccessMeta) diff --git a/pkg/booleanpolicy/deployment_policies_test.go b/pkg/booleanpolicy/deployment_policies_test.go index 04994b9169509..3868b2484700f 100644 --- a/pkg/booleanpolicy/deployment_policies_test.go +++ b/pkg/booleanpolicy/deployment_policies_test.go @@ -689,7 +689,7 @@ func (s *DeploymentDetectionTestSuite) TestDeploymentDualPathMatching() { SectionName: "section 1", PolicyGroups: []*storage.PolicyGroup{ { - FieldName: fieldnames.ActualPath, + FieldName: fieldnames.FilePath, Values: []*storage.PolicyValue{{Value: "/etc/passwd"}}, }, { @@ -702,7 +702,7 @@ func (s *DeploymentDetectionTestSuite) TestDeploymentDualPathMatching() { SectionName: "section 2", PolicyGroups: []*storage.PolicyGroup{ { - FieldName: fieldnames.ActualPath, + FieldName: fieldnames.FilePath, Values: []*storage.PolicyValue{{Value: "/etc/shadow"}}, }, { @@ -726,7 +726,7 @@ func (s *DeploymentDetectionTestSuite) TestDeploymentDualPathMatching() { SectionName: "section 1", PolicyGroups: []*storage.PolicyGroup{ { - FieldName: fieldnames.ActualPath, + FieldName: fieldnames.FilePath, Values: []*storage.PolicyValue{{Value: "/etc/shadow"}}, }, { @@ -739,7 +739,7 @@ func (s *DeploymentDetectionTestSuite) TestDeploymentDualPathMatching() { SectionName: "section 2", PolicyGroups: []*storage.PolicyGroup{ { - FieldName: fieldnames.ActualPath, + FieldName: fieldnames.FilePath, Values: []*storage.PolicyValue{{Value: "/etc/passwd"}}, }, { @@ -763,7 +763,7 @@ func (s *DeploymentDetectionTestSuite) TestDeploymentDualPathMatching() { SectionName: "section 1", PolicyGroups: []*storage.PolicyGroup{ { - FieldName: fieldnames.ActualPath, + FieldName: fieldnames.FilePath, Values: []*storage.PolicyValue{{Value: "/etc/passwd"}}, }, }, @@ -772,7 +772,7 @@ func (s *DeploymentDetectionTestSuite) TestDeploymentDualPathMatching() { SectionName: "section 2", PolicyGroups: []*storage.PolicyGroup{ { - FieldName: fieldnames.EffectivePath, + FieldName: fieldnames.FilePath, Values: []*storage.PolicyValue{{Value: "/etc/sudoers"}}, }, }, @@ -792,7 +792,7 @@ func (s *DeploymentDetectionTestSuite) TestDeploymentDualPathMatching() { SectionName: "section 1", PolicyGroups: []*storage.PolicyGroup{ { - FieldName: fieldnames.ActualPath, + FieldName: fieldnames.FilePath, Values: []*storage.PolicyValue{{Value: "/etc/sudoers"}}, }, }, @@ -801,7 +801,7 @@ func (s *DeploymentDetectionTestSuite) TestDeploymentDualPathMatching() { SectionName: "section 2", PolicyGroups: []*storage.PolicyGroup{ { - FieldName: fieldnames.EffectivePath, + FieldName: fieldnames.FilePath, Values: []*storage.PolicyValue{{Value: "/etc/shadow"}}, }, }, @@ -821,12 +821,11 @@ func (s *DeploymentDetectionTestSuite) TestDeploymentDualPathMatching() { SectionName: "section 1", PolicyGroups: []*storage.PolicyGroup{ { - FieldName: fieldnames.ActualPath, - Values: []*storage.PolicyValue{{Value: "/etc/passwd"}}, - }, - { - FieldName: fieldnames.EffectivePath, - Values: []*storage.PolicyValue{{Value: "/etc/shadow"}}, + FieldName: fieldnames.FilePath, + Values: []*storage.PolicyValue{ + {Value: "/etc/passwd"}, + {Value: "/etc/shadow"}, + }, }, }, }, @@ -834,7 +833,7 @@ func (s *DeploymentDetectionTestSuite) TestDeploymentDualPathMatching() { SectionName: "section 2", PolicyGroups: []*storage.PolicyGroup{ { - FieldName: fieldnames.ActualPath, + FieldName: fieldnames.FilePath, Values: []*storage.PolicyValue{{Value: 
"/etc/ssh/sshd_config"}}, }, }, @@ -864,22 +863,32 @@ func (s *DeploymentDetectionTestSuite) TestDeploymentDualPathMatching() { }, }, { - description: "Event with both paths - policy requires BOTH but only actual path matches", + description: "Event with both paths - policy requires EITHER and only actual path matches", policy: s.getDualPathPolicy("/etc/passwd", "/etc/sudoers", []storage.FileAccess_Operation{storage.FileAccess_OPEN}), events: []eventWrapper{ { access: s.getDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), - expectAlert: false, + expectAlert: true, }, }, }, { - description: "Event with both paths - policy requires BOTH but only effective matches", + description: "Event with both paths - policy requires EITHER and only effective matches", policy: s.getDualPathPolicy("/etc/sudoers", "/etc/shadow", []storage.FileAccess_Operation{storage.FileAccess_OPEN}), events: []eventWrapper{ { access: s.getDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), - expectAlert: false, + expectAlert: true, + }, + }, + }, + { + description: "Event with both paths - policy requires EITHER and only BOTH match", + policy: s.getDualPathPolicy("/etc/sudoers", "/etc/shadow", []storage.FileAccess_Operation{storage.FileAccess_OPEN}), + events: []eventWrapper{ + { + access: s.getDualPathFileAccessEvent("/etc/sudoers", "/etc/shadow", storage.FileAccess_OPEN), + expectAlert: true, }, }, }, @@ -900,7 +909,7 @@ func (s *DeploymentDetectionTestSuite) TestDeploymentDualPathMatching() { SectionName: "section 1", PolicyGroups: []*storage.PolicyGroup{ { - FieldName: fieldnames.ActualPath, + FieldName: fieldnames.FilePath, Values: []*storage.PolicyValue{{Value: "/etc/ssh/sshd_config"}}, }, }, @@ -909,7 +918,7 @@ func (s *DeploymentDetectionTestSuite) TestDeploymentDualPathMatching() { SectionName: "section 2", PolicyGroups: []*storage.PolicyGroup{ { - FieldName: fieldnames.EffectivePath, + FieldName: fieldnames.FilePath, Values: []*storage.PolicyValue{{Value: "/etc/sudoers"}}, }, }, @@ -929,12 +938,12 @@ func (s *DeploymentDetectionTestSuite) TestDeploymentDualPathMatching() { SectionName: "section 1", PolicyGroups: []*storage.PolicyGroup{ { - FieldName: fieldnames.ActualPath, - Values: []*storage.PolicyValue{{Value: "/etc/passwd"}}, + FieldName: fieldnames.FilePath, + Values: []*storage.PolicyValue{{Value: "/etc/passwd"}, {Value: "/etc/shadow"}}, }, { - FieldName: fieldnames.EffectivePath, - Values: []*storage.PolicyValue{{Value: "/etc/sudoers"}}, + FieldName: fieldnames.FileOperation, + Values: []*storage.PolicyValue{{Value: "UNLINK"}}, }, }, }, @@ -942,12 +951,12 @@ func (s *DeploymentDetectionTestSuite) TestDeploymentDualPathMatching() { SectionName: "section 2", PolicyGroups: []*storage.PolicyGroup{ { - FieldName: fieldnames.ActualPath, - Values: []*storage.PolicyValue{{Value: "/etc/ssh/sshd_config"}}, + FieldName: fieldnames.FilePath, + Values: []*storage.PolicyValue{{Value: "/etc/shadow"}, {Value: "/etc/ssh/sshd_config"}}, }, { - FieldName: fieldnames.EffectivePath, - Values: []*storage.PolicyValue{{Value: "/etc/shadow"}}, + FieldName: fieldnames.FileOperation, + Values: []*storage.PolicyValue{{Value: "UNLINK"}}, }, }, }, @@ -1024,7 +1033,7 @@ func (s *DeploymentDetectionTestSuite) getDeploymentEffectiveFileAccessEvent(pat } // getFileAccessPolicy is a generic helper for creating file access policies. 
-func (s *DeploymentDetectionTestSuite) getFileAccessPolicy(isActualPath bool, operations []storage.FileAccess_Operation, negate bool, paths ...string) *storage.Policy { +func (s *DeploymentDetectionTestSuite) getFileAccessPolicy(operations []storage.FileAccess_Operation, negate bool, paths ...string) *storage.Policy { var pathValues []*storage.PolicyValue for _, path := range paths { pathValues = append(pathValues, &storage.PolicyValue{ @@ -1032,14 +1041,9 @@ func (s *DeploymentDetectionTestSuite) getFileAccessPolicy(isActualPath bool, op }) } - fieldName := fieldnames.ActualPath - if !isActualPath { - fieldName = fieldnames.EffectivePath - } - policyGroups := []*storage.PolicyGroup{ { - FieldName: fieldName, + FieldName: fieldnames.FilePath, Values: pathValues, }, } @@ -1077,19 +1081,19 @@ func (s *DeploymentDetectionTestSuite) getFileAccessPolicy(isActualPath bool, op } func (s *DeploymentDetectionTestSuite) getDeploymentFileAccessPolicyWithOperations(operations []storage.FileAccess_Operation, negate bool, paths ...string) *storage.Policy { - return s.getFileAccessPolicy(true, operations, negate, paths...) + return s.getFileAccessPolicy(operations, negate, paths...) } func (s *DeploymentDetectionTestSuite) getDeploymentFileAccessPolicy(paths ...string) *storage.Policy { - return s.getFileAccessPolicy(true, nil, false, paths...) + return s.getFileAccessPolicy(nil, false, paths...) } func (s *DeploymentDetectionTestSuite) getEffectiveFileAccessPolicyWithOperations(operations []storage.FileAccess_Operation, negate bool, paths ...string) *storage.Policy { - return s.getFileAccessPolicy(false, operations, negate, paths...) + return s.getFileAccessPolicy(operations, negate, paths...) } func (s *DeploymentDetectionTestSuite) getEffectiveFileAccessPolicy(paths ...string) *storage.Policy { - return s.getFileAccessPolicy(false, nil, false, paths...) + return s.getFileAccessPolicy(nil, false, paths...) 
} // Helper to create file access events with BOTH actual path and effective path populated @@ -1107,12 +1111,8 @@ func (s *DeploymentDetectionTestSuite) getDualPathFileAccessEvent(actualPath, ef func (s *DeploymentDetectionTestSuite) getDualPathPolicy(actualPath, effectivePath string, operations []storage.FileAccess_Operation) *storage.Policy { policyGroups := []*storage.PolicyGroup{ { - FieldName: fieldnames.ActualPath, - Values: []*storage.PolicyValue{{Value: actualPath}}, - }, - { - FieldName: fieldnames.EffectivePath, - Values: []*storage.PolicyValue{{Value: effectivePath}}, + FieldName: fieldnames.FilePath, + Values: []*storage.PolicyValue{{Value: actualPath}, {Value: effectivePath}}, }, } diff --git a/pkg/booleanpolicy/field_metadata.go b/pkg/booleanpolicy/field_metadata.go index 327742238f848..ea47c3947c449 100644 --- a/pkg/booleanpolicy/field_metadata.go +++ b/pkg/booleanpolicy/field_metadata.go @@ -930,8 +930,8 @@ func initializeFieldMetadata() FieldMetadata { ) if features.SensitiveFileActivity.Enabled() { - f.registerFieldMetadata(fieldnames.ActualPath, - querybuilders.ForFieldLabelExact(search.ActualPath), nil, + f.registerFieldMetadata(fieldnames.FilePath, + querybuilders.ForFieldLabelExact(augmentedobjs.FileAccessPathCustomTag), nil, func(*validateConfiguration) *regexp.Regexp { // TODO(ROX-31449): change to an absolute path regex when arbitrary // paths are supported @@ -941,17 +941,6 @@ func initializeFieldMetadata() FieldMetadata { []RuntimeFieldType{FileAccess}, negationForbidden, ) - f.registerFieldMetadata(fieldnames.EffectivePath, - querybuilders.ForFieldLabelExact(search.EffectivePath), nil, - func(*validateConfiguration) *regexp.Regexp { - // TODO(ROX-31449): change to an absolute path regex when arbitrary - // paths are supported - return allowedFilePathRegex - }, - []storage.EventSource{storage.EventSource_DEPLOYMENT_EVENT}, - []RuntimeFieldType{FileAccess}, negationForbidden, - ) - f.registerFieldMetadata(fieldnames.FileOperation, querybuilders.ForFieldLabel(search.FileOperation), nil, func(*validateConfiguration) *regexp.Regexp { diff --git a/pkg/booleanpolicy/fieldnames/list.go b/pkg/booleanpolicy/fieldnames/list.go index 04c1a4c9b72c7..8d221a60af76d 100644 --- a/pkg/booleanpolicy/fieldnames/list.go +++ b/pkg/booleanpolicy/fieldnames/list.go @@ -7,7 +7,6 @@ var ( // This block enumerates all known field names. // Please keep in alphabetical order. 
var ( - ActualPath = newFieldName("Actual Path") AddCaps = newFieldName("Add Capabilities") AllowPrivilegeEscalation = newFieldName("Allow Privilege Escalation") AppArmorProfile = newFieldName("AppArmor Profile") @@ -27,12 +26,12 @@ var ( DisallowedImageLabel = newFieldName("Disallowed Image Label") DockerfileLine = newFieldName("Dockerfile Line") DropCaps = newFieldName("Drop Capabilities") - EffectivePath = newFieldName("Effective Path") EnvironmentVariable = newFieldName("Environment Variable") ExposedNodePort = newFieldName("Exposed Node Port") ExposedPort = newFieldName("Exposed Port") ExposedPortProtocol = newFieldName("Exposed Port Protocol") FileOperation = newFieldName("File Operation") + FilePath = newFieldName("File Path") Fixable = newFieldName("Fixable") FixedBy = newFieldName("Fixed By") HasIngressNetworkPolicy = newFieldName("Has Ingress Network Policy") diff --git a/pkg/booleanpolicy/node_policies_test.go b/pkg/booleanpolicy/node_policies_test.go index d7eeaabd790ab..ac1443ee8c03b 100644 --- a/pkg/booleanpolicy/node_policies_test.go +++ b/pkg/booleanpolicy/node_policies_test.go @@ -263,6 +263,22 @@ func (s *NodeDetectionTestSuite) TestNodeFileAccess() { }, }, }, + { + description: "Node file policy with event containing both matching paths", + policy: s.getNodeFileAccessPolicy("/etc/passwd"), + events: []eventWrapper{ + { + access: &storage.FileAccess{ + File: &storage.FileAccess_File{ + ActualPath: "/etc/passwd", + EffectivePath: "/etc/passwd", + }, + Operation: storage.FileAccess_OPEN, + }, + expectAlert: true, + }, + }, + }, } { testutils.MustUpdateFeature(s.T(), features.SensitiveFileActivity, true) defer testutils.MustUpdateFeature(s.T(), features.SensitiveFileActivity, false) @@ -330,7 +346,7 @@ func (s *NodeDetectionTestSuite) getNodeFileAccessPolicyWithOperations(operation SectionName: "section 1", PolicyGroups: []*storage.PolicyGroup{ { - FieldName: fieldnames.ActualPath, + FieldName: fieldnames.FilePath, Values: pathValues, }, { @@ -365,7 +381,7 @@ func (s *NodeDetectionTestSuite) getNodeFileAccessPolicy(paths ...string) *stora SectionName: "section 1", PolicyGroups: []*storage.PolicyGroup{ { - FieldName: "Actual Path", + FieldName: fieldnames.FilePath, Values: policyValues, }, }, diff --git a/pkg/booleanpolicy/validate.go b/pkg/booleanpolicy/validate.go index c1b8a801ac5c8..ae09f38825b10 100644 --- a/pkg/booleanpolicy/validate.go +++ b/pkg/booleanpolicy/validate.go @@ -23,8 +23,7 @@ var ( // requiring the key to also exist. fieldDependencies = map[string]set.StringSet{ fieldnames.FileOperation: set.NewStringSet( - fieldnames.ActualPath, - fieldnames.EffectivePath, + fieldnames.FilePath, ), fieldnames.KubeUserName: set.NewStringSet( fieldnames.KubeResource, @@ -45,7 +44,7 @@ var ( // node events. In the future, when more node events are supported, // this constraint can be relaxed. 
storage.EventSource_NODE_EVENT: set.NewStringSet( - fieldnames.ActualPath, + fieldnames.FilePath, ), } ) diff --git a/pkg/booleanpolicy/validate_test.go b/pkg/booleanpolicy/validate_test.go index db3dad980742e..d2ada653c6327 100644 --- a/pkg/booleanpolicy/validate_test.go +++ b/pkg/booleanpolicy/validate_test.go @@ -655,7 +655,7 @@ func (s *PolicyValueValidator) TestValidateFileOperationRequiresFilePath() { SectionName: "bad2", PolicyGroups: []*storage.PolicyGroup{ { - FieldName: fieldnames.ActualPath, + FieldName: fieldnames.FilePath, Values: []*storage.PolicyValue{{Value: "/etc/passwd"}}, }, }, @@ -676,28 +676,7 @@ func (s *PolicyValueValidator) TestValidateFileOperationRequiresFilePath() { Values: []*storage.PolicyValue{{Value: "CREATE"}}, }, { - FieldName: fieldnames.ActualPath, - Values: []*storage.PolicyValue{{Value: "/etc/passwd"}}, - }, - }, - }, - }, - })) - - s.NoError(Validate(&storage.Policy{ - Name: "Valid Section with Effective Path", - PolicyVersion: policyversion.CurrentVersion().String(), - EventSource: storage.EventSource_DEPLOYMENT_EVENT, - PolicySections: []*storage.PolicySection{ - { - SectionName: "good", - PolicyGroups: []*storage.PolicyGroup{ - { - FieldName: fieldnames.FileOperation, - Values: []*storage.PolicyValue{{Value: "CREATE"}}, - }, - { - FieldName: fieldnames.EffectivePath, + FieldName: fieldnames.FilePath, Values: []*storage.PolicyValue{{Value: "/etc/passwd"}}, }, }, @@ -721,21 +700,4 @@ func (s *PolicyValueValidator) TestValidateFileOperationRequiresFilePath() { }, }, })) - - s.Error(Validate(&storage.Policy{ - Name: "Effective Path with NODE_EVENT should error", - PolicyVersion: policyversion.CurrentVersion().String(), - EventSource: storage.EventSource_NODE_EVENT, - PolicySections: []*storage.PolicySection{ - { - SectionName: "bad", - PolicyGroups: []*storage.PolicyGroup{ - { - FieldName: fieldnames.EffectivePath, - Values: []*storage.PolicyValue{{Value: "/etc/passwd"}}, - }, - }, - }, - }, - })) } diff --git a/pkg/booleanpolicy/violationmessages/printer.go b/pkg/booleanpolicy/violationmessages/printer.go index a0c05b6782c0e..34a72e30b3bb5 100644 --- a/pkg/booleanpolicy/violationmessages/printer.go +++ b/pkg/booleanpolicy/violationmessages/printer.go @@ -90,7 +90,7 @@ var ( requiredKubeEventFields = set.NewFrozenStringSet(augmentedobjs.KubernetesAPIVerbCustomTag, augmentedobjs.KubernetesResourceCustomTag) requiredNetworkFlowFields = set.NewFrozenStringSet(augmentedobjs.NotInNetworkBaselineCustomTag) requiredNetworkPolicyFields = set.NewFrozenStringSet(augmentedobjs.HasEgressPolicyCustomTag, augmentedobjs.HasIngressPolicyCustomTag) - requiredFileAccessFields = set.NewFrozenStringSet(search.ActualPath.String(), search.EffectivePath.String(), search.FileOperation.String()) + requiredFileAccessFields = set.NewFrozenStringSet(augmentedobjs.FileAccessPathCustomTag, search.FileOperation.String()) ) func containsAllRequiredFields(fieldMap map[string][]string, required set.StringSet) bool { diff --git a/ui/apps/platform/src/Containers/Policies/Wizard/Step3/policyCriteriaDescriptors.tsx b/ui/apps/platform/src/Containers/Policies/Wizard/Step3/policyCriteriaDescriptors.tsx index db1f562af2404..aa36bafe6a208 100644 --- a/ui/apps/platform/src/Containers/Policies/Wizard/Step3/policyCriteriaDescriptors.tsx +++ b/ui/apps/platform/src/Containers/Policies/Wizard/Step3/policyCriteriaDescriptors.tsx @@ -1506,23 +1506,9 @@ export const policyCriteriaDescriptors: Descriptor[] = [ lifecycleStages: ['DEPLOY', 'RUNTIME'], }, { - label: 'Effective path', - name: 'Effective 
Path', - shortName: 'Effective path', - longName: 'The file path as observed by the process', - category: policyCriteriaCategories.FILE_ACTIVITY, - type: 'select', - placeholder: 'Select a file path', - options: fileActivityPathOptions, - canBooleanLogic: false, - lifecycleStages: ['RUNTIME'], - featureFlagDependency: ['ROX_SENSITIVE_FILE_ACTIVITY'], - }, - { - label: 'Actual path', - name: 'Actual Path', - shortName: 'Actual path', - longName: 'The file path on the filesystem or volume mount', + label: 'File path', + name: 'File Path', + shortName: 'File path', category: policyCriteriaCategories.FILE_ACTIVITY, type: 'select', placeholder: 'Select a file path', @@ -1666,9 +1652,9 @@ export const auditLogDescriptor: Descriptor[] = [ export const nodeEventDescriptor: Descriptor[] = [ { - label: 'Actual path', - name: 'Actual Path', - shortName: 'Actual path', + label: 'File path', + name: 'File Path', + shortName: 'File path', category: policyCriteriaCategories.FILE_ACTIVITY, type: 'select', placeholder: 'Select a file path', diff --git a/ui/apps/platform/src/Containers/Policies/Wizard/Step3/policyCriteriaValidators.test.ts b/ui/apps/platform/src/Containers/Policies/Wizard/Step3/policyCriteriaValidators.test.ts index c89fee26bc9e0..f172e39163d0b 100644 --- a/ui/apps/platform/src/Containers/Policies/Wizard/Step3/policyCriteriaValidators.test.ts +++ b/ui/apps/platform/src/Containers/Policies/Wizard/Step3/policyCriteriaValidators.test.ts @@ -109,103 +109,60 @@ describe('policyCriteriaValidators', () => { }); }); - describe('File operation requires mounted file path (Deploy) validator', () => { + describe('File operation requires file path validator', () => { const validator = policySectionValidators.find( - (v) => v.name === 'File operation requires file path (Deploy)' + (v) => v.name === 'File operation requires file path' ); if (!validator) { - throw new Error('File operation requires file path (Deploy) validator not found'); + throw new Error('File operation requires file path validator not found'); } - const context: PolicyContext = { - eventSource: 'DEPLOYMENT_EVENT', - lifecycleStages: ['RUNTIME'], - }; - - it('should only apply to DEPLOYMENT_EVENT with RUNTIME lifecycle stage', () => { + it('should only apply to DEPLOYMENT_EVENT or NODE_EVENT with RUNTIME lifecycle stage', () => { policyEventSources.forEach((eventSource) => { expect( validator.appliesTo({ eventSource, lifecycleStages: ['RUNTIME'], }) - ).toBe(eventSource === 'DEPLOYMENT_EVENT'); + ).toBe(eventSource === 'DEPLOYMENT_EVENT' || eventSource === 'NODE_EVENT'); }); }); - it('should fail when File Operation is present but Effective Path is missing', () => { - const section: ClientPolicySection = { - sectionName: 'Test Section', - policyGroups: [mockCriterionWithName('File Operation', [{ value: 'CREATE' }])], - }; - const error = validator.validate(section, context); - expect(error).toBeDefined(); - }); + describe.each(policyEventSources)( + 'should pass when File Operation and a path criterion are both present with values for %s event source', + (eventSource) => { + if (eventSource !== 'DEPLOYMENT_EVENT' && eventSource !== 'NODE_EVENT') { + it.skip(`should not apply to ${eventSource} event source`); + } - it('should pass when File Operation and Effective Path both present with values', () => { - const section: ClientPolicySection = { - sectionName: 'Test Section', - policyGroups: [ - mockCriterionWithName('File Operation', [{ value: 'CREATE' }]), - mockCriterionWithName('Effective Path', [{ value: '/etc/passwd' }]), - ], - }; - 
expect(validator.validate(section, context)).toBeUndefined(); - }); - }); + const context: PolicyContext = { + eventSource, + lifecycleStages: ['RUNTIME'], + }; - describe('File operation requires node file path (Node) validator', () => { - const validator = policySectionValidators.find( - (v) => v.name === 'File operation requires file path (Node)' + it('should pass when File Operation and File Path both present with values', () => { + const section: ClientPolicySection = { + sectionName: 'Test Section', + policyGroups: [ + mockCriterionWithName('File Operation', [{ value: 'CREATE' }]), + mockCriterionWithName('File Path', [{ value: '/etc/passwd' }]), + ], + }; + expect(validator.validate(section, context)).toBeUndefined(); + }); + + it('should fail when File Operation is present but File Path is missing', () => { + const section: ClientPolicySection = { + sectionName: 'Test Section', + policyGroups: [ + mockCriterionWithName('File Operation', [{ value: 'CREATE' }]), + ], + }; + const error = validator.validate(section, context); + expect(error).toBeDefined(); + }); + } ); - - if (!validator) { - throw new Error('File operation requires file path (Node) validator not found'); - } - - const context: PolicyContext = { - eventSource: 'NODE_EVENT', - lifecycleStages: ['RUNTIME'], - }; - - it('should only apply to NODE_EVENT with RUNTIME lifecycle stage', () => { - policyEventSources.forEach((eventSource) => { - expect( - validator.appliesTo({ - eventSource, - lifecycleStages: ['RUNTIME'], - }) - ).toBe(eventSource === 'NODE_EVENT'); - }); - }); - - it('should pass when File Operation is not present', () => { - const section: ClientPolicySection = { - sectionName: 'Test Section', - policyGroups: [mockCriterionWithName('Some Other Criterion')], - }; - expect(validator.validate(section, context)).toBeUndefined(); - }); - - it('should fail when File Operation is present but Actual Path is missing', () => { - const section: ClientPolicySection = { - sectionName: 'Test Section', - policyGroups: [mockCriterionWithName('File Operation', [{ value: 'CREATE' }])], - }; - const error = validator.validate(section, context); - expect(error).toBeDefined(); - }); - - it('should pass when File Operation and Actual Path both present with values', () => { - const section: ClientPolicySection = { - sectionName: 'Test Section', - policyGroups: [ - mockCriterionWithName('File Operation', [{ value: 'CREATE' }]), - mockCriterionWithName('Actual Path', [{ value: '/etc/passwd' }]), - ], - }; - expect(validator.validate(section, context)).toBeUndefined(); - }); }); }); diff --git a/ui/apps/platform/src/Containers/Policies/Wizard/Step3/policyCriteriaValidators.ts b/ui/apps/platform/src/Containers/Policies/Wizard/Step3/policyCriteriaValidators.ts index b435d627a126e..395ce53a40174 100644 --- a/ui/apps/platform/src/Containers/Policies/Wizard/Step3/policyCriteriaValidators.ts +++ b/ui/apps/platform/src/Containers/Policies/Wizard/Step3/policyCriteriaValidators.ts @@ -43,31 +43,16 @@ export const policySectionValidators: PolicySectionValidator[] = [ }, }, { - name: 'File operation requires file path (Deploy)', + name: 'File operation requires file path', appliesTo: (context) => context.lifecycleStages.includes('RUNTIME') && - context.eventSource === 'DEPLOYMENT_EVENT', + (context.eventSource === 'NODE_EVENT' || context.eventSource === 'DEPLOYMENT_EVENT'), validate: ({ policyGroups }) => { const hasFileOperation = policyGroupsHasCriterion(policyGroups, 'File Operation'); - const hasEffectivePath = 
policyGroupsHasCriterion(policyGroups, 'Effective Path'); - const hasActualPath = policyGroupsHasCriterion(policyGroups, 'Actual Path'); + const hasFilePath = policyGroupsHasCriterion(policyGroups, 'File Path'); - if (hasFileOperation && !hasEffectivePath && !hasActualPath) { - return 'Criterion must be present with at least one value when using File operation: Effective Path or Actual Path'; - } - return undefined; - }, - }, - { - name: 'File operation requires file path (Node)', - appliesTo: (context) => - context.lifecycleStages.includes('RUNTIME') && context.eventSource === 'NODE_EVENT', - validate: ({ policyGroups }) => { - const hasFileOperation = policyGroupsHasCriterion(policyGroups, 'File Operation'); - const hasActualPath = policyGroupsHasCriterion(policyGroups, 'Actual Path'); - - if (hasFileOperation && !hasActualPath) { - return 'Criterion must be present with at least one value when using File operation: Actual Path'; + if (hasFileOperation && !hasFilePath) { + return 'Criterion must be present with at least one value when using File operation: File Path'; } return undefined; }, From c42b7fe9beeeb323beb83c14827511c6fc15ddf2 Mon Sep 17 00:00:00 2001 From: Stephan Hesselmann Date: Mon, 16 Feb 2026 16:57:39 +0100 Subject: [PATCH 200/232] ROX-32943: add pkg/coalescer (#18807) --- pkg/coalescer/coalescer.go | 49 +++++++++++++++++ pkg/coalescer/coalescer_test.go | 93 +++++++++++++++++++++++++++++++++ 2 files changed, 142 insertions(+) create mode 100644 pkg/coalescer/coalescer.go create mode 100644 pkg/coalescer/coalescer_test.go diff --git a/pkg/coalescer/coalescer.go b/pkg/coalescer/coalescer.go new file mode 100644 index 0000000000000..b7d808fa7a553 --- /dev/null +++ b/pkg/coalescer/coalescer.go @@ -0,0 +1,49 @@ +package coalescer + +import ( + "context" + + "golang.org/x/sync/singleflight" +) + +// Coalescer coalesces concurrent calls with the same key into a single execution, +// respecting context cancellation for each caller independently. +// +// Unlike raw singleflight.Group, Coalescer: +// - Respects per-caller context deadlines (callers can fail fast). +// - Returns typed results without requiring type assertions. +type Coalescer[T any] struct { + group singleflight.Group +} + +// New creates a new Coalescer instance. +func New[T any]() *Coalescer[T] { + return &Coalescer[T]{} +} + +// Coalesce executes fn for the given key, coalescing concurrent calls. +// If the context is cancelled while waiting, the context error is returned. +// The underlying function continues executing for other waiters. +func (c *Coalescer[T]) Coalesce(ctx context.Context, key string, fn func() (T, error)) (T, error) { + ch := c.group.DoChan(key, func() (interface{}, error) { + return fn() + }) + + select { + case <-ctx.Done(): + var zero T + return zero, ctx.Err() + case result := <-ch: + if result.Err != nil { + var zero T + return zero, result.Err + } + return result.Val.(T), nil + } +} + +// Forget tells the coalescer to forget about a key, allowing a new call to start. +// This is useful for cache invalidation scenarios. 
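// --- Editor's sketch (illustrative, not part of this patch): minimal Coalesce usage. ---
// Concurrent callers that pass the same key share a single execution of the supplied
// function, while each caller still honors its own context. The fetchValue helper and
// the "config" key below are hypothetical, used only to show the call shape.
//
//	c := coalescer.New[string]()
//	val, err := c.Coalesce(ctx, "config", func() (string, error) {
//		return fetchValue(ctx) // hypothetical expensive lookup, run once per key
//	})
//	if err != nil {
//		// err is ctx.Err() if this caller's context ended while waiting;
//		// other waiters on the same key are unaffected.
//	}
//	_ = val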
+func (c *Coalescer[T]) Forget(key string) { + c.group.Forget(key) +} diff --git a/pkg/coalescer/coalescer_test.go b/pkg/coalescer/coalescer_test.go new file mode 100644 index 0000000000000..23d987b68815a --- /dev/null +++ b/pkg/coalescer/coalescer_test.go @@ -0,0 +1,93 @@ +package coalescer + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCoalescer_ContextCancellation(t *testing.T) { + c := New[string]() + barrier := make(chan struct{}) + defer close(barrier) + fnStarted := make(chan struct{}) + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + <-fnStarted + cancel() + }() + + _, err := c.Coalesce(ctx, "key", func() (string, error) { + close(fnStarted) + <-barrier // Block forever + return "result", nil + }) + + assert.ErrorIs(t, err, context.Canceled) +} + +func TestCoalescer_ContextDeadlineExceeded(t *testing.T) { + c := New[string]() + barrier := make(chan struct{}) + defer close(barrier) + + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Millisecond) + defer cancel() + + _, err := c.Coalesce(ctx, "key", func() (string, error) { + <-barrier // Block forever + return "result", nil + }) + + assert.ErrorIs(t, err, context.DeadlineExceeded) +} + +func TestCoalescer_TypedResults(t *testing.T) { + type customType struct { + value int + name string + } + + c := New[*customType]() + + result, err := c.Coalesce(context.Background(), "key", func() (*customType, error) { + return &customType{value: 42, name: "test"}, nil + }) + + require.NoError(t, err) + require.NotNil(t, result) + assert.Equal(t, 42, result.value) + assert.Equal(t, "test", result.name) +} + +func TestCoalescer_NilResult(t *testing.T) { + type customType struct{} + + c := New[*customType]() + + result, err := c.Coalesce(context.Background(), "key", func() (*customType, error) { + return nil, nil + }) + + require.NoError(t, err) + assert.Nil(t, result) +} + +func TestCoalescer_ErrorPropagation(t *testing.T) { + c := New[string]() + + expectedErr := errors.New("test error from coalesced function") + + result, err := c.Coalesce(context.Background(), "key", func() (string, error) { + return "", expectedErr + }) + + assert.ErrorIs(t, err, expectedErr) + assert.Empty(t, result) +} From da6c4c757efee65cca1a553b97145becb40738ea Mon Sep 17 00:00:00 2001 From: David House <105243888+davdhacs@users.noreply.github.com> Date: Mon, 16 Feb 2026 17:22:06 -0700 Subject: [PATCH 201/232] fix(ci): save gha caches only on master (#18984) --- .../actions/cache-go-dependencies/action.yaml | 40 +++++++++++++++---- .../actions/cache-ui-dependencies/action.yaml | 12 ++++++ .../emailsender-central-compatibility.yaml | 8 +--- .../scanner-db-integration-tests.yaml | 2 + .github/workflows/style.yaml | 18 ++++++++- .github/workflows/unit-tests.yaml | 11 +++++ 6 files changed, 75 insertions(+), 16 deletions(-) diff --git a/.github/actions/cache-go-dependencies/action.yaml b/.github/actions/cache-go-dependencies/action.yaml index 8592329c7a4f1..ce22b635b7182 100644 --- a/.github/actions/cache-go-dependencies/action.yaml +++ b/.github/actions/cache-go-dependencies/action.yaml @@ -1,5 +1,10 @@ name: Cache Go Dependencies description: Cache Go Dependencies +inputs: + save: + description: Whether this job saves caches on pushes to the default branch. 
+ required: false + default: 'true' runs: using: composite steps: @@ -9,21 +14,42 @@ runs: echo "GOCACHE=$(go env GOCACHE)" >> "$GITHUB_OUTPUT" echo "GOMODCACHE=$(go env GOMODCACHE)" >> "$GITHUB_OUTPUT" echo "GOARCH=$(go env GOARCH)" >> "$GITHUB_OUTPUT" + echo "TAG=$(date +%Yw%U)" >> "$GITHUB_OUTPUT" shell: bash - - name: Cache Go Dependencies + # Save caches only on pushes to the default branch. + # All other events (PRs, etc.) restore only. + - name: Cache Go Dependencies (save) + if: inputs.save == 'true' && (github.event_name == 'push' && github.ref_name == github.event.repository.default_branch) uses: actions/cache@v5 with: - path: | - ${{ steps.cache-paths.outputs.GOMODCACHE }} + path: ${{ steps.cache-paths.outputs.GOMODCACHE }} key: go-mod-v1-${{ hashFiles('**/go.sum') }} + restore-keys: go-mod-v1- - - name: Cache Go Build + - name: Cache Go Dependencies (restore) + if: ${{ !(inputs.save == 'true' && (github.event_name == 'push' && github.ref_name == github.event.repository.default_branch)) }} + uses: actions/cache/restore@v5 + with: + path: ${{ steps.cache-paths.outputs.GOMODCACHE }} + key: go-mod-v1-${{ hashFiles('**/go.sum') }} + restore-keys: go-mod-v1- + + - name: Cache Go Build (save) + if: inputs.save == 'true' && (github.event_name == 'push' && github.ref_name == github.event.repository.default_branch) uses: actions/cache@v5 with: - path: | - ${{ steps.cache-paths.outputs.GOCACHE }} - key: go-build-v1-${{ github.job }}-${{ steps.cache-paths.outputs.GOARCH }}-${{ hashFiles('**/go.sum') }} + path: ${{ steps.cache-paths.outputs.GOCACHE }} + key: go-build-v1-${{ github.job }}-${{ steps.cache-paths.outputs.GOARCH }}-${{ steps.cache-paths.outputs.TAG }} + restore-keys: go-build-v1-${{ github.job }}-${{ steps.cache-paths.outputs.GOARCH }}- + + - name: Cache Go Build (restore) + if: ${{ !(inputs.save == 'true' && (github.event_name == 'push' && github.ref_name == github.event.repository.default_branch)) }} + uses: actions/cache/restore@v5 + with: + path: ${{ steps.cache-paths.outputs.GOCACHE }} + key: go-build-v1-${{ github.job }}-${{ steps.cache-paths.outputs.GOARCH }}-${{ steps.cache-paths.outputs.TAG }} + restore-keys: go-build-v1-${{ github.job }}-${{ steps.cache-paths.outputs.GOARCH }}- - name: Download Go modules run: make deps --always-make diff --git a/.github/actions/cache-ui-dependencies/action.yaml b/.github/actions/cache-ui-dependencies/action.yaml index 908105a981e0b..b8340087a2d3b 100644 --- a/.github/actions/cache-ui-dependencies/action.yaml +++ b/.github/actions/cache-ui-dependencies/action.yaml @@ -9,6 +9,7 @@ runs: using: composite steps: - name: Cache UI Dependencies + if: github.event_name == 'push' && github.ref_name == github.event.repository.default_branch uses: actions/cache@v5 with: path: | @@ -16,3 +17,14 @@ runs: /github/home/.cache/Cypress /usr/local/share/.cache key: npm-v2-${{ hashFiles(inputs.lockFile) }} + + - name: Restore UI Dependencies + if: ${{ !(github.event_name == 'push' && github.ref_name == github.event.repository.default_branch) }} + uses: actions/cache/restore@v5 + with: + path: | + /github/home/.npm + /github/home/.cache/Cypress + /usr/local/share/.cache + key: npm-v2-${{ hashFiles(inputs.lockFile) }} + restore-keys: npm-v2- diff --git a/.github/workflows/emailsender-central-compatibility.yaml b/.github/workflows/emailsender-central-compatibility.yaml index 9de49411e5612..7aac683b8580b 100644 --- a/.github/workflows/emailsender-central-compatibility.yaml +++ b/.github/workflows/emailsender-central-compatibility.yaml @@ -57,13 +57,7 @@ 
jobs: uses: actions/setup-go@v6 with: go-version-file: stackrox/go.mod - - name: Cache go module - uses: actions/cache@v5 - with: - path: ~/go/pkg/mod - key: ${{ runner.os }}-go-${{ hashFiles('stackrox/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- + cache: false # repo checkout is in stackrox/ subdir, setup-go can't find go.sum - uses: ./stackrox/.github/actions/job-preamble with: free-disk-space: 40 diff --git a/.github/workflows/scanner-db-integration-tests.yaml b/.github/workflows/scanner-db-integration-tests.yaml index 26e0f1d4969f4..50a07649c9206 100644 --- a/.github/workflows/scanner-db-integration-tests.yaml +++ b/.github/workflows/scanner-db-integration-tests.yaml @@ -35,6 +35,8 @@ jobs: - name: Cache Go dependencies uses: ./.github/actions/cache-go-dependencies + with: + save: false # ~62 MB, test-dominated, no critical path speedup - name: Is Postgres ready run: pg_isready -h 127.0.0.1 diff --git a/.github/workflows/style.yaml b/.github/workflows/style.yaml index df239fd819858..df9e2ca2152e9 100644 --- a/.github/workflows/style.yaml +++ b/.github/workflows/style.yaml @@ -41,6 +41,8 @@ jobs: - name: Cache Go dependencies uses: ./.github/actions/cache-go-dependencies + with: + save: false # ~752 MB, style-dominated, no critical path speedup - name: Check Generated run: scripts/ci/jobs/check-generated.sh @@ -109,6 +111,8 @@ jobs: - name: Cache Go dependencies uses: ./.github/actions/cache-go-dependencies + with: + save: false # ~2,183 MB, style-dominated, no critical path speedup - name: Cache UI dependencies uses: ./.github/actions/cache-ui-dependencies @@ -136,17 +140,27 @@ jobs: - uses: actions/setup-go@v6 with: go-version-file: 'tools/linters/go.mod' + cache: false # ~3,354 MB via setup-go; disabled to stay within 10 GB budget - name: Check Cache golangci-lint run: make golangci-lint-cache-status - name: Cache golangci-lint + if: github.event_name == 'push' && github.ref_name == github.event.repository.default_branch uses: actions/cache@v5 with: - path: /github/home/.cache/golangci-lint + path: ${{ env.HOME }}/.cache/golangci-lint + key: go-lint-v2-${{ hashFiles('**/go.sum') }} + restore-keys: | + go-lint-v2- + + - name: Restore golangci-lint analysis cache + if: ${{ !(github.event_name == 'push' && github.ref_name == github.event.repository.default_branch) }} + uses: actions/cache/restore@v5 + with: + path: ${{ env.HOME }}/.cache/golangci-lint key: go-lint-v2-${{ hashFiles('**/go.sum') }} restore-keys: | - go-lint-v2-${{ hashFiles('**/go.sum') }} go-lint-v2- - name: Check cache golangci-lint diff --git a/.github/workflows/unit-tests.yaml b/.github/workflows/unit-tests.yaml index 8dc7956c9ad71..60bb610c9fc5b 100644 --- a/.github/workflows/unit-tests.yaml +++ b/.github/workflows/unit-tests.yaml @@ -42,6 +42,8 @@ jobs: - name: Cache Go dependencies uses: ./.github/actions/cache-go-dependencies + with: + save: false # ~2,000 MB, test-dominated, no critical path speedup - name: Go Unit Tests run: ${{ matrix.gotags }} make go-unit-tests @@ -122,6 +124,8 @@ jobs: - name: Cache Go dependencies uses: ./.github/actions/cache-go-dependencies + with: + save: false # ~913 MB, test-dominated, no critical path speedup - name: Is Postgres ready run: pg_isready -h 127.0.0.1 @@ -182,6 +186,8 @@ jobs: - name: Cache Go dependencies uses: ./.github/actions/cache-go-dependencies + with: + save: false # ~610 MB, test-dominated, no critical path speedup - name: Is Postgres ready run: pg_isready -h 127.0.0.1 @@ -303,6 +309,8 @@ jobs: - name: Cache Go dependencies uses: 
./.github/actions/cache-go-dependencies + with: + save: false # ~1,163 MB, test-dominated, no critical path speedup - uses: ./.github/actions/handle-tagged-build @@ -395,6 +403,7 @@ jobs: uses: actions/setup-go@v6 with: go-version-file: go.mod + cache: false - uses: ./.github/actions/job-preamble with: @@ -402,6 +411,8 @@ jobs: - name: Cache Go dependencies uses: ./.github/actions/cache-go-dependencies + with: + save: false # ~626 MB, test-dominated, no critical path speedup - name: Login to Quay.io uses: docker/login-action@v3 From f70f20fd6ff0d838b22ada02e770badea94ac6a7 Mon Sep 17 00:00:00 2001 From: Marcin Owsiany Date: Tue, 17 Feb 2026 06:21:29 +0100 Subject: [PATCH 202/232] ROX-32012: Clean up remains of upstream OLM (#18938) --- .openshift-ci/ci_tests.py | 2 +- operator/Makefile | 10 ------ operator/README.md | 33 ++++++++--------- operator/tools/operator-sdk/go.mod | 34 +----------------- operator/tools/operator-sdk/go.sum | 55 ++++++----------------------- operator/tools/operator-sdk/tool.go | 1 - 6 files changed, 29 insertions(+), 106 deletions(-) diff --git a/.openshift-ci/ci_tests.py b/.openshift-ci/ci_tests.py index 7dae063a04b0c..c5e5ad6bacf53 100755 --- a/.openshift-ci/ci_tests.py +++ b/.openshift-ci/ci_tests.py @@ -74,7 +74,7 @@ def run(self): class OperatorE2eTest(BaseTest): - OLM_SETUP_TIMEOUT_SEC = 60 * 10 + OLM_SETUP_TIMEOUT_SEC = 60 * 2 TEST_TIMEOUT_SEC = 60 * 60 * 2 OPERATOR_CLUSTER_TYPE_OPENSHIFT4 = "openshift4" diff --git a/operator/Makefile b/operator/Makefile index c81bfd46c8793..cc3b0777593fe 100644 --- a/operator/Makefile +++ b/operator/Makefile @@ -193,7 +193,6 @@ include $(PROJECT_DIR)/../make/gotools.mk $(call go-tool, CONTROLLER_GEN, sigs.k8s.io/controller-tools/cmd/controller-gen, tools/controller-gen) $(call go-tool, ENVTEST, sigs.k8s.io/controller-runtime/tools/setup-envtest, tools/envtest) $(call go-tool, KUSTOMIZE, sigs.k8s.io/kustomize/kustomize/v5, tools/kustomize) -$(call go-tool, OLM, github.com/operator-framework/operator-lifecycle-manager/cmd/olm, tools/operator-sdk) $(call go-tool, KUTTL, github.com/kudobuilder/kuttl/cmd/kubectl-kuttl, tools/kuttl) $(call go-tool, YQ, github.com/mikefarah/yq/v4, tools/yq) @@ -205,7 +204,6 @@ $(OPERATOR_SDK): tools/operator-sdk/go.mod tools/operator-sdk/go.sum $(SILENT)cd tools/operator-sdk && GOBIN="$(dir $@)" go install -tags=containers_image_openpgp github.com/operator-framework/operator-sdk/cmd/operator-sdk OPERATOR_SDK_VERSION = $(shell cd tools/operator-sdk; go list -m -f '{{ .Version }}' github.com/operator-framework/operator-sdk) -OLM_VERSION = $(shell cd tools/operator-sdk; go list -m -f '{{ .Version }}' github.com/operator-framework/operator-lifecycle-manager) .PHONY: kustomize kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. @@ -359,14 +357,6 @@ ifndef ignore-not-found ignore-not-found = false endif -.PHONY: olm-install -olm-install: operator-sdk ## Install OLM on Kubernetes cluster - $(OPERATOR_SDK) olm install --version=$(OLM_VERSION) - -.PHONY: olm-uninstall -olm-uninstall: operator-sdk ## Uninstall OLM previously installed on Kubernetes cluster - $(OPERATOR_SDK) olm uninstall - .PHONY: install install: check-ci-setup manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. 
$(KUSTOMIZE) build config/crd | $(PROJECT_DIR)/hack/retry-kubectl.sh apply -f - diff --git a/operator/README.md b/operator/README.md index 81f980d6d3c10..1db489a5fc8ff 100644 --- a/operator/README.md +++ b/operator/README.md @@ -143,6 +143,10 @@ You need to have the following set before running most targets mentioned in this $ export ROX_PRODUCT_BRANDING=RHACS_BRANDING ``` +Also, as of early 2026, upstream OLM does not work on GKE out of the box due to network policy issues +[(Slack thread)](https://kubernetes.slack.com/archives/CAW0GV7A5/p1763021938129309). +Use an OpenShift cluster, which comes with OLM pre-installed. + ```bash # Refresh bundle metadata. Make sure to check the diff and commit it. $ make bundle @@ -175,10 +179,7 @@ $ make bundle-test-image # 0. Get the operator-sdk program. $ make operator-sdk -# 1. Install OLM, unless running on OpenShift. -$ make olm-install - -# 2. Create a namespace for testing bundle. +# 1. Create a namespace for testing bundle. $ kubectl create ns bundle-test # 2. Create image pull secrets. @@ -192,17 +193,17 @@ $ kubectl -n bundle-test create secret docker-registry my-opm-image-pull-secrets # 3. Configure default service account to use these pull secrets. $ kubectl -n bundle-test patch serviceaccount default -p '{"imagePullSecrets": [{"name": "my-opm-image-pull-secrets"}]}' -# 3. Build and push operator and bundle images. +# 4. Build and push operator and bundle images. # Use one-liner above. -# 4. Run bundle. +# 5. Run bundle. $ `make which-operator-sdk` run bundle \ quay.io/rhacs-eng/stackrox-operator-bundle:v$(make --quiet --no-print-directory tag) \ --pull-secret-name my-opm-image-pull-secrets \ --service-account default \ --namespace bundle-test -# 5. Add image pull secrets to operator's ServiceAccount. +# 6. Add image pull secrets to operator's ServiceAccount. # Run it while the previous command executes otherwise it will fail. # Note that serviceaccount might not exist for a few moments. # Rerun this command until it succeeds. @@ -211,7 +212,7 @@ $ kubectl -n bundle-test patch serviceaccount rhacs-operator-controller-manager # You may need to bounce operator pods after this if they can't pull images for a while. $ kubectl -n bundle-test delete pod -l app=rhacs-operator -# 6. The above operator-sdk run bundle command should complete successfully. +# 7. The above operator-sdk run bundle command should complete successfully. # If it does not, watch pod statuses and check pod logs. $ kubectl -n bundle-test get pods # ... and dive deep from there into the ones that are not healthy. @@ -230,7 +231,6 @@ kubectl -n bundle-test delete catalogsources.operators.coreos.com rhacs-operator Also, you can tear everything down with ```bash -$ make olm-uninstall $ kubectl delete ns bundle-test ``` @@ -244,21 +244,23 @@ Instructions and best practices on how to extend the StackRox CRDs is contained These instructions are for deploying a version of the operator that has been pushed to the `rhacs-eng` Quay organization. See above for instructions on how to deploy an OLM bundle and index that was built locally. +Note: as of early 2026, upstream OLM does not work on GKE out of the box due to network policy issues +[(Slack thread)](https://kubernetes.slack.com/archives/CAW0GV7A5/p1763021938129309). +Use an OpenShift cluster, which comes with OLM pre-installed. + ### Prerequisites #### Required Binaries -Both the `kubectl-kuttl` and `operator-sdk` binaries are required for the following make targets to work. 
-There are make targets to install both executables: +The `kubectl-kuttl` binary is required for the following make targets to work. +There is a make target to install it: ```bash -make operator-sdk make kuttl ``` -These make targets will add the executable to your `$GOPATH`. -If that is not on your `$PATH`, then you can install the Operator SDK from its [release page](https://github.com/operator-framework/operator-sdk/releases) -and kuttl from its [release page](https://github.com/kudobuilder/kuttl/releases). +This make target will add the executable to your `$GOPATH`. +If that is not on your `$PATH`, then you can install kuttl from its [release page](https://github.com/kudobuilder/kuttl/releases). #### Pull Secret @@ -309,7 +311,6 @@ ROX_PRODUCT_BRANDING=RHACS_BRANDING make deploy-via-olm TEST_NAMESPACE=my-favori You can blow everything away with: ```bash -$ make olm-uninstall $ kubectl delete ns stackrox-operator-system # Optionally remove CRDs diff --git a/operator/tools/operator-sdk/go.mod b/operator/tools/operator-sdk/go.mod index a0d5e3f57c484..a5ec032eac1ad 100644 --- a/operator/tools/operator-sdk/go.mod +++ b/operator/tools/operator-sdk/go.mod @@ -2,24 +2,7 @@ module github.com/stackrox/rox/operator/tools/operator-sdk go 1.25.3 -require ( - github.com/operator-framework/operator-lifecycle-manager v0.40.0 - github.com/operator-framework/operator-sdk v1.42.0 -) - -// These are copied verbatim from -// https://github.com/operator-framework/operator-lifecycle-manager/blob/72b0467f7a4f136a90089d013ef8d36715193fe2/go.mod#L252 -// Without them "go mod tidy" failed with: -// -// go: finding module for package github.com/openshift/api/config/v1 -// go: github.com/stackrox/rox/operator/tools/operator-sdk imports -// github.com/operator-framework/operator-lifecycle-manager/cmd/olm imports -// github.com/openshift/client-go/config/clientset/versioned/typed/config/v1 imports -// github.com/openshift/api/config/v1: package github.com/openshift/api/config/v1 provided by github.com/openshift/api at latest version v0.0.0-20240508125607-95e22923d553 but not at required version v3.9.0+incompatible -replace ( - github.com/openshift/api => github.com/openshift/api v0.0.0-20221021112143-4226c2167e40 // release-4.12 - github.com/openshift/client-go => github.com/openshift/client-go v0.0.0-20221019143426-16aed247da5c // release-4.12 -) +require github.com/operator-framework/operator-sdk v1.42.0 require ( cel.dev/expr v0.25.1 // indirect @@ -58,7 +41,6 @@ require ( github.com/containerd/typeurl/v2 v2.2.3 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/containers/ocicrypt v1.2.1 // indirect - github.com/coreos/go-semver v0.3.1 // indirect github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect github.com/cyphar/filepath-securejoin v0.6.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -78,7 +60,6 @@ require ( github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/fxamacker/cbor/v2 v2.9.0 // indirect - github.com/go-air/gini v1.0.4 // indirect github.com/go-errors/errors v1.4.2 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.7.0 // indirect @@ -87,7 +68,6 @@ require ( github.com/go-jose/go-jose/v4 v4.1.3 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-logr/zapr v1.3.0 // indirect 
github.com/go-openapi/jsonpointer v0.22.4 // indirect github.com/go-openapi/jsonreference v0.21.4 // indirect github.com/go-openapi/swag v0.25.4 // indirect @@ -129,8 +109,6 @@ require ( github.com/huandu/xstrings v1.5.0 // indirect github.com/iancoleman/strcase v0.3.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/itchyny/gojq v0.12.18 // indirect - github.com/itchyny/timefmt-go v0.1.7 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jmoiron/sqlx v1.4.0 // indirect github.com/joelanford/ignore v0.1.1 // indirect @@ -153,7 +131,6 @@ require ( github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/locker v1.0.1 // indirect github.com/moby/spdystream v0.5.0 // indirect @@ -173,9 +150,6 @@ require ( github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect github.com/opencontainers/runtime-spec v1.3.0 // indirect - github.com/openshift/api v3.9.0+incompatible // indirect - github.com/openshift/client-go v0.0.0-20260108185524-48f4ccfc4e13 // indirect - github.com/openshift/library-go v0.0.0-20260204111611-b7d4fa0e292a // indirect github.com/operator-framework/ansible-operator-plugins v1.42.0 // indirect github.com/operator-framework/api v0.39.0 // indirect github.com/operator-framework/operator-manifest-tools v0.10.0 // indirect @@ -231,8 +205,6 @@ require ( go.podman.io/common v0.66.1 // indirect go.podman.io/image/v5 v5.38.0 // indirect go.podman.io/storage v1.61.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.1 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/crypto v0.47.0 // indirect @@ -246,7 +218,6 @@ require ( golang.org/x/text v0.33.0 // indirect golang.org/x/time v0.14.0 // indirect golang.org/x/tools v0.41.0 // indirect - gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect google.golang.org/genproto v0.0.0-20260203192932-546029d2fa20 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20 // indirect @@ -265,9 +236,7 @@ require ( k8s.io/cli-runtime v0.35.0 // indirect k8s.io/client-go v0.35.0 // indirect k8s.io/component-base v0.35.0 // indirect - k8s.io/klog v1.0.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-aggregator v0.35.0 // indirect k8s.io/kube-openapi v0.0.0-20260127142750-a19766b6e2d4 // indirect k8s.io/kubectl v0.35.0 // indirect k8s.io/utils v0.0.0-20260108192941-914a6e750570 // indirect @@ -280,7 +249,6 @@ require ( sigs.k8s.io/kustomize/api v0.20.1 // indirect sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect sigs.k8s.io/randfill v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 // indirect sigs.k8s.io/yaml v1.6.0 // indirect ) diff --git a/operator/tools/operator-sdk/go.sum b/operator/tools/operator-sdk/go.sum index 653bdc922a90e..cd7fd509b3b9b 100644 --- a/operator/tools/operator-sdk/go.sum +++ b/operator/tools/operator-sdk/go.sum @@ -85,8 +85,8 @@ github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpV github.com/containers/ocicrypt v1.2.1/go.mod 
h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/coreos/go-systemd/v22 v22.7.0 h1:LAEzFkke61DFROc7zNLX/WA2i5J8gYqe0rSj9KI28KA= -github.com/coreos/go-systemd/v22 v22.7.0/go.mod h1:xNUYtjHu2EDXbsxz1i41wouACIwT7Ybq9o0BQhMwD0w= +github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo= +github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= @@ -150,10 +150,6 @@ github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-air/gini v1.0.4 h1:lteMAxHKNOAjIqazL/klOJJmxq6YxxSuJ17MnMXny+s= -github.com/go-air/gini v1.0.4/go.mod h1:dd8RvT1xcv6N1da33okvBd8DhMh1/A4siGy6ErjTljs= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= @@ -166,7 +162,6 @@ github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -256,15 +251,12 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I= github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz 
v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef h1:xpF9fUHpoIrrjX24DURVKiwHcFpw19ndIs+FwTSMbno= github.com/google/pprof v0.0.0-20260202012954-cb029daf43ef/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -293,7 +285,6 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru/arc/v2 v2.0.7 h1:QxkVTxwColcduO+LP7eJO56r2hFiG8zEbfAAzRv52KQ= github.com/hashicorp/golang-lru/arc/v2 v2.0.7/go.mod h1:Pe7gBlGdc8clY5LJ0LpJXMt5AmgmWNH1g+oFFVUHOEc= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= @@ -304,10 +295,6 @@ github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSAS github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/itchyny/gojq v0.12.18 h1:gFGHyt/MLbG9n6dqnvlliiya2TaMMh6FFaR2b1H6Drc= -github.com/itchyny/gojq v0.12.18/go.mod h1:4hPoZ/3lN9fDL1D+aK7DY1f39XZpY9+1Xpjz8atrEkg= -github.com/itchyny/timefmt-go v0.1.7 h1:xyftit9Tbw+Dc/huSSPJaEmX1TVL8lw5vxjJLK4GMMA= -github.com/itchyny/timefmt-go v0.1.7/go.mod h1:5E46Q+zj7vbTgWY8o5YkMeYb4I6GeWLFnetPy5oBrAI= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= @@ -359,8 +346,6 @@ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= @@ -405,18 +390,10 @@ github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJw github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/opencontainers/runtime-spec v1.3.0 h1:YZupQUdctfhpZy3TM39nN9Ika5CBWT5diQ8ibYCRkxg= github.com/opencontainers/runtime-spec v1.3.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/openshift/api v0.0.0-20221021112143-4226c2167e40 
h1:PxjGCA72RtsdHWToZLkjjeWm7WXXx4cuv0u4gtvLbrk= -github.com/openshift/api v0.0.0-20221021112143-4226c2167e40/go.mod h1:aQ6LDasvHMvHZXqLHnX2GRmnfTWCF/iIwz8EMTTIE9A= -github.com/openshift/client-go v0.0.0-20221019143426-16aed247da5c h1:CV76yFOTXmq9VciBR3Bve5ZWzSxdft7gaMVB3kS0rwg= -github.com/openshift/client-go v0.0.0-20221019143426-16aed247da5c/go.mod h1:lFMO8mLHXWFzSdYvGNo8ivF9SfF6zInA8ZGw4phRnUE= -github.com/openshift/library-go v0.0.0-20260204111611-b7d4fa0e292a h1:YLnZtVfqGUfTbQ+M06QAslEmP4WrnRoPrk4AtoBJdm8= -github.com/openshift/library-go v0.0.0-20260204111611-b7d4fa0e292a/go.mod h1:DCRz1EgdayEmr9b6KXKDL+DWBN0rGHu/VYADeHzPoOk= github.com/operator-framework/ansible-operator-plugins v1.42.0 h1:ahupKUXl7sYKILEUp1tiQNW9WiFxpGGyN1UQ/EfsNGY= github.com/operator-framework/ansible-operator-plugins v1.42.0/go.mod h1:gGyNgCrNU1opGioTWbYdnbRTcJkJrFPS8Ysu/hKybnE= github.com/operator-framework/api v0.39.0 h1:9h7aVufeQ+l2ACXJE51hkMFcqrQwJOLM6/vwgGu6tgI= github.com/operator-framework/api v0.39.0/go.mod h1:tcYIwuznZzfo4HKUTu0dbquIHqxiewnKW/ZmhHKzMH8= -github.com/operator-framework/operator-lifecycle-manager v0.40.0 h1:IDR+NNdrghAxVaSy1uEoMLRObylXPdjV1392ZEO3OZI= -github.com/operator-framework/operator-lifecycle-manager v0.40.0/go.mod h1:GkRZehCNOiOAdFrByUIU/W7nG+KWSs77ceHY839bFfg= github.com/operator-framework/operator-manifest-tools v0.10.0 h1:+vtIElvGQ5e43gCD6fF65a0HNH3AD3LGnukUhpl9kjc= github.com/operator-framework/operator-manifest-tools v0.10.0/go.mod h1:eB/wnr0BOhMLNXPeceE+0p3vudP16zDNWP60Hvn3KaM= github.com/operator-framework/operator-registry v1.63.0 h1:UIahnpjkH7y98A8AgPw3DUXVsM1yQr36JajRaJ/00nQ= @@ -534,12 +511,12 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.4.3 h1:dEadXpI6G79deX5prL3QRNP6JB8UxVkqo4UPnHaNXJo= go.etcd.io/bbolt v1.4.3/go.mod h1:tKQlpPaYCVFctUIgFKFnAlvbmB3tpy1vkTnDWohtc0E= -go.etcd.io/etcd/api/v3 v3.6.7 h1:7BNJ2gQmc3DNM+9cRkv7KkGQDayElg8x3X+tFDYS+E0= -go.etcd.io/etcd/api/v3 v3.6.7/go.mod h1:xJ81TLj9hxrYYEDmXTeKURMeY3qEDN24hqe+q7KhbnI= -go.etcd.io/etcd/client/pkg/v3 v3.6.7 h1:vvzgyozz46q+TyeGBuFzVuI53/yd133CHceNb/AhBVs= -go.etcd.io/etcd/client/pkg/v3 v3.6.7/go.mod h1:2IVulJ3FZ/czIGl9T4lMF1uxzrhRahLqe+hSgy+Kh7Q= -go.etcd.io/etcd/client/v3 v3.6.7 h1:9WqA5RpIBtdMxAy1ukXLAdtg2pAxNqW5NUoO2wQrE6U= -go.etcd.io/etcd/client/v3 v3.6.7/go.mod h1:2XfROY56AXnUqGsvl+6k29wrwsSbEh1lAouQB1vHpeE= +go.etcd.io/etcd/api/v3 v3.6.5 h1:pMMc42276sgR1j1raO/Qv3QI9Af/AuyQUW6CBAWuntA= +go.etcd.io/etcd/api/v3 v3.6.5/go.mod h1:ob0/oWA/UQQlT1BmaEkWQzI0sJ1M0Et0mMpaABxguOQ= +go.etcd.io/etcd/client/pkg/v3 v3.6.5 h1:Duz9fAzIZFhYWgRjp/FgNq2gO1jId9Yae/rLn3RrBP8= +go.etcd.io/etcd/client/pkg/v3 v3.6.5/go.mod h1:8Wx3eGRPiy0qOFMZT/hfvdos+DjEaPxdIDiCDUv/FQk= +go.etcd.io/etcd/client/v3 v3.6.5 h1:yRwZNFBx/35VKHTcLDeO7XVLbCBFbPi+XV4OC3QJf2U= +go.etcd.io/etcd/client/v3 v3.6.5/go.mod h1:ZqwG/7TAFZ0BJ0jXRPoJjKQJtbFo/9NIY8uoFFKcCyo= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= @@ -548,8 +525,8 @@ go.opentelemetry.io/contrib/bridges/prometheus v0.65.0 h1:I/7S/yWobR3QHFLqHsJ8QO go.opentelemetry.io/contrib/bridges/prometheus v0.65.0/go.mod h1:jPF6gn3y1E+nozCAEQj3c6NZ8KY+tvAgSVfvoOJUFac= go.opentelemetry.io/contrib/exporters/autoexport v0.65.0 h1:2gApdml7SznX9szEKFjKjM4qGcGSvAybYLBY319XG3g= 
go.opentelemetry.io/contrib/exporters/autoexport v0.65.0/go.mod h1:0QqAGlbHXhmPYACG3n5hNzO5DnEqqtg4VcK5pr22RI0= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.65.0 h1:XmiuHzgJt067+a6kwyAzkhXooYVv3/TOw9cM2VfJgUM= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.65.0/go.mod h1:KDgtbWKTQs4bM+VPUr6WlL9m/WXcmkCcBlIzqxPGzmI= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0 h1:7iP2uCb7sGddAr30RRS6xjKy7AZ2JtTOPA3oolgVSw8= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.65.0/go.mod h1:c7hN3ddxs/z6q9xwvfLPk+UHlWRQyaeR1LdgfL/66l0= go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= @@ -598,8 +575,6 @@ go.podman.io/storage v1.61.0 h1:5hD/oyRYt1f1gxgvect+8syZBQhGhV28dCw2+CZpx0Q= go.podman.io/storage v1.61.0/go.mod h1:A3UBK0XypjNZ6pghRhuxg62+2NIm5lcUGv/7XyMhMUI= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= -go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= @@ -732,8 +707,6 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0= -gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -802,12 +775,8 @@ k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE= k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o= k8s.io/component-base v0.35.0 h1:+yBrOhzri2S1BVqyVSvcM3PtPyx5GUxCK2tinZz1G94= k8s.io/component-base v0.35.0/go.mod h1:85SCX4UCa6SCFt6p3IKAPej7jSnF3L8EbfSyMZayJR0= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-aggregator v0.35.0 h1:FBtbuRFA7Ohe2QKirFZcJf8rgimC8oSaNiCi4pdU5xw= -k8s.io/kube-aggregator v0.35.0/go.mod h1:vKBRpQUfDryb7udwUwF3eCSvv3AJNgHtL4PGl6PqAg8= k8s.io/kube-openapi v0.0.0-20260127142750-a19766b6e2d4 h1:HhDfevmPS+OalTjQRKbTHppRIz01AWi8s45TMXStgYY= k8s.io/kube-openapi v0.0.0-20260127142750-a19766b6e2d4/go.mod 
h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= k8s.io/kubectl v0.35.0 h1:cL/wJKHDe8E8+rP3G7avnymcMg6bH6JEcR5w5uo06wc= @@ -830,13 +799,9 @@ sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM= sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78= sigs.k8s.io/kustomize/kyaml v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po= -sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI= -sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 h1:2WOzJpHUBVrrkDjU4KBT8n5LDcj824eX0I5UKcgeRUs= sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/operator/tools/operator-sdk/tool.go b/operator/tools/operator-sdk/tool.go index 2fb1474520412..cca40906d44f6 100644 --- a/operator/tools/operator-sdk/tool.go +++ b/operator/tools/operator-sdk/tool.go @@ -3,6 +3,5 @@ package main import ( - _ "github.com/operator-framework/operator-lifecycle-manager/cmd/olm" _ "github.com/operator-framework/operator-sdk/cmd/operator-sdk" ) From 9b2c9021fa5017052c29e8587c8ce29bd3ef3800 Mon Sep 17 00:00:00 2001 From: Stephan Hesselmann Date: Tue, 17 Feb 2026 09:16:54 +0100 Subject: [PATCH 203/232] ROX-32943: use coalescer in centralproxy authorizer (#18810) --- sensor/common/centralproxy/authorizer.go | 206 +++++++++----- sensor/common/centralproxy/authorizer_test.go | 265 ++++++++++++++++++ 2 files changed, 401 insertions(+), 70 deletions(-) diff --git a/sensor/common/centralproxy/authorizer.go b/sensor/common/centralproxy/authorizer.go index ad106dc8a8dba..c0b358bdc67e2 100644 --- a/sensor/common/centralproxy/authorizer.go +++ b/sensor/common/centralproxy/authorizer.go @@ -2,11 +2,14 @@ package centralproxy import ( "context" + "fmt" "net/http" "slices" "strings" "time" + "github.com/pkg/errors" + "github.com/stackrox/rox/pkg/coalescer" "github.com/stackrox/rox/pkg/expiringcache" "github.com/stackrox/rox/pkg/grpc/authn" pkghttputil "github.com/stackrox/rox/pkg/httputil" @@ -21,18 +24,44 @@ import ( const ( stackroxNamespaceHeader = "ACS-AUTH-NAMESPACE-SCOPE" defaultCacheTTL = 3 * time.Minute + // k8sAPITimeout is the maximum time allowed for Kubernetes API calls (TokenReview, SubjectAccessReview). + // This ensures that API calls don't hang indefinitely when all callers have cancelled. + k8sAPITimeout = 30 * time.Second ) -// sarCacheKey uniquely identifies a SubjectAccessReview check for caching. +// authzCacheKey uniquely identifies an authorization check for caching. // It includes UID and groups to avoid over-permissive cache hits when different // tokens for the same username have different group memberships. 
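// Illustrative example (all values are made up): for a token whose TokenReview
// reports UID "u-123" and groups ["ops", "dev"], checked against namespace
// "payments", buildAuthzCacheKey (defined below) sorts the groups and joins
// them with "|", producing
//
//	authzCacheKey{uid: "u-123", userGroups: "dev|ops", namespace: "payments"}
//
// whose String() form, used as the coalescing key, is "u-123+dev|ops+payments".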
-type sarCacheKey struct { +type authzCacheKey struct { uid string userGroups string // Joined group names namespace string - verb string - resource string - group string +} + +// String returns a string representation of the cache key for use with singleflight. +func (k authzCacheKey) String() string { + return fmt.Sprintf("%s+%s+%s", k.uid, k.userGroups, k.namespace) +} + +// authzResult wraps the authorization result for caching. +// The zero value (nil error) represents successful authorization. +type authzResult struct { + err error +} + +// k8sResource represents a Kubernetes resource that requires authorization checking. +type k8sResource struct { + Resource string + Group string +} + +// String returns the qualified resource name in "resource.group" format. +// For core API resources (empty group), it returns "resource.core". +func (r k8sResource) String() string { + if r.Group == "" { + return fmt.Sprintf("%s.core", r.Resource) + } + return fmt.Sprintf("%s.%s", r.Resource, r.Group) } // k8sAuthorizer verifies that a bearer token has the required Kubernetes permissions. @@ -42,26 +71,26 @@ type sarCacheKey struct { type k8sAuthorizer struct { client kubernetes.Interface tokenCache expiringcache.Cache[string, *authenticationv1.UserInfo] - sarCache expiringcache.Cache[sarCacheKey, bool] + authzCache expiringcache.Cache[authzCacheKey, authzResult] verbsToCheck []string - resourcesToCheck []struct { - Resource string - Group string - } + resourcesToCheck []k8sResource + // tokenReviewGroup coalesces concurrent authentication requests for the same token. + tokenReviewGroup *coalescer.Coalescer[*authenticationv1.UserInfo] + // authzGroup coalesces concurrent authorization requests for the same user/namespace. + authzGroup *coalescer.Coalescer[authzResult] } // newK8sAuthorizer creates a new Kubernetes-based authorizer with TokenReview and // SubjectAccessReview caching. func newK8sAuthorizer(client kubernetes.Interface) *k8sAuthorizer { return &k8sAuthorizer{ - client: client, - tokenCache: expiringcache.NewExpiringCache[string, *authenticationv1.UserInfo](defaultCacheTTL), - sarCache: expiringcache.NewExpiringCache[sarCacheKey, bool](defaultCacheTTL), - verbsToCheck: []string{"get", "list"}, - resourcesToCheck: []struct { - Resource string - Group string - }{ + client: client, + tokenCache: expiringcache.NewExpiringCache[string, *authenticationv1.UserInfo](defaultCacheTTL), + authzCache: expiringcache.NewExpiringCache[authzCacheKey, authzResult](defaultCacheTTL), + verbsToCheck: []string{"get", "list"}, + tokenReviewGroup: coalescer.New[*authenticationv1.UserInfo](), + authzGroup: coalescer.New[authzResult](), + resourcesToCheck: []k8sResource{ {Resource: "pods", Group: ""}, {Resource: "replicationcontrollers", Group: ""}, {Resource: "daemonsets", Group: "apps"}, @@ -76,37 +105,57 @@ func newK8sAuthorizer(client kubernetes.Interface) *k8sAuthorizer { } // formatForbiddenErr creates a consistent forbidden error message for authorization failures. -func formatForbiddenErr(user, verb, resource, group, namespace string) error { +func formatForbiddenErr(user, verb string, resource k8sResource, namespace string) error { // Uppercase the verb for readability. verb = strings.ToUpper(verb) - // Format as resource.group using "core" for empty group. - qualifiedResource := resource + "." 
+ group - if group == "" { - qualifiedResource = resource + ".core" - } - if namespace == FullClusterAccessScope { return pkghttputil.Errorf( http.StatusForbidden, "user %q lacks cluster-wide %s permission for resource %q", - user, verb, qualifiedResource, + user, verb, resource.String(), ) } return pkghttputil.Errorf( http.StatusForbidden, "user %q lacks %s permission for resource %q in namespace %q", - user, verb, qualifiedResource, namespace, + user, verb, resource.String(), namespace, ) } // authenticate validates the bearer token using TokenReview and returns user information. +// Successful authentications are cached and concurrent requests are coalesced to reduce load +// on the Kubernetes API server. func (a *k8sAuthorizer) authenticate(ctx context.Context, r *http.Request) (*authenticationv1.UserInfo, error) { token, err := extractBearerToken(r) if err != nil { return nil, err } - return a.validateToken(ctx, token) + + // Fast path: check cache first. + if userInfo, ok := a.tokenCache.Get(token); ok { + return userInfo, nil + } + + // Slow path: coalesce concurrent authentication requests for the same token. + return a.tokenReviewGroup.Coalesce(ctx, token, func() (*authenticationv1.UserInfo, error) { //nolint:wrapcheck + // Double-check cache inside coalesce to avoid redundant API calls. + if userInfo, ok := a.tokenCache.Get(token); ok { + return userInfo, nil + } + + // Use a background context with timeout to ensure the shared function is independent + // of the initial request context while still having a bounded lifetime. + ctx, cancel := context.WithTimeout(context.Background(), k8sAPITimeout) + defer cancel() + userInfo, err := a.validateToken(ctx, token) + if err != nil { + return nil, err + } + + a.tokenCache.Add(token, userInfo) + return userInfo, nil + }) } func extractBearerToken(r *http.Request) (string, error) { @@ -119,12 +168,7 @@ func extractBearerToken(r *http.Request) (string, error) { } // validateToken validates the bearer token using TokenReview and returns user information. -// Successful authentications are cached to reduce API calls to the Kubernetes API server. func (a *k8sAuthorizer) validateToken(ctx context.Context, token string) (*authenticationv1.UserInfo, error) { - if userInfo, ok := a.tokenCache.Get(token); ok { - return userInfo, nil - } - tokenReview := &authenticationv1.TokenReview{ Spec: authenticationv1.TokenReviewSpec{ Token: token, @@ -144,7 +188,6 @@ func (a *k8sAuthorizer) validateToken(ctx context.Context, token string) (*authe return nil, pkghttputil.NewError(http.StatusUnauthorized, "token authentication failed") } - a.tokenCache.Add(token, &result.Status.User) return &result.Status.User, nil } @@ -156,7 +199,8 @@ func (a *k8sAuthorizer) validateToken(ctx context.Context, token string) (*authe // - FullClusterAccessScope ("*"): SubjectAccessReview for all namespaces and rox token // with cluster-wide access scope. // -// SAR checks are performed in parallel to reduce latency. +// Successful authorizations are cached and concurrent requests are coalesced to reduce load +// on the Kubernetes API server. func (a *k8sAuthorizer) authorize(ctx context.Context, userInfo *authenticationv1.UserInfo, r *http.Request) error { namespace := r.Header.Get(stackroxNamespaceHeader) // Skip authorization if the namespace header is empty or not set. @@ -164,63 +208,86 @@ func (a *k8sAuthorizer) authorize(ctx context.Context, userInfo *authenticationv return nil } + // Fast path: check cache first. 
+ cacheKey := a.buildAuthzCacheKey(userInfo, namespace) + if cached, ok := a.authzCache.Get(cacheKey); ok { + return cached.err + } + + // Slow path: coalesce concurrent authorization requests for the same user/namespace. + cached, err := a.authzGroup.Coalesce(ctx, cacheKey.String(), func() (authzResult, error) { + // Double-check cache inside coalesce to avoid redundant API calls. + if cached, ok := a.authzCache.Get(cacheKey); ok { + return cached, nil + } + + log.Debugf("Authorization cache miss for user %q (uid=%q) in namespace %q", userInfo.Username, userInfo.UID, namespace) + + // Use a background context with timeout to ensure the shared function is independent + // of the initial request context while still having a bounded lifetime. + ctx, cancel := context.WithTimeout(context.Background(), k8sAPITimeout) + defer cancel() + result := a.checkAllPermissions(ctx, userInfo, namespace) + // Only cache successful authorizations and permission denials (403 Forbidden). + // Transient errors should not be cached so callers can retry. + if result.err == nil || pkghttputil.StatusFromError(result.err) == http.StatusForbidden { + a.authzCache.Add(cacheKey, result) + } + return result, nil + }) + if err != nil { + return err //nolint:wrapcheck + } + return cached.err +} + +// buildAuthzCacheKey creates a cache key for authorization based on user identity and namespace. +func (a *k8sAuthorizer) buildAuthzCacheKey(userInfo *authenticationv1.UserInfo, namespace string) authzCacheKey { + // Sort groups to make the cache key order-independent. + sortedGroups := append([]string(nil), userInfo.Groups...) + slices.Sort(sortedGroups) + + return authzCacheKey{ + uid: userInfo.UID, + userGroups: strings.Join(sortedGroups, "|"), + namespace: namespace, + } +} + +// checkAllPermissions runs all SubjectAccessReview checks in parallel. +func (a *k8sAuthorizer) checkAllPermissions(ctx context.Context, userInfo *authenticationv1.UserInfo, namespace string) authzResult { // Use errgroup with context cancellation to short-circuit on first error/denial. g, groupCtx := errgroup.WithContext(ctx) for _, resource := range a.resourcesToCheck { for _, verb := range a.verbsToCheck { - // Capture loop variables for the goroutine. resource := resource g.Go(func() error { allowed, err := a.performSubjectAccessReview(groupCtx, userInfo, verb, namespace, resource) if err != nil { - return pkghttputil.Errorf(http.StatusInternalServerError, "checking %s permission for %s: %v", verb, resource.Resource, err) + return pkghttputil.Errorf(http.StatusInternalServerError, + "checking %s permission for %q: %v", verb, resource, err) } if !allowed { - return formatForbiddenErr(userInfo.Username, verb, resource.Resource, resource.Group, namespace) + return formatForbiddenErr(userInfo.Username, verb, resource, namespace) } return nil }) } } - if err := g.Wait(); err != nil { - return err //nolint:wrapcheck - } - return nil + return authzResult{err: g.Wait()} } -func (a *k8sAuthorizer) performSubjectAccessReview(ctx context.Context, userInfo *authenticationv1.UserInfo, verb, namespace string, resource struct { - Resource string - Group string -}, -) (bool, error) { - // Sort groups to make the cache key order-independent. - sortedGroups := append([]string(nil), userInfo.Groups...) 
- slices.Sort(sortedGroups) - - cacheKey := sarCacheKey{ - uid: userInfo.UID, - userGroups: strings.Join(sortedGroups, "|"), - namespace: namespace, - verb: verb, - resource: resource.Resource, - group: resource.Group, - } - if allowed, ok := a.sarCache.Get(cacheKey); ok { - return allowed, nil - } - log.Debugf( - "Cache miss for subject access review to perform %s on %s.%s in %s (user=%q, uid=%q, userGroups=%q)", - cacheKey.verb, cacheKey.group, cacheKey.resource, cacheKey.namespace, userInfo.Username, cacheKey.uid, cacheKey.userGroups, - ) - +// performSubjectAccessReview performs a SubjectAccessReview API call. +func (a *k8sAuthorizer) performSubjectAccessReview(ctx context.Context, userInfo *authenticationv1.UserInfo, verb, namespace string, resource k8sResource) (bool, error) { // In SubjectAccessReview an empty namespace means full cluster access. namespaceScope := namespace if namespace == FullClusterAccessScope { namespaceScope = "" } + sar := &authv1.SubjectAccessReview{ Spec: authv1.SubjectAccessReviewSpec{ User: userInfo.Username, @@ -237,13 +304,12 @@ func (a *k8sAuthorizer) performSubjectAccessReview(ctx context.Context, userInfo result, err := a.client.AuthorizationV1().SubjectAccessReviews().Create(ctx, sar, metav1.CreateOptions{}) if err != nil { - return false, pkghttputil.Errorf(http.StatusInternalServerError, "performing subject access review: %v", err) + return false, errors.Wrap(err, "performing subject access review") } if result.Status.EvaluationError != "" { - return false, pkghttputil.Errorf(http.StatusInternalServerError, "authorization evaluation error: %s", result.Status.EvaluationError) + return false, errors.Errorf("authorization evaluation error: %s", result.Status.EvaluationError) } - a.sarCache.Add(cacheKey, result.Status.Allowed) return result.Status.Allowed, nil } diff --git a/sensor/common/centralproxy/authorizer_test.go b/sensor/common/centralproxy/authorizer_test.go index f1e44cea3ad16..5ebf29fb0b448 100644 --- a/sensor/common/centralproxy/authorizer_test.go +++ b/sensor/common/centralproxy/authorizer_test.go @@ -4,11 +4,14 @@ import ( "context" "net/http" "net/http/httptest" + "sync/atomic" "testing" "github.com/pkg/errors" pkghttputil "github.com/stackrox/rox/pkg/httputil" + "github.com/stackrox/rox/pkg/sync" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" authenticationv1 "k8s.io/api/authentication/v1" authv1 "k8s.io/api/authorization/v1" "k8s.io/apimachinery/pkg/runtime" @@ -484,3 +487,265 @@ func TestK8sAuthorizer_TokenReviewCaching(t *testing.T) { assert.Equal(t, 2, tokenReviewCallCount, "Second request should perform TokenReview again (failures not cached)") }) } + +func TestK8sAuthorizer_TokenReviewCoalescing(t *testing.T) { + t.Run("concurrent TokenReview requests for same token make only one API call", func(t *testing.T) { + fakeClient := fake.NewClientset() + var callCount atomic.Int32 + var wg sync.WaitGroup + + // Barrier to keep the TokenReview in-flight while goroutines start. + // This ensures deterministic coalescing behavior without relying on timing. 
+ barrier := make(chan struct{}) + + fakeClient.PrependReactor("create", "tokenreviews", func(action k8sTesting.Action) (bool, runtime.Object, error) { + callCount.Add(1) + // Block until test signals all goroutines have started + <-barrier + return true, &authenticationv1.TokenReview{ + Status: authenticationv1.TokenReviewStatus{ + Authenticated: true, + User: authenticationv1.UserInfo{ + Username: "test-user", + UID: "test-uid", + Groups: []string{"test-group"}, + }, + }, + }, nil + }) + + authorizer := newK8sAuthorizer(fakeClient) + + const numGoroutines = 10 + results := make([]*authenticationv1.UserInfo, numGoroutines) + errs := make([]error, numGoroutines) + + // Use a separate WaitGroup to track when all goroutines have started + var startWg sync.WaitGroup + startWg.Add(numGoroutines) + + // Launch concurrent requests with the same token + for i := 0; i < numGoroutines; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + startWg.Done() // Signal this goroutine has started + req := httptest.NewRequest(http.MethodGet, "/test", nil) + req.Header.Set("Authorization", "Bearer same-token") + results[idx], errs[idx] = authorizer.authenticate(context.Background(), req) + }(i) + } + + // Wait for all goroutines to start, then release the barrier + startWg.Wait() + close(barrier) + wg.Wait() + + // Verify only ONE TokenReview API call was made + assert.Equal(t, int32(1), callCount.Load(), + "expected exactly 1 TokenReview call for %d concurrent requests", numGoroutines) + + // Verify all goroutines got the same result + for i := 0; i < numGoroutines; i++ { + require.NoError(t, errs[i]) + assert.Equal(t, "test-user", results[i].Username) + } + }) +} + +func TestK8sAuthorizer_AuthorizationCoalescing(t *testing.T) { + t.Run("concurrent authorization requests for same user/namespace make only one set of SAR calls", func(t *testing.T) { + fakeClient := fake.NewClientset() + var sarCallCount atomic.Int32 + var wg sync.WaitGroup + + // Barrier to keep the SAR in-flight while goroutines start. + // This ensures deterministic coalescing behavior without relying on timing. 
+ barrier := make(chan struct{}) + + fakeClient.PrependReactor("create", "subjectaccessreviews", func(action k8sTesting.Action) (bool, runtime.Object, error) { + sarCallCount.Add(1) + // Block until test signals all goroutines have started + <-barrier + return true, &authv1.SubjectAccessReview{ + Status: authv1.SubjectAccessReviewStatus{ + Allowed: true, + }, + }, nil + }) + + // Create authorizer with only one resource to check for simpler test + authorizer := newK8sAuthorizer(fakeClient) + authorizer.verbsToCheck = []string{"get"} + authorizer.resourcesToCheck = []k8sResource{ + {Resource: "pods", Group: ""}, + } + + userInfo := &authenticationv1.UserInfo{ + Username: "test-user", + UID: "test-uid", + Groups: []string{"test-group"}, + } + + const numGoroutines = 10 + errs := make([]error, numGoroutines) + + // Use a separate WaitGroup to track when all goroutines have started + var startWg sync.WaitGroup + startWg.Add(numGoroutines) + + // Launch concurrent authorize requests for the same user/namespace + for i := 0; i < numGoroutines; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + startWg.Done() // Signal this goroutine has started + req := httptest.NewRequest(http.MethodGet, "/test", nil) + req.Header.Set(stackroxNamespaceHeader, "test-namespace") + errs[idx] = authorizer.authorize(context.Background(), userInfo, req) + }(i) + } + + // Wait for all goroutines to start, then release the barrier + startWg.Wait() + close(barrier) + wg.Wait() + + // Verify only ONE SubjectAccessReview API call was made (1 resource × 1 verb) + // All concurrent requests share the same singleflight execution. + assert.Equal(t, int32(1), sarCallCount.Load(), + "expected exactly 1 SAR call for %d concurrent requests", numGoroutines) + + // Verify all goroutines got success + for i := 0; i < numGoroutines; i++ { + assert.NoError(t, errs[i]) + } + }) + + t.Run("forbidden responses are cached and coalesced", func(t *testing.T) { + fakeClient := fake.NewClientset() + var sarCallCount atomic.Int32 + var wg sync.WaitGroup + + // Barrier to keep the SAR in-flight while goroutines start. 
+ barrier := make(chan struct{}) + + fakeClient.PrependReactor("create", "subjectaccessreviews", func(action k8sTesting.Action) (bool, runtime.Object, error) { + sarCallCount.Add(1) + <-barrier + // Return Forbidden: Allowed=false with no EvaluationError + return true, &authv1.SubjectAccessReview{ + Status: authv1.SubjectAccessReviewStatus{ + Allowed: false, + Denied: true, + Reason: "forbidden by policy", + }, + }, nil + }) + + // Create authorizer with only one resource to check for simpler test + authorizer := newK8sAuthorizer(fakeClient) + authorizer.verbsToCheck = []string{"get"} + authorizer.resourcesToCheck = []k8sResource{ + {Resource: "pods", Group: ""}, + } + + userInfo := &authenticationv1.UserInfo{ + Username: "test-user", + UID: "test-uid", + Groups: []string{"test-group"}, + } + + const numGoroutines = 10 + errs := make([]error, numGoroutines) + + var startWg sync.WaitGroup + startWg.Add(numGoroutines) + + for i := 0; i < numGoroutines; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + startWg.Done() + req := httptest.NewRequest(http.MethodGet, "/test", nil) + req.Header.Set(stackroxNamespaceHeader, "test-namespace") + errs[idx] = authorizer.authorize(context.Background(), userInfo, req) + }(i) + } + + startWg.Wait() + close(barrier) + wg.Wait() + + // All concurrent callers should have shared a single SAR and observed Forbidden + assert.Equal(t, int32(1), sarCallCount.Load(), + "expected exactly 1 SAR call for %d concurrent forbidden requests", numGoroutines) + + for i := 0; i < numGoroutines; i++ { + require.Errorf(t, errs[i], "expected forbidden error for goroutine %d", i) + assert.Contains(t, errs[i].Error(), "lacks") + } + + // Call authorize again for the same user/namespace - should use cached forbidden result + req := httptest.NewRequest(http.MethodGet, "/test", nil) + req.Header.Set(stackroxNamespaceHeader, "test-namespace") + err := authorizer.authorize(context.Background(), userInfo, req) + require.Error(t, err) + assert.Contains(t, err.Error(), "lacks") + assert.Equal(t, int32(1), sarCallCount.Load(), "forbidden results should be cached; expected no new SAR calls") + }) + + t.Run("transient errors are not cached", func(t *testing.T) { + fakeClient := fake.NewClientset() + var sarCallCount atomic.Int32 + shouldFail := true + + fakeClient.PrependReactor("create", "subjectaccessreviews", func(action k8sTesting.Action) (bool, runtime.Object, error) { + sarCallCount.Add(1) + if shouldFail { + return true, nil, errors.New("transient authz backend error") + } + // Success path + return true, &authv1.SubjectAccessReview{ + Status: authv1.SubjectAccessReviewStatus{ + Allowed: true, + }, + }, nil + }) + + // Create authorizer with only one resource to check for simpler test + authorizer := newK8sAuthorizer(fakeClient) + authorizer.verbsToCheck = []string{"get"} + authorizer.resourcesToCheck = []k8sResource{ + {Resource: "pods", Group: ""}, + } + + userInfo := &authenticationv1.UserInfo{ + Username: "test-user", + UID: "test-uid", + Groups: []string{"test-group"}, + } + + // First request fails with transient error + req := httptest.NewRequest(http.MethodGet, "/test", nil) + req.Header.Set(stackroxNamespaceHeader, "test-namespace") + err := authorizer.authorize(context.Background(), userInfo, req) + require.Error(t, err) + assert.Contains(t, err.Error(), "transient authz backend error") + + firstCallCount := sarCallCount.Load() + assert.Equal(t, int32(1), firstCallCount, "first request should make exactly 1 SAR call") + + // Second request - because transient errors 
should not be cached, + // it should trigger another SAR call. + shouldFail = false + + req = httptest.NewRequest(http.MethodGet, "/test", nil) + req.Header.Set(stackroxNamespaceHeader, "test-namespace") + err = authorizer.authorize(context.Background(), userInfo, req) + require.NoError(t, err, "expected success after transient error was resolved") + + // We should have observed at least one more SAR call, proving errors were not cached + assert.Greater(t, sarCallCount.Load(), firstCallCount, "transient errors must not be cached") + }) +} From 743caa32763220a2e36bf3043572698b5a7bfb0a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Feb 2026 01:49:28 -0700 Subject: [PATCH 204/232] chore(deps): bump the aws-sdk-go-v2 group with 2 updates (#19053) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index d54158bc462e9..308304ca62acd 100644 --- a/go.mod +++ b/go.mod @@ -30,9 +30,9 @@ require ( github.com/aws/aws-sdk-go-v2/credentials v1.19.7 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.0 - github.com/aws/aws-sdk-go-v2/service/ecr v1.55.1 + github.com/aws/aws-sdk-go-v2/service/ecr v1.55.2 github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0 - github.com/aws/aws-sdk-go-v2/service/securityhub v1.67.3 + github.com/aws/aws-sdk-go-v2/service/securityhub v1.67.4 github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 github.com/aws/smithy-go v1.24.0 github.com/cenkalti/backoff/v3 v3.2.2 diff --git a/go.sum b/go.sum index 5c40354744e20..39a4cb9724b0f 100644 --- a/go.sum +++ b/go.sum @@ -305,8 +305,8 @@ github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEG github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc= github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 h1:JqcdRG//czea7Ppjb+g/n4o8i/R50aTBHkA7vu0lK+k= github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17/go.mod h1:CO+WeGmIdj/MlPel2KwID9Gt7CNq4M65HUfBW97liM0= -github.com/aws/aws-sdk-go-v2/service/ecr v1.55.1 h1:B7f9R99lCF83XlolTg6d6Lvghyto+/VU83ZrneAVfK8= -github.com/aws/aws-sdk-go-v2/service/ecr v1.55.1/go.mod h1:cpYRXx5BkmS3mwWRKPbWSPKmyAUNL7aLWAPiiinwk/U= +github.com/aws/aws-sdk-go-v2/service/ecr v1.55.2 h1:eEiC82g/AJpNtBB73Par9iO/EbWXcl8vh6tbM8wb+EM= +github.com/aws/aws-sdk-go-v2/service/ecr v1.55.2/go.mod h1:cpYRXx5BkmS3mwWRKPbWSPKmyAUNL7aLWAPiiinwk/U= github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.38.2 h1:9fe6w8bydUwNAhFVmjo+SRqAJjbBMOyILL/6hTTVkyA= github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.38.2/go.mod h1:x7gU4CAyAz4BsM9hlRkhHiYw2GIr1QCmN45uwQw9l/E= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E= @@ -321,8 +321,8 @@ github.com/aws/aws-sdk-go-v2/service/kms v1.49.4 h1:2gom8MohxN0SnhHZBYAC4S8jHG+E github.com/aws/aws-sdk-go-v2/service/kms v1.49.4/go.mod h1:HO31s0qt0lso/ADvZQyzKs8js/ku0fMHsfyXW8OPVYc= github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0 h1:oeu8VPlOre74lBA/PMhxa5vewaMIMmILM+RraSyB8KA= github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0/go.mod h1:5jggDlZ2CLQhwJBiZJb4vfk4f0GxWdEDruWKEJ1xOdo= -github.com/aws/aws-sdk-go-v2/service/securityhub v1.67.3 h1:FEs3IkfJWp+Sz3ZY6sAxmebBF0lr1wBcTWkuFW1OFJg= -github.com/aws/aws-sdk-go-v2/service/securityhub v1.67.3/go.mod 
h1:3wnS16Wip5w0uh9kVFBhuMFmdkrMBr8Fc96kAY5h13o= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.67.4 h1:6zZdm6bqf975SA+6+lWlcokd9Z2fW/abVkc5E4tIm4M= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.67.4/go.mod h1:3wnS16Wip5w0uh9kVFBhuMFmdkrMBr8Fc96kAY5h13o= github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y= github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M= github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk= From c1cc0c54fe914928259566d34bd13a3c9ecddd6d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Feb 2026 09:59:19 +0100 Subject: [PATCH 205/232] chore(deps-dev): bump jsonpath from 1.2.0 to 1.2.1 in /tests/performance/load (#19000) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- tests/performance/load/package-lock.json | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/performance/load/package-lock.json b/tests/performance/load/package-lock.json index 8e10227f1e378..cba3a5120f98a 100644 --- a/tests/performance/load/package-lock.json +++ b/tests/performance/load/package-lock.json @@ -1529,9 +1529,9 @@ ] }, "node_modules/jsonpath": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/jsonpath/-/jsonpath-1.2.0.tgz", - "integrity": "sha512-EVm29wT2coM0QfZd8TREEeMTOxZcyV3oCQ61AM0DrMkVaVCKXtPEm0oJccEbz5P9Oi+JwRkkIt0Bkn63gqCHjg==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/jsonpath/-/jsonpath-1.2.1.tgz", + "integrity": "sha512-Jl6Jhk0jG+kP3yk59SSeGq7LFPR4JQz1DU0K+kXTysUhMostbhU3qh5mjTuf0PqFcXpAT7kvmMt9WxV10NyIgQ==", "dev": true, "dependencies": { "esprima": "1.2.5", @@ -3693,9 +3693,9 @@ "dev": true }, "jsonpath": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/jsonpath/-/jsonpath-1.2.0.tgz", - "integrity": "sha512-EVm29wT2coM0QfZd8TREEeMTOxZcyV3oCQ61AM0DrMkVaVCKXtPEm0oJccEbz5P9Oi+JwRkkIt0Bkn63gqCHjg==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/jsonpath/-/jsonpath-1.2.1.tgz", + "integrity": "sha512-Jl6Jhk0jG+kP3yk59SSeGq7LFPR4JQz1DU0K+kXTysUhMostbhU3qh5mjTuf0PqFcXpAT7kvmMt9WxV10NyIgQ==", "dev": true, "requires": { "esprima": "1.2.5", From 1ee28b55b4313b5921c23dcb27264a30909fceb5 Mon Sep 17 00:00:00 2001 From: Stephan Hesselmann Date: Tue, 17 Feb 2026 11:34:00 +0100 Subject: [PATCH 206/232] ROX-32943: use coalescer in centralproxy transport (#18811) --- sensor/common/centralproxy/transport.go | 45 +++++- sensor/common/centralproxy/transport_test.go | 136 +++++++++++++++++++ 2 files changed, 177 insertions(+), 4 deletions(-) diff --git a/sensor/common/centralproxy/transport.go b/sensor/common/centralproxy/transport.go index aafb7594966b0..de9620a61f333 100644 --- a/sensor/common/centralproxy/transport.go +++ b/sensor/common/centralproxy/transport.go @@ -14,6 +14,7 @@ import ( "github.com/pkg/errors" centralv1 "github.com/stackrox/rox/generated/internalapi/central/v1" "github.com/stackrox/rox/pkg/clientconn" + "github.com/stackrox/rox/pkg/coalescer" "github.com/stackrox/rox/pkg/expiringcache" pkghttputil "github.com/stackrox/rox/pkg/httputil" "github.com/stackrox/rox/pkg/mtls" @@ -31,6 +32,10 @@ const ( // Slightly longer than cache TTL to ensure tokens remain valid during cache lifetime. 
tokenTTL = tokenCacheTTL + 1*time.Minute + // tokenRequestTimeout is the maximum time allowed for a token request RPC. + // This ensures that token requests don't hang indefinitely when all callers have cancelled. + tokenRequestTimeout = 30 * time.Second + // FullClusterAccessScope is the namespace scope value that indicates full cluster access. FullClusterAccessScope = "*" ) @@ -170,6 +175,8 @@ type tokenProvider struct { client atomic.Pointer[centralv1.TokenServiceClient] clusterIDGetter clusterIDGetter tokenCache expiringcache.Cache[string, string] + // tokenGroup coalesces concurrent token requests for the same namespace scope. + tokenGroup *coalescer.Coalescer[string] } // newTokenProvider creates a new tokenProvider. @@ -177,6 +184,7 @@ func newTokenProvider(clusterIDGetter clusterIDGetter) *tokenProvider { return &tokenProvider{ clusterIDGetter: clusterIDGetter, tokenCache: expiringcache.NewExpiringCache[string, string](tokenCacheTTL), + tokenGroup: coalescer.New[string](), } } @@ -195,23 +203,49 @@ func (p *tokenProvider) setClient(conn grpc.ClientConnInterface) { // - "" (empty): Token with empty access scope (authentication only) // - "": Token scoped to the specific namespace // - FullClusterAccessScope ("*"): Token with full cluster access +// +// Concurrent requests for the same scope are coalesced to reduce load on Central. func (p *tokenProvider) getTokenForScope(ctx context.Context, namespaceScope string) (string, error) { client := p.client.Load() if client == nil { return "", errors.Wrap(errServiceUnavailable, "token provider not initialized: central connection not available") } + // Fast path: check cache first. if token, ok := p.tokenCache.Get(namespaceScope); ok { return token, nil } - log.Debugf("Token cache miss for namespace scope %q, requesting from Central", namespaceScope) + // Slow path: coalesce concurrent requests for the same scope. + return p.tokenGroup.Coalesce(ctx, namespaceScope, func() (string, error) { //nolint:wrapcheck + // Double-check cache inside coalesce to avoid redundant API calls. + if token, ok := p.tokenCache.Get(namespaceScope); ok { + return token, nil + } + + log.Debugf("Token cache miss for namespace scope %q, requesting from Central", namespaceScope) + + // Use a background context with timeout to ensure the shared function is independent + // of the initial request context while still having a bounded lifetime. + ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout) + defer cancel() + token, err := p.requestToken(ctx, *client, namespaceScope) + if err != nil { + return "", err + } + p.tokenCache.Add(namespaceScope, token) + return token, nil + }) +} + +// requestToken performs the RPC call to Central to generate a token for the given scope. 
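// For context, getTokenForScope above wraps this call in a cache-then-coalesce
// pattern. A minimal sketch of that pattern in isolation, assuming only the
// coalescer and expiringcache APIs already used in this file (expensiveFetch is
// a hypothetical stand-in for the guarded call):
//
//	cache := expiringcache.NewExpiringCache[string, string](time.Minute)
//	group := coalescer.New[string]()
//
//	lookup := func(ctx context.Context, key string) (string, error) {
//		if v, ok := cache.Get(key); ok { // fast path
//			return v, nil
//		}
//		return group.Coalesce(ctx, key, func() (string, error) {
//			if v, ok := cache.Get(key); ok { // double-check after winning the race
//				return v, nil
//			}
//			v, err := expensiveFetch(key)
//			if err != nil {
//				return "", err // errors are not cached, so callers can retry
//			}
//			cache.Add(key, v)
//			return v, nil
//		})
//	}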
+func (p *tokenProvider) requestToken(ctx context.Context, client centralv1.TokenServiceClient, namespaceScope string) (string, error) { req, err := p.buildTokenRequest(namespaceScope) if err != nil { return "", errors.Wrap(err, "building token request") } - resp, err := (*client).GenerateTokenForPermissionsAndScope(ctx, req) + resp, err := client.GenerateTokenForPermissionsAndScope(ctx, req) if err != nil { return "", errors.Wrapf(err, "requesting token from Central for scope %q", namespaceScope) } @@ -221,14 +255,17 @@ func (p *tokenProvider) getTokenForScope(ctx context.Context, namespaceScope str return "", errors.Errorf("received empty token from Central for scope %q", namespaceScope) } - p.tokenCache.Add(namespaceScope, token) - return token, nil } // invalidateToken removes the cached token for the given scope. +// It also removes the coalescer key so subsequent callers will +// trigger a fresh token request rather than joining any in-progress request. +// Note: This does not cancel already running requests; they will complete +// normally but their results will not be used by new callers. func (p *tokenProvider) invalidateToken(scope string) { p.tokenCache.Remove(scope) + p.tokenGroup.Forget(scope) } // buildTokenRequest creates the token request based on the namespace scope. diff --git a/sensor/common/centralproxy/transport_test.go b/sensor/common/centralproxy/transport_test.go index a5b24a6a9c340..48a755987ea25 100644 --- a/sensor/common/centralproxy/transport_test.go +++ b/sensor/common/centralproxy/transport_test.go @@ -8,10 +8,13 @@ import ( "net/http" "net/http/httptest" "strings" + "sync/atomic" "testing" centralv1 "github.com/stackrox/rox/generated/internalapi/central/v1" + "github.com/stackrox/rox/pkg/coalescer" "github.com/stackrox/rox/pkg/expiringcache" + "github.com/stackrox/rox/pkg/sync" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/grpc" @@ -33,6 +36,9 @@ type fakeTokenServiceClient struct { // Capture the request for verification lastRequest *centralv1.GenerateTokenForPermissionsAndScopeRequest + + // callCount tracks the number of RPC calls made (optional, set to non-nil to enable) + callCount *atomic.Int32 } func (f *fakeTokenServiceClient) GenerateTokenForPermissionsAndScope( @@ -40,6 +46,9 @@ func (f *fakeTokenServiceClient) GenerateTokenForPermissionsAndScope( in *centralv1.GenerateTokenForPermissionsAndScopeRequest, opts ...grpc.CallOption, ) (*centralv1.GenerateTokenForPermissionsAndScopeResponse, error) { + if f.callCount != nil { + f.callCount.Add(1) + } f.lastRequest = in return f.response, f.err } @@ -49,6 +58,7 @@ func newTestTokenProvider(client centralv1.TokenServiceClient, clusterID string) tp := &tokenProvider{ clusterIDGetter: &fakeClusterIDGetter{clusterID: clusterID}, tokenCache: expiringcache.NewExpiringCache[string, string](tokenCacheTTL), + tokenGroup: coalescer.New[string](), } if client != nil { tp.client.Store(&client) @@ -803,3 +813,129 @@ type roundTripperFunc func(*http.Request) (*http.Response, error) func (f roundTripperFunc) RoundTrip(req *http.Request) (*http.Response, error) { return f(req) } + +// barrierFakeTokenServiceClient allows controlling when the token is returned using a barrier. +// This is used for deterministic testing of request coalescing without timing dependencies. 
+type barrierFakeTokenServiceClient struct { + getToken func() string + barrier <-chan struct{} // If non-nil, blocks until closed before returning +} + +func (b *barrierFakeTokenServiceClient) GenerateTokenForPermissionsAndScope( + ctx context.Context, + in *centralv1.GenerateTokenForPermissionsAndScopeRequest, + opts ...grpc.CallOption, +) (*centralv1.GenerateTokenForPermissionsAndScopeResponse, error) { + if b.barrier != nil { + <-b.barrier + } + return ¢ralv1.GenerateTokenForPermissionsAndScopeResponse{ + Token: b.getToken(), + }, nil +} + +func TestTokenProvider_Coalescing(t *testing.T) { + t.Run("concurrent requests for same scope make only one RPC call", func(t *testing.T) { + var callCount atomic.Int32 + var wg sync.WaitGroup + + // Barrier to keep the RPC in-flight while goroutines start. + // This ensures deterministic coalescing behavior without relying on timing. + barrier := make(chan struct{}) + + fakeClient := &barrierFakeTokenServiceClient{ + barrier: barrier, + getToken: func() string { + callCount.Add(1) + return "coalesced-token" + }, + } + + provider := newTestTokenProvider(fakeClient, "test-cluster-id") + + const numGoroutines = 10 + tokens := make([]string, numGoroutines) + errs := make([]error, numGoroutines) + + // Use a separate WaitGroup to track when all goroutines have started + var startWg sync.WaitGroup + startWg.Add(numGoroutines) + + // Launch concurrent requests for the same scope + for i := 0; i < numGoroutines; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + startWg.Done() // Signal this goroutine has started + tokens[idx], errs[idx] = provider.getTokenForScope(context.Background(), "shared-scope") + }(i) + } + + // Wait for all goroutines to start, then release the barrier + startWg.Wait() + close(barrier) + wg.Wait() + + // Verify only ONE RPC call was made + assert.Equal(t, int32(1), callCount.Load(), + "expected exactly 1 RPC call for %d concurrent requests", numGoroutines) + + // Verify all goroutines got the same token + for i := 0; i < numGoroutines; i++ { + require.NoError(t, errs[i]) + assert.Equal(t, "coalesced-token", tokens[i]) + } + }) + + t.Run("errors are not shared - each caller can retry", func(t *testing.T) { + var callCount atomic.Int32 + fakeClient := &fakeTokenServiceClient{ + callCount: &callCount, + } + + provider := newTestTokenProvider(fakeClient, "test-cluster-id") + + // First request fails + fakeClient.err = errors.New("transient failure") + _, err := provider.getTokenForScope(context.Background(), "error-scope") + require.Error(t, err) + + // singleflight removes the key after the call completes with error + // so a second request will trigger a new RPC call + fakeClient.err = nil + fakeClient.response = ¢ralv1.GenerateTokenForPermissionsAndScopeResponse{Token: "recovered-token"} + + token, err := provider.getTokenForScope(context.Background(), "error-scope") + require.NoError(t, err) + assert.Equal(t, "recovered-token", token) + assert.Equal(t, int32(2), callCount.Load(), "expected two RPC calls (one failure, one success)") + }) + + t.Run("invalidateToken allows fresh RPC after invalidation", func(t *testing.T) { + var callCount atomic.Int32 + + fakeClient := &dynamicFakeTokenServiceClient{ + getToken: func() string { + count := callCount.Add(1) + return fmt.Sprintf("token-%d", count) + }, + } + + provider := newTestTokenProvider(fakeClient, "test-cluster-id") + + // First request + token1, err := provider.getTokenForScope(context.Background(), "invalidate-scope") + require.NoError(t, err) + assert.Equal(t, 
"token-1", token1) + assert.Equal(t, int32(1), callCount.Load()) + + // Invalidate + provider.invalidateToken("invalidate-scope") + + // Second request should trigger new RPC + token2, err := provider.getTokenForScope(context.Background(), "invalidate-scope") + require.NoError(t, err) + assert.Equal(t, "token-2", token2) + assert.Equal(t, int32(2), callCount.Load()) + }) +} From 05fbdfee8540b3dcded24b951431f27138b37755 Mon Sep 17 00:00:00 2001 From: Moritz Clasmeier <111092021+mclasmeier@users.noreply.github.com> Date: Tue, 17 Feb 2026 12:22:52 +0100 Subject: [PATCH 207/232] ROX-26769: CRS max-registrations feature (#18513) Co-authored-by: Moritz Clasmeier Co-authored-by: Marcin Owsiany Co-authored-by: sourcery-ai[bot] <58596630+sourcery-ai[bot]@users.noreply.github.com> --- CHANGELOG.md | 4 + central/cluster/datastore/datastore.go | 3 + central/cluster/datastore/datastore_impl.go | 19 +- .../datastore/datastore_impl_postgres_test.go | 13 +- .../datastore/datastore_test_constructors.go | 5 +- central/cluster/datastore/singleton.go | 4 +- central/cluster/datastore/test_support.go | 21 + central/clusterinit/backend/backend.go | 2 +- central/clusterinit/backend/backend_impl.go | 24 +- central/clusterinit/backend/backend_test.go | 24 +- central/clusterinit/backend/mocks/backend.go | 8 +- central/clusterinit/backend/render.go | 11 + central/clusterinit/service/service_impl.go | 10 +- .../clusterinit/service/service_impl_test.go | 18 +- central/clusterinit/service/transformation.go | 13 +- central/clusterinit/store/mocks/store.go | 268 +++++++++++ central/clusterinit/store/store.go | 154 +++++- .../store/store_test_constructors.go | 14 + central/clusterinit/store/tests/store_test.go | 110 +++++ central/graphql/resolvers/test_setup_utils.go | 2 +- central/main.go | 3 +- central/pruning/pruning_test.go | 5 +- central/sensor/service/service_impl.go | 60 ++- central/sensor/service/service_impl_test.go | 439 ++++++++++++++---- generated/api/v1/cluster_init_service.pb.go | 62 ++- .../api/v1/cluster_init_service.swagger.json | 22 +- .../api/v1/cluster_init_service_vtproto.pb.go | 254 +++++++++- generated/storage/cluster_init.pb.go | 52 ++- generated/storage/cluster_init_vtproto.pb.go | 244 ++++++++++ proto/api/v1/cluster_init_service.proto | 6 +- proto/storage/cluster_init.proto | 3 + proto/storage/proto.lock | 17 + roxctl/central/crs/generate.go | 31 +- roxctl/maincommand/command_tree_debug.yaml | 1 + roxctl/maincommand/command_tree_release.yaml | 1 + tests/e2e/run-scanner-v4-install.bats | 54 ++- tools/allowed-large-files | 1 + 37 files changed, 1767 insertions(+), 215 deletions(-) create mode 100644 central/cluster/datastore/test_support.go create mode 100644 central/clusterinit/store/mocks/store.go create mode 100644 central/clusterinit/store/store_test_constructors.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 3154bfc3ef974..aae2259246213 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,10 @@ Changes should still be described appropriately in JIRA/doc input pages, for inc ### Added Features +- ROX-26769: Central API for generating CRSs now supports specifying an upper bound for cluster + registrations using the new field "max_registrations". + roxctl's "central crs generate" supports specifying a maximum number of cluster registrations + using the new parameter "--max-clusters". - ROX-31443: Automatic HTTP to HTTPS redirection is now enabled for Central OpenShift routes (passthrough and reencrypt). 
- ROX-29582: A `kubectl get` on a Central CR now shows the following additional columns: Version, AdminPassword, Message, Available, Progressing. - ROX-32061: The `spec.configAsCode` field in the Central CR now supports `resources`, `nodeSelector`, `tolerations`, and `hostAliases` settings for the config-controller deployment. diff --git a/central/cluster/datastore/datastore.go b/central/cluster/datastore/datastore.go index 47fd9d2e061a5..8a16a15ab2941 100644 --- a/central/cluster/datastore/datastore.go +++ b/central/cluster/datastore/datastore.go @@ -6,6 +6,7 @@ import ( alertDataStore "github.com/stackrox/rox/central/alert/datastore" clusterStore "github.com/stackrox/rox/central/cluster/store/cluster" clusterHealthStore "github.com/stackrox/rox/central/cluster/store/clusterhealth" + clusterInitStore "github.com/stackrox/rox/central/clusterinit/store" compliancePruning "github.com/stackrox/rox/central/complianceoperator/v2/pruner" clusterCVEDS "github.com/stackrox/rox/central/cve/cluster/datastore" deploymentDataStore "github.com/stackrox/rox/central/deployment/datastore" @@ -95,6 +96,7 @@ func New( clusterRanker *ranking.Ranker, networkBaselineMgr networkBaselineManager.Manager, compliancePruner compliancePruning.Pruner, + clusterInitStore clusterInitStore.Store, ) (DataStore, error) { ds := &datastoreImpl{ clusterStorage: clusterStorage, @@ -119,6 +121,7 @@ func New( idToNameCache: simplecache.New(), nameToIDCache: simplecache.New(), compliancePruner: compliancePruner, + clusterInitStore: clusterInitStore, } if err := ds.buildCache(sac.WithAllAccess(context.Background())); err != nil { diff --git a/central/cluster/datastore/datastore_impl.go b/central/cluster/datastore/datastore_impl.go index 817045978a532..634df49f6bdb3 100644 --- a/central/cluster/datastore/datastore_impl.go +++ b/central/cluster/datastore/datastore_impl.go @@ -11,6 +11,7 @@ import ( alertDataStore "github.com/stackrox/rox/central/alert/datastore" clusterStore "github.com/stackrox/rox/central/cluster/store/cluster" clusterHealthStore "github.com/stackrox/rox/central/cluster/store/clusterhealth" + clusterInitStore "github.com/stackrox/rox/central/clusterinit/store" compliancePruning "github.com/stackrox/rox/central/complianceoperator/v2/pruner" "github.com/stackrox/rox/central/convert/storagetoeffectiveaccessscope" clusterCVEDS "github.com/stackrox/rox/central/cve/cluster/datastore" @@ -71,6 +72,7 @@ var ( type datastoreImpl struct { clusterStorage clusterStore.Store + clusterInitStore clusterInitStore.Store clusterHealthStorage clusterHealthStore.Store clusterCVEDataStore clusterCVEDS.DataStore alertDataStore alertDataStore.DataStore @@ -894,7 +896,10 @@ func (ds *datastoreImpl) updateClusterNoLock(ctx context.Context, cluster *stora return nil } -// registrantID can be the ID of an init bundle or of a CRS. +// registrantID can be one of +// * ID of an init bundle, when connecting with an init bundle certificate. +// * ID of a CRS, when connecting with a CRS certificate. +// * Empty, when connecting with non-init service certificates. func (ds *datastoreImpl) LookupOrCreateClusterFromConfig(ctx context.Context, clusterID, registrantID string, hello *central.SensorHello) (*storage.Cluster, error) { if err := checkWriteSac(ctx, clusterID); err != nil { return nil, err @@ -935,6 +940,10 @@ func (ds *datastoreImpl) LookupOrCreateClusterFromConfig(ctx context.Context, cl } else if clusterName != "" { // At this point, we can be sure that the cluster does not exist. 
+ if err := ds.clusterInitStore.InitiateClusterRegistration(ctx, registrantID, clusterName); err != nil { + return nil, errors.Wrapf(err, "initiating registrations of cluster %s using init artifact %s", clusterName, registrantID) + } + cluster = &storage.Cluster{ Name: clusterName, InitBundleId: registrantID, @@ -998,7 +1007,13 @@ func (ds *datastoreImpl) LookupOrCreateClusterFromConfig(ctx context.Context, cl cluster = cluster.CloneVT() cluster.ManagedBy = manager - cluster.InitBundleId = registrantID + // It would be wrong to set cluster.InitBundle to registrantID here. + // In the case of cluster creation it was already done above. + // And in case the cluster exists already, received registrantID might be empty + // and in this case we would delete it from an existing cluster here. This would + // e.g., happen on the first real connect after a CRS handshake. + // But we actually require the CRS ID to be still associated with the cluster, + // to be able to complete the registration later on. cluster.SensorCapabilities = sliceutils.CopySliceSorted(hello.GetCapabilities()) if centralsensor.SecuredClusterIsNotManagedManually(helmConfig) { configureFromHelmConfig(cluster, clusterConfig) diff --git a/central/cluster/datastore/datastore_impl_postgres_test.go b/central/cluster/datastore/datastore_impl_postgres_test.go index c70b6646d11c7..4ccb57f4e35ae 100644 --- a/central/cluster/datastore/datastore_impl_postgres_test.go +++ b/central/cluster/datastore/datastore_impl_postgres_test.go @@ -11,6 +11,7 @@ import ( alertDatastore "github.com/stackrox/rox/central/alert/datastore" clusterPostgresStore "github.com/stackrox/rox/central/cluster/store/cluster/postgres" clusterHealthPostgresStore "github.com/stackrox/rox/central/cluster/store/clusterhealth/postgres" + clusterInitStore "github.com/stackrox/rox/central/clusterinit/store" compliancePruning "github.com/stackrox/rox/central/complianceoperator/v2/pruner" clusterCVEDataStore "github.com/stackrox/rox/central/cve/cluster/datastore" deploymentDatastore "github.com/stackrox/rox/central/deployment/datastore" @@ -47,6 +48,7 @@ import ( "github.com/stackrox/rox/pkg/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" + "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -74,6 +76,7 @@ type ClusterPostgresDataStoreTestSuite struct { roleBindingDatastore k8sRoleBindingDataStore.DataStore imageIntegrationDatastore imageIntegrationDataStore.DataStore clusterDatastore DataStore + clusterInitStore clusterInitStore.Store clusterHealthDBStore clusterHealthPostgresStore.Store } @@ -107,11 +110,12 @@ func (s *ClusterPostgresDataStoreTestSuite) SetupTest() { s.roleDatastore = k8sRoleDataStore.GetTestPostgresDataStore(s.T(), s.db.DB) s.roleBindingDatastore = k8sRoleBindingDataStore.GetTestPostgresDataStore(s.T(), s.db.DB) s.imageIntegrationDatastore = imageIntegrationDataStore.GetTestPostgresDataStore(s.T(), s.db.DB) + s.clusterInitStore = clusterInitStore.GetTestPostgresDataStore(s.T(), s.db.DB) s.clusterDatastore, err = New(clusterDBStore, s.clusterHealthDBStore, clusterCVEStore, s.alertDatastore, s.imageIntegrationDatastore, s.nsDatastore, s.deploymentDatastore, nodeStore, s.podDatastore, s.secretDatastore, netFlowStore, netEntityStore, s.serviceAccountDatastore, s.roleDatastore, s.roleBindingDatastore, sensorCnxMgr, nil, - clusterRanker, networkBaselineM, compliancePruner) + clusterRanker, networkBaselineM, compliancePruner, s.clusterInitStore) s.NoError(err) } @@ -457,6 +461,13 @@ func (s 
*ClusterPostgresDataStoreTestSuite) TestLookupOrCreateClusterFromConfig( } ctx := sac.WithAllAccess(context.Background()) + _ = s.clusterInitStore.Delete(ctx, bundleID) + err = s.clusterInitStore.Add(ctx, &storage.InitBundleMeta{ + Id: bundleID, + Name: "human-readable-bundle-name", + CreatedAt: timestamppb.New(time.Now()), + }) + s.NoError(err, "adding InitBundleMeta (init bundle or CRS) to clusterInitStore") if c.shouldClusterBeUpserted { clusterID, err = s.clusterDatastore.AddCluster(ctx, c.cluster) diff --git a/central/cluster/datastore/datastore_test_constructors.go b/central/cluster/datastore/datastore_test_constructors.go index c85bd284a5de7..a3c35532a3601 100644 --- a/central/cluster/datastore/datastore_test_constructors.go +++ b/central/cluster/datastore/datastore_test_constructors.go @@ -6,6 +6,7 @@ import ( alertDataStore "github.com/stackrox/rox/central/alert/datastore" clusterPostgresStore "github.com/stackrox/rox/central/cluster/store/cluster/postgres" clusterHealthPostgresStore "github.com/stackrox/rox/central/cluster/store/clusterhealth/postgres" + clusterInitStore "github.com/stackrox/rox/central/clusterinit/store" compliancePruning "github.com/stackrox/rox/central/complianceoperator/v2/pruner" clusterCVEDataStore "github.com/stackrox/rox/central/cve/cluster/datastore" deploymentDataStore "github.com/stackrox/rox/central/deployment/datastore" @@ -63,6 +64,8 @@ func GetTestPostgresDataStore(t testing.TB, pool postgres.DB) (DataStore, error) hashStore := datastore.GetTestPostgresDataStore(t, pool) + clusterInitStore := clusterInitStore.GetTestPostgresDataStore(t, pool) + sensorCnxMgr := connection.NewManager(hashManager.NewManager(hashStore)) clusterRanker := ranking.ClusterRanker() @@ -72,5 +75,5 @@ func GetTestPostgresDataStore(t testing.TB, pool postgres.DB) (DataStore, error) alertStore, iiStore, namespaceStore, deploymentStore, nodeStore, podStore, secretStore, netFlowStore, netEntityStore, serviceAccountStore, k8sRoleStore, k8sRoleBindingStore, sensorCnxMgr, nil, - clusterRanker, networkBaselineManager, compliancePruner) + clusterRanker, networkBaselineManager, compliancePruner, clusterInitStore) } diff --git a/central/cluster/datastore/singleton.go b/central/cluster/datastore/singleton.go index a4956600f7ffc..80961a103f9e4 100644 --- a/central/cluster/datastore/singleton.go +++ b/central/cluster/datastore/singleton.go @@ -4,6 +4,7 @@ import ( alertDataStore "github.com/stackrox/rox/central/alert/datastore" clusterPostgres "github.com/stackrox/rox/central/cluster/store/cluster/postgres" clusterHealthPostgres "github.com/stackrox/rox/central/cluster/store/clusterhealth/postgres" + clusterInitStoreSingleton "github.com/stackrox/rox/central/clusterinit/store/singleton" compliancePruning "github.com/stackrox/rox/central/complianceoperator/v2/pruner" clusterCVEDS "github.com/stackrox/rox/central/cve/cluster/datastore" deploymentDataStore "github.com/stackrox/rox/central/deployment/datastore" @@ -58,7 +59,8 @@ func initialize() { notifierProcessor.Singleton(), ranking.ClusterRanker(), networkBaselineManager.Singleton(), - compliancePruning.Singleton()) + compliancePruning.Singleton(), + clusterInitStoreSingleton.Singleton()) utils.CrashOnError(err) } diff --git a/central/cluster/datastore/test_support.go b/central/cluster/datastore/test_support.go new file mode 100644 index 0000000000000..8ba049e9529fc --- /dev/null +++ b/central/cluster/datastore/test_support.go @@ -0,0 +1,21 @@ +//go:build sql_integration + +package datastore + +import ( + "testing" + + clusterInitStore 
"github.com/stackrox/rox/central/clusterinit/store" +) + +// For certain tests (central/sensor/service/service_impl_test.go) we need to interact +// with a cluster data store and the underlying cluster init store at the same time. +// For this purpose we need a way for these tests to extract the cluster init store +// from a cluster data store. +func IntrospectClusterInitStore(t *testing.T, storeInterface DataStore) clusterInitStore.Store { + store, ok := storeInterface.(*datastoreImpl) + if !ok { + t.Fatal("unexpected datastore type") + } + return store.clusterInitStore +} diff --git a/central/clusterinit/backend/backend.go b/central/clusterinit/backend/backend.go index 9cc43d2ff095d..54d4771b16673 100644 --- a/central/clusterinit/backend/backend.go +++ b/central/clusterinit/backend/backend.go @@ -46,7 +46,7 @@ type Backend interface { GetAllCRS(ctx context.Context) ([]*storage.InitBundleMeta, error) GetCAConfig(ctx context.Context) (*CAConfig, error) Issue(ctx context.Context, name string) (*InitBundleWithMeta, error) - IssueCRS(ctx context.Context, name string, validUntil time.Time) (*CRSWithMeta, error) + IssueCRS(ctx context.Context, name string, validUntil time.Time, maxRegistrations uint64) (*CRSWithMeta, error) Revoke(ctx context.Context, id string) error CheckRevoked(ctx context.Context, id string) error authn.ValidateCertChain diff --git a/central/clusterinit/backend/backend_impl.go b/central/clusterinit/backend/backend_impl.go index 133785401dca7..fffd79fad9e6b 100644 --- a/central/clusterinit/backend/backend_impl.go +++ b/central/clusterinit/backend/backend_impl.go @@ -12,6 +12,7 @@ import ( "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/centralsensor" "github.com/stackrox/rox/pkg/crs" + "github.com/stackrox/rox/pkg/errox" "github.com/stackrox/rox/pkg/grpc/authn" "github.com/stackrox/rox/pkg/mtls" "github.com/stackrox/rox/pkg/protocompat" @@ -21,6 +22,10 @@ import ( const ( currentCrsVersion = 1 + + // We enforce an upper bound for this setting because for this feature it is required to maintain a list of cluster names + // per CRS in the storage and therefore we need to keep the storage requirements under control. + maxRegistrationsUpperLimit = 100 ) var _ authn.ValidateCertChain = (*backendImpl)(nil) @@ -156,7 +161,7 @@ func (b *backendImpl) Issue(ctx context.Context, name string) (*InitBundleWithMe }, nil } -func (b *backendImpl) IssueCRS(ctx context.Context, name string, validUntil time.Time) (*CRSWithMeta, error) { +func (b *backendImpl) IssueCRS(ctx context.Context, name string, validUntil time.Time, maxRegistrations uint64) (*CRSWithMeta, error) { if err := access.CheckAccess(ctx, storage.Access_READ_WRITE_ACCESS); err != nil { return nil, err } @@ -166,6 +171,10 @@ func (b *backendImpl) IssueCRS(ctx context.Context, name string, validUntil time return nil, err } + if maxRegistrations > maxRegistrationsUpperLimit { + return nil, errox.InvalidArgs.Newf("cluster registration limit must be in the range 0...%d", maxRegistrationsUpperLimit) + } + caCert, err := b.certProvider.GetCA() if err != nil { return nil, errors.Wrap(err, "retrieving CA certificate") @@ -189,12 +198,13 @@ func (b *backendImpl) IssueCRS(ctx context.Context, name string, validUntil time // On the storage side we are reusing the InitBundleMeta. 
meta := &storage.InitBundleMeta{ - Id: id.String(), - Name: name, - CreatedAt: protocompat.TimestampNow(), - CreatedBy: user, - ExpiresAt: expiryTimestamp, - Version: storage.InitBundleMeta_CRS, + Id: id.String(), + Name: name, + CreatedAt: protocompat.TimestampNow(), + CreatedBy: user, + ExpiresAt: expiryTimestamp, + Version: storage.InitBundleMeta_CRS, + MaxRegistrations: maxRegistrations, } if err := b.store.Add(storeCtx, meta); err != nil { diff --git a/central/clusterinit/backend/backend_test.go b/central/clusterinit/backend/backend_test.go index 2e9aa94a5abba..fe3884fc931bd 100644 --- a/central/clusterinit/backend/backend_test.go +++ b/central/clusterinit/backend/backend_test.go @@ -227,11 +227,11 @@ func (s *clusterInitBackendTestSuite) TestCRSNameMustBeUnique() { crsName := "test1" // Issue new CRS. - _, err := s.backend.IssueCRS(ctx, crsName, time.Time{}) + _, err := s.backend.IssueCRS(ctx, crsName, time.Time{}, 0) s.Require().NoError(err) // Attempt to issue again with same name. - _, err = s.backend.IssueCRS(ctx, crsName, time.Time{}) + _, err = s.backend.IssueCRS(ctx, crsName, time.Time{}, 0) s.Require().Error(err) s.Require().ErrorIs(err, store.ErrInitBundleDuplicateName) } @@ -239,7 +239,7 @@ func (s *clusterInitBackendTestSuite) TestCRSNameMustBeUnique() { func (s *clusterInitBackendTestSuite) TestCRSDefaultExpiration() { expectedNotAfter := time.Now().UTC().Add(24 * time.Hour) - crsWithMeta, err := s.backend.IssueCRS(s.ctx, "crs-default-expiration", time.Time{}.UTC()) + crsWithMeta, err := s.backend.IssueCRS(s.ctx, "crs-default-expiration", time.Time{}.UTC(), 0) s.Require().NoError(err) certPEM := []byte(crsWithMeta.CRS.Cert) @@ -259,7 +259,7 @@ func (s *clusterInitBackendTestSuite) TestCRSExpirationValidUntil() { validUntil, err := time.Parse(time.RFC3339, "2106-01-02T15:04:05Z") s.Require().NoError(err) - crsWithMeta, err := s.backend.IssueCRS(ctx, crsName, validUntil) + crsWithMeta, err := s.backend.IssueCRS(ctx, crsName, validUntil, 0) s.Require().NoError(err) certPEM := []byte(crsWithMeta.CRS.Cert) @@ -275,7 +275,7 @@ func (s *clusterInitBackendTestSuite) TestCRSLifecycle() { crsName := "test1" // Issue new CRS. - crsWithMeta, err := s.backend.IssueCRS(ctx, crsName, time.Time{}) + crsWithMeta, err := s.backend.IssueCRS(ctx, crsName, time.Time{}, 0) s.Require().NoError(err) id := crsWithMeta.Meta.GetId() @@ -354,14 +354,18 @@ func (s *clusterInitBackendTestSuite) TestCRSLifecycle() { } } +func (s *clusterInitBackendTestSuite) TestCrsIssuingWithTooLargeRegistrationLimit() { + crsName := "test-crs-exceeding-limit" + _, err := s.backend.IssueCRS(s.ctx, crsName, time.Time{}, 101) + s.Require().Error(err, "issuing CRS with maxRegistrations=101 succeeded.") +} + // Tests if attempt to issue two init bundles with the same name fails as expected. 
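For orientation, here is a minimal sketch (not part of the patch) of how a caller might use the extended IssueCRS signature introduced above. It assumes the Backend interface and the errox usage shown in the hunks above; the function name, CRS name, and the limit of 5 are illustrative only.

// Sketch: issuing a registration-limited CRS via the backend shown above.
// Assumed imports: context, fmt, time, and the clusterinit backend package.
func issueLimitedCRS(ctx context.Context, b backend.Backend) error {
	// A limit of 0 keeps the previous behavior (unlimited registrations, no bookkeeping);
	// values above maxRegistrationsUpperLimit (100) are rejected with errox.InvalidArgs.
	crsWithMeta, err := b.IssueCRS(ctx, "limited-crs", time.Now().Add(24*time.Hour), 5)
	if err != nil {
		return err
	}
	// The limit is persisted on the reused InitBundleMeta and travels with the CRS metadata.
	fmt.Printf("issued CRS %s (maxRegistrations=%d)\n",
		crsWithMeta.Meta.GetId(), crsWithMeta.Meta.GetMaxRegistrations())
	return nil
}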
func (s *clusterInitBackendTestSuite) TestIssuingWithDuplicateName() { - ctx := s.ctx - - _, err := s.backend.Issue(ctx, "test2") + _, err := s.backend.Issue(s.ctx, "test2") s.Require().NoError(err) - _, err = s.backend.Issue(ctx, "test2") + _, err = s.backend.Issue(s.ctx, "test2") s.Require().Error(err, "issuing two init bundles with the same name") } @@ -382,7 +386,7 @@ func (s *clusterInitBackendTestSuite) TestValidateClientCertificateNotFound() { err := s.backend.ValidateClientCertificate(ctx, certs) s.Require().Error(err) - s.Equal(fmt.Sprintf("failed checking init bundle status %[1]q: retrieving init bundle %[1]q: init bundle not found", id), err.Error()) + s.Equal(fmt.Sprintf("failed checking init bundle status %[1]q: retrieving init bundle %[1]q: init bundle or CRS not found", id), err.Error()) } func (s *clusterInitBackendTestSuite) TestValidateClientCertificateEphemeralInitBundle() { diff --git a/central/clusterinit/backend/mocks/backend.go b/central/clusterinit/backend/mocks/backend.go index dbcb55ba72597..7ce7d3eabded1 100644 --- a/central/clusterinit/backend/mocks/backend.go +++ b/central/clusterinit/backend/mocks/backend.go @@ -119,18 +119,18 @@ func (mr *MockBackendMockRecorder) Issue(ctx, name any) *gomock.Call { } // IssueCRS mocks base method. -func (m *MockBackend) IssueCRS(ctx context.Context, name string, validUntil time.Time) (*backend.CRSWithMeta, error) { +func (m *MockBackend) IssueCRS(ctx context.Context, name string, validUntil time.Time, maxRegistrations uint64) (*backend.CRSWithMeta, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IssueCRS", ctx, name, validUntil) + ret := m.ctrl.Call(m, "IssueCRS", ctx, name, validUntil, maxRegistrations) ret0, _ := ret[0].(*backend.CRSWithMeta) ret1, _ := ret[1].(error) return ret0, ret1 } // IssueCRS indicates an expected call of IssueCRS. -func (mr *MockBackendMockRecorder) IssueCRS(ctx, name, validUntil any) *gomock.Call { +func (mr *MockBackendMockRecorder) IssueCRS(ctx, name, validUntil, maxRegistrations any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IssueCRS", reflect.TypeOf((*MockBackend)(nil).IssueCRS), ctx, name, validUntil) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IssueCRS", reflect.TypeOf((*MockBackend)(nil).IssueCRS), ctx, name, validUntil, maxRegistrations) } // Revoke mocks base method. diff --git a/central/clusterinit/backend/render.go b/central/clusterinit/backend/render.go index ea31101f22c07..962e99593d950 100644 --- a/central/clusterinit/backend/render.go +++ b/central/clusterinit/backend/render.go @@ -40,6 +40,7 @@ const ( # It is used for setting up StackRox secured clusters. # NOTE: This file contains secret data that allows connecting new secured clusters to central, # and needs to be handled and stored accordingly. 
+# ` ) @@ -174,6 +175,16 @@ func (b *CRSWithMeta) RenderAsK8sSecret() ([]byte, error) { var buf bytes.Buffer _, _ = fmt.Fprint(&buf, crsHeader) + var what string + switch b.Meta.GetMaxRegistrations() { + case 0: + what = "any number of clusters" + case 1: + what = "only one cluster" + default: + what = fmt.Sprintf("at most %v clusters", b.Meta.GetMaxRegistrations()) + } + _, _ = fmt.Fprintf(&buf, "# This Cluster Registration Secret can be used for registering %s.\n", what) crs, err := crs.SerializeSecret(b.CRS) if err != nil { diff --git a/central/clusterinit/service/service_impl.go b/central/clusterinit/service/service_impl.go index df69adf42c4a1..a38491fc3e67e 100644 --- a/central/clusterinit/service/service_impl.go +++ b/central/clusterinit/service/service_impl.go @@ -137,7 +137,7 @@ func (s *serviceImpl) GenerateCRS(ctx context.Context, request *v1.CRSGenRequest return nil, status.Error(codes.Unimplemented, "support for generating Cluster Registration Secrets (CRS) is not enabled") } - generated, err := s.backend.IssueCRS(ctx, request.GetName(), time.Time{}) + generated, err := s.backend.IssueCRS(ctx, request.GetName(), time.Time{}, 0) if err != nil { if errors.Is(err, store.ErrInitBundleDuplicateName) { return nil, status.Errorf(codes.AlreadyExists, "generating new CRS: %s", err) @@ -162,10 +162,6 @@ func (s *serviceImpl) GenerateCRSExtended(ctx context.Context, request *v1.CRSGe return nil, status.Error(codes.Unimplemented, "support for generating Cluster Registration Secrets (CRS) is not enabled") } - if request.GetMaxRegistrations() != 0 { - return nil, errox.NotImplemented.CausedBy("max-registration limits not supported") - } - reqValidUntil := request.GetValidUntil() reqValidFor := request.GetValidFor() if reqValidUntil != nil && reqValidFor != nil { @@ -177,7 +173,9 @@ func (s *serviceImpl) GenerateCRSExtended(ctx context.Context, request *v1.CRSGe validUntil = protocompat.NilOrNow(reqValidUntil).Add(reqValidFor.AsDuration()) } - generated, err := s.backend.IssueCRS(ctx, request.GetName(), validUntil) + reqMaxRegistrations := request.GetMaxRegistrations() + + generated, err := s.backend.IssueCRS(ctx, request.GetName(), validUntil, reqMaxRegistrations) if err != nil { if errors.Is(err, store.ErrInitBundleDuplicateName) { return nil, status.Errorf(codes.AlreadyExists, "generating new CRS: %s", err) diff --git a/central/clusterinit/service/service_impl_test.go b/central/clusterinit/service/service_impl_test.go index ba88f77356366..aa3fff09458b0 100644 --- a/central/clusterinit/service/service_impl_test.go +++ b/central/clusterinit/service/service_impl_test.go @@ -59,8 +59,8 @@ func TestGenerateCRS(t *testing.T) { mockBackend := mocks.NewMockBackend(mockCtrl) service := New(mockBackend, mockStore) - mockBackend.EXPECT().IssueCRS(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).DoAndReturn( - func(_ context.Context, _ string, validUntil time.Time) (*backend.CRSWithMeta, error) { + mockBackend.EXPECT().IssueCRS(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(1).DoAndReturn( + func(_ context.Context, _ string, validUntil time.Time, maxRegistrations uint64) (*backend.CRSWithMeta, error) { assert.True(t, validUntil.IsZero()) crsWithMeta := &backend.CRSWithMeta{ CRS: &crs.CRS{}, @@ -84,8 +84,8 @@ func TestGenerateCRSWithoutValidity(t *testing.T) { mockBackend := mocks.NewMockBackend(mockCtrl) service := New(mockBackend, mockStore) - mockBackend.EXPECT().IssueCRS(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).DoAndReturn( - func(_ context.Context, _ string, validUntil 
time.Time) (*backend.CRSWithMeta, error) { + mockBackend.EXPECT().IssueCRS(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(1).DoAndReturn( + func(_ context.Context, _ string, validUntil time.Time, maxRegistrations uint64) (*backend.CRSWithMeta, error) { assert.True(t, validUntil.IsZero()) crsWithMeta := &backend.CRSWithMeta{ CRS: &crs.CRS{}, @@ -111,10 +111,10 @@ func TestGenerateCRSWithValidUntil(t *testing.T) { reqValidUntil, err := time.Parse(time.RFC3339, "2100-01-02T13:04:05Z") assert.NoError(t, err, "parsing RFC3339 timestamp failed") - mockBackend.EXPECT().IssueCRS(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).DoAndReturn( + mockBackend.EXPECT().IssueCRS(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(1).DoAndReturn( // Verify that the validUntil timestamp passed to the backend matches what is specified // in the service request. - func(_ context.Context, _ string, validUntil time.Time) (*backend.CRSWithMeta, error) { + func(_ context.Context, _ string, validUntil time.Time, maxRegistrations uint64) (*backend.CRSWithMeta, error) { assert.True(t, validUntil.Equal(reqValidUntil)) crsWithMeta := &backend.CRSWithMeta{ CRS: &crs.CRS{}, @@ -142,8 +142,8 @@ func TestGenerateCRSWithValidFor(t *testing.T) { expectedValidUntil := time.Now().Add(reqValidFor) epsilon := 10 * time.Second - mockBackend.EXPECT().IssueCRS(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).DoAndReturn( - func(_ context.Context, _ string, validUntil time.Time) (*backend.CRSWithMeta, error) { + mockBackend.EXPECT().IssueCRS(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(1).DoAndReturn( + func(_ context.Context, _ string, validUntil time.Time, maxRegistrations uint64) (*backend.CRSWithMeta, error) { // Verify that the validUntil passed to the backend matches now() + validFor. timeDelta := validUntil.Sub(expectedValidUntil) assert.Less(t, timeDelta, epsilon, "CRS valid for longer than expected") @@ -173,8 +173,6 @@ func TestGenerateCRSWithValidForAndValidUntil(t *testing.T) { assert.NoError(t, err, "parsing RFC3339 timestamp failed") reqValidFor := 10 * time.Minute - mockBackend.EXPECT().IssueCRS(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) - request := &v1.CRSGenRequestExtended{ Name: "secured-cluster", ValidUntil: timestamppb.New(reqValidUntil), diff --git a/central/clusterinit/service/transformation.go b/central/clusterinit/service/transformation.go index aad16592995a4..7beaf0c683b40 100644 --- a/central/clusterinit/service/transformation.go +++ b/central/clusterinit/service/transformation.go @@ -24,10 +24,13 @@ func initBundleMetaStorageToV1WithImpactedClusters(meta *storage.InitBundleMeta, func crsMetaStorageToV1(meta *storage.InitBundleMeta) *v1.CRSMeta { return &v1.CRSMeta{ - Id: meta.GetId(), - Name: meta.GetName(), - CreatedAt: meta.GetCreatedAt(), - CreatedBy: meta.GetCreatedBy(), - ExpiresAt: meta.GetExpiresAt(), + Id: meta.GetId(), + Name: meta.GetName(), + CreatedAt: meta.GetCreatedAt(), + CreatedBy: meta.GetCreatedBy(), + ExpiresAt: meta.GetExpiresAt(), + MaxRegistrations: meta.GetMaxRegistrations(), + RegistrationsInitiated: meta.GetRegistrationsInitiated(), + RegistrationsCompleted: meta.GetRegistrationsCompleted(), } } diff --git a/central/clusterinit/store/mocks/store.go b/central/clusterinit/store/mocks/store.go new file mode 100644 index 0000000000000..96cfbd674ebd2 --- /dev/null +++ b/central/clusterinit/store/mocks/store.go @@ -0,0 +1,268 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: store.go +// +// Generated by this command: +// +// mockgen -package mocks -destination mocks/store.go -source store.go +// + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + storage "github.com/stackrox/rox/generated/storage" + gomock "go.uber.org/mock/gomock" +) + +// MockStore is a mock of Store interface. +type MockStore struct { + ctrl *gomock.Controller + recorder *MockStoreMockRecorder + isgomock struct{} +} + +// MockStoreMockRecorder is the mock recorder for MockStore. +type MockStoreMockRecorder struct { + mock *MockStore +} + +// NewMockStore creates a new mock instance. +func NewMockStore(ctrl *gomock.Controller) *MockStore { + mock := &MockStore{ctrl: ctrl} + mock.recorder = &MockStoreMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockStore) EXPECT() *MockStoreMockRecorder { + return m.recorder +} + +// Add mocks base method. +func (m *MockStore) Add(ctx context.Context, bundleMeta *storage.InitBundleMeta) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Add", ctx, bundleMeta) + ret0, _ := ret[0].(error) + return ret0 +} + +// Add indicates an expected call of Add. +func (mr *MockStoreMockRecorder) Add(ctx, bundleMeta any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockStore)(nil).Add), ctx, bundleMeta) +} + +// Delete mocks base method. +func (m *MockStore) Delete(ctx context.Context, id string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", ctx, id) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockStoreMockRecorder) Delete(ctx, id any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockStore)(nil).Delete), ctx, id) +} + +// Get mocks base method. +func (m *MockStore) Get(ctx context.Context, id string) (*storage.InitBundleMeta, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, id) + ret0, _ := ret[0].(*storage.InitBundleMeta) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockStoreMockRecorder) Get(ctx, id any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockStore)(nil).Get), ctx, id) +} + +// GetAll mocks base method. +func (m *MockStore) GetAll(ctx context.Context) ([]*storage.InitBundleMeta, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAll", ctx) + ret0, _ := ret[0].([]*storage.InitBundleMeta) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAll indicates an expected call of GetAll. +func (mr *MockStoreMockRecorder) GetAll(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAll", reflect.TypeOf((*MockStore)(nil).GetAll), ctx) +} + +// GetAllCRS mocks base method. +func (m *MockStore) GetAllCRS(ctx context.Context) ([]*storage.InitBundleMeta, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAllCRS", ctx) + ret0, _ := ret[0].([]*storage.InitBundleMeta) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAllCRS indicates an expected call of GetAllCRS. 
+func (mr *MockStoreMockRecorder) GetAllCRS(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllCRS", reflect.TypeOf((*MockStore)(nil).GetAllCRS), ctx) +} + +// InitiateClusterRegistration mocks base method. +func (m *MockStore) InitiateClusterRegistration(ctx context.Context, initArtifactId, clusterName string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InitiateClusterRegistration", ctx, initArtifactId, clusterName) + ret0, _ := ret[0].(error) + return ret0 +} + +// InitiateClusterRegistration indicates an expected call of InitiateClusterRegistration. +func (mr *MockStoreMockRecorder) InitiateClusterRegistration(ctx, initArtifactId, clusterName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitiateClusterRegistration", reflect.TypeOf((*MockStore)(nil).InitiateClusterRegistration), ctx, initArtifactId, clusterName) +} + +// MarkClusterRegistrationComplete mocks base method. +func (m *MockStore) MarkClusterRegistrationComplete(ctx context.Context, initArtifactId, clusterName string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarkClusterRegistrationComplete", ctx, initArtifactId, clusterName) + ret0, _ := ret[0].(error) + return ret0 +} + +// MarkClusterRegistrationComplete indicates an expected call of MarkClusterRegistrationComplete. +func (mr *MockStoreMockRecorder) MarkClusterRegistrationComplete(ctx, initArtifactId, clusterName any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkClusterRegistrationComplete", reflect.TypeOf((*MockStore)(nil).MarkClusterRegistrationComplete), ctx, initArtifactId, clusterName) +} + +// Revoke mocks base method. +func (m *MockStore) Revoke(ctx context.Context, id string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Revoke", ctx, id) + ret0, _ := ret[0].(error) + return ret0 +} + +// Revoke indicates an expected call of Revoke. +func (mr *MockStoreMockRecorder) Revoke(ctx, id any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Revoke", reflect.TypeOf((*MockStore)(nil).Revoke), ctx, id) +} + +// Upsert mocks base method. +func (m *MockStore) Upsert(ctx context.Context, crs *storage.InitBundleMeta) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Upsert", ctx, crs) + ret0, _ := ret[0].(error) + return ret0 +} + +// Upsert indicates an expected call of Upsert. +func (mr *MockStoreMockRecorder) Upsert(ctx, crs any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Upsert", reflect.TypeOf((*MockStore)(nil).Upsert), ctx, crs) +} + +// MockUnderlyingStore is a mock of UnderlyingStore interface. +type MockUnderlyingStore struct { + ctrl *gomock.Controller + recorder *MockUnderlyingStoreMockRecorder + isgomock struct{} +} + +// MockUnderlyingStoreMockRecorder is the mock recorder for MockUnderlyingStore. +type MockUnderlyingStoreMockRecorder struct { + mock *MockUnderlyingStore +} + +// NewMockUnderlyingStore creates a new mock instance. +func NewMockUnderlyingStore(ctrl *gomock.Controller) *MockUnderlyingStore { + mock := &MockUnderlyingStore{ctrl: ctrl} + mock.recorder = &MockUnderlyingStoreMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockUnderlyingStore) EXPECT() *MockUnderlyingStoreMockRecorder { + return m.recorder +} + +// Delete mocks base method. 
+func (m *MockUnderlyingStore) Delete(ctx context.Context, id string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", ctx, id) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockUnderlyingStoreMockRecorder) Delete(ctx, id any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockUnderlyingStore)(nil).Delete), ctx, id) +} + +// Exists mocks base method. +func (m *MockUnderlyingStore) Exists(ctx context.Context, id string) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Exists", ctx, id) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Exists indicates an expected call of Exists. +func (mr *MockUnderlyingStoreMockRecorder) Exists(ctx, id any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exists", reflect.TypeOf((*MockUnderlyingStore)(nil).Exists), ctx, id) +} + +// Get mocks base method. +func (m *MockUnderlyingStore) Get(ctx context.Context, id string) (*storage.InitBundleMeta, bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, id) + ret0, _ := ret[0].(*storage.InitBundleMeta) + ret1, _ := ret[1].(bool) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// Get indicates an expected call of Get. +func (mr *MockUnderlyingStoreMockRecorder) Get(ctx, id any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockUnderlyingStore)(nil).Get), ctx, id) +} + +// Upsert mocks base method. +func (m *MockUnderlyingStore) Upsert(ctx context.Context, obj *storage.InitBundleMeta) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Upsert", ctx, obj) + ret0, _ := ret[0].(error) + return ret0 +} + +// Upsert indicates an expected call of Upsert. +func (mr *MockUnderlyingStoreMockRecorder) Upsert(ctx, obj any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Upsert", reflect.TypeOf((*MockUnderlyingStore)(nil).Upsert), ctx, obj) +} + +// Walk mocks base method. +func (m *MockUnderlyingStore) Walk(ctx context.Context, fn func(*storage.InitBundleMeta) error) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Walk", ctx, fn) + ret0, _ := ret[0].(error) + return ret0 +} + +// Walk indicates an expected call of Walk. +func (mr *MockUnderlyingStoreMockRecorder) Walk(ctx, fn any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Walk", reflect.TypeOf((*MockUnderlyingStore)(nil).Walk), ctx, fn) +} diff --git a/central/clusterinit/store/store.go b/central/clusterinit/store/store.go index 3fd82e25828a7..cc4b74a6cee5a 100644 --- a/central/clusterinit/store/store.go +++ b/central/clusterinit/store/store.go @@ -5,27 +5,38 @@ import ( "github.com/pkg/errors" "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/logging" + "github.com/stackrox/rox/pkg/set" "github.com/stackrox/rox/pkg/sync" ) var ( - // ErrInitBundleNotFound signals that a requested init bundle could not be located in the store. - ErrInitBundleNotFound = errors.New("init bundle not found") + // ErrInitBundleNotFound signals that a requested init bundle or CRS could not be located in the store. + ErrInitBundleNotFound = errors.New("init bundle or CRS not found") - // ErrInitBundleIDCollision signals that an init bundle could not be added to the store due to an ID collision. 
- ErrInitBundleIDCollision = errors.New("init bundle ID collision") + // ErrInitBundleIDCollision signals that an init bundle or a CRS could not be added to the store due to an ID collision. + ErrInitBundleIDCollision = errors.New("init bundle or CRS ID collision") - // ErrInitBundleDuplicateName signals that an init bundle or CRS could not be added because the name already exists on a non-revoked init bundle or CRS. + // ErrInitBundleDuplicateName signals that an init bundle or a CRS could not be added because the name already exists for + // a non-revoked init bundle or CRS. ErrInitBundleDuplicateName = errors.New("init bundle or CRS already exists") + + log = logging.LoggerForModule() ) // Store interface for managing persisted cluster init bundles. +// +//go:generate mockgen-wrapper type Store interface { GetAll(ctx context.Context) ([]*storage.InitBundleMeta, error) GetAllCRS(ctx context.Context) ([]*storage.InitBundleMeta, error) Get(ctx context.Context, id string) (*storage.InitBundleMeta, error) Add(ctx context.Context, bundleMeta *storage.InitBundleMeta) error + Delete(ctx context.Context, id string) error + Upsert(ctx context.Context, crs *storage.InitBundleMeta) error Revoke(ctx context.Context, id string) error + InitiateClusterRegistration(ctx context.Context, initArtifactId, clusterName string) error + MarkClusterRegistrationComplete(ctx context.Context, initArtifactId, clusterName string) error } // UnderlyingStore is the base store that actually accesses the data @@ -109,6 +120,13 @@ func (w *storeImpl) Add(ctx context.Context, meta *storage.InitBundleMeta) error return w.store.Upsert(ctx, meta) } +func (w *storeImpl) Delete(ctx context.Context, id string) error { + w.uniqueUpdateMutex.Lock() + defer w.uniqueUpdateMutex.Unlock() + + return w.store.Delete(ctx, id) +} + func (w *storeImpl) checkDuplicateName(ctx context.Context, meta *storage.InitBundleMeta) error { metas, err := w.GetAll(ctx) if err != nil { @@ -127,6 +145,13 @@ func (w *storeImpl) checkDuplicateName(ctx context.Context, meta *storage.InitBu return nil } +func (w *storeImpl) Upsert(ctx context.Context, crs *storage.InitBundleMeta) error { + w.uniqueUpdateMutex.Lock() + defer w.uniqueUpdateMutex.Unlock() + + return w.store.Upsert(ctx, crs) +} + func (w *storeImpl) Revoke(ctx context.Context, id string) error { w.uniqueUpdateMutex.Lock() defer w.uniqueUpdateMutex.Unlock() @@ -145,3 +170,122 @@ func (w *storeImpl) Revoke(ctx context.Context, id string) error { } return nil } + +// InitiateClusterRegistration checks if another registration using the CRS with the provided CRS ID is possible. +// If the provided id belongs to an init bundle, then registration is always allowed, without any bookkeeping. 
+func (w *storeImpl) InitiateClusterRegistration(ctx context.Context, initArtifactId, clusterName string) error { + w.uniqueUpdateMutex.Lock() + defer w.uniqueUpdateMutex.Unlock() + + initArtifactMeta, err := w.Get(ctx, initArtifactId) + if err != nil { + return errors.Wrapf(err, "retrieving init artifact meta data for ID %q", initArtifactId) + } + + log.Infof("Attempting registration for cluster %s using %s %s.", clusterName, initArtifactMeta.GetVersion().String(), initArtifactMeta.GetId()) + if initArtifactMeta.GetIsRevoked() { + log.Warnf("Init artifact %s is revoked, registration of cluster %s not allowed.", initArtifactId, clusterName) + return errors.Errorf("Init artifact %s is revoked", initArtifactMeta.GetId()) + } + + if initArtifactMeta.GetVersion() == storage.InitBundleMeta_INIT_BUNDLE { + return nil + } + + crsMeta := initArtifactMeta + maxRegistrations := crsMeta.GetMaxRegistrations() + + if maxRegistrations == 0 { + // We don't do any bookkeeping in this case to prevent the clusterinit storage holding the CRS IDs from growing unbounded. + log.Infof("Allowing registration of cluster %s using CRS %s without registration limit.", clusterName, crsMeta.GetId()) + return nil + } + + // Bookkeeping for registration-limited CRS. + registrationsInitiatedSet := set.NewStringSet(crsMeta.GetRegistrationsInitiated()...) + registrationsCompletedSet := set.NewStringSet(crsMeta.GetRegistrationsCompleted()...) + numRegistrationsTotal := uint64(len(registrationsInitiatedSet) + len(registrationsCompletedSet)) + if numRegistrationsTotal >= maxRegistrations { + log.Warnf("maximum number of cluster registrations (%d/%d) with the provided cluster registration secret %s/%q reached.", + numRegistrationsTotal, maxRegistrations, + crsMeta.GetId(), crsMeta.GetName()) + return errors.New("maximum number of allowed cluster registrations reached") + } + if registrationsCompletedSet.Contains(clusterName) { + return errors.Errorf("cluster %s already registered with cluster registration secret %s/%q", clusterName, crsMeta.GetId(), crsMeta.GetName()) + } + + if !registrationsInitiatedSet.Add(clusterName) { + log.Warnf("Attempting to initiate registration of cluster %s, even though it is already associated with CRS %s.", clusterName, crsMeta.GetId()) + return nil + } + + crsMeta.RegistrationsInitiated = registrationsInitiatedSet.AsSlice() + if err := w.store.Upsert(ctx, crsMeta); err != nil { + return errors.Wrapf(err, "updating meta data for cluster registration secret %s/%q", crsMeta.GetId(), crsMeta.GetName()) + } + log.Infof("Added cluster %s to list of initiated registrations for CRS %s (%s).", clusterName, crsMeta.GetName(), crsMeta.GetId()) + + return nil +} + +func (w *storeImpl) MarkClusterRegistrationComplete(ctx context.Context, initArtifactId, clusterName string) error { + w.uniqueUpdateMutex.Lock() + defer w.uniqueUpdateMutex.Unlock() + + initArtifactMeta, err := w.Get(ctx, initArtifactId) + if err != nil { + return errors.Wrapf(err, "retrieving init artifact meta data for ID %q", initArtifactId) + } + + log.Infof("Completing registration of cluster %s using %s %s.", clusterName, initArtifactMeta.GetVersion().String(), initArtifactMeta.GetId()) + + if initArtifactMeta.GetVersion() == storage.InitBundleMeta_INIT_BUNDLE { + return nil + } + + crsMeta := initArtifactMeta + maxRegistrations := crsMeta.GetMaxRegistrations() + + if maxRegistrations == 0 { + log.Infof("Completing the registration of cluster %s using CRS %s without allowed registration limit.", clusterName, crsMeta.GetId()) + return nil + 
} + + // Bookkeeping for registration-limited CRS. + + log.Infof("Marking registration of cluster %s using CRS %s as complete.", clusterName, crsMeta.GetId()) + registrationsInitiatedSet := set.NewStringSet(crsMeta.GetRegistrationsInitiated()...) + registrationsCompletedSet := set.NewStringSet(crsMeta.GetRegistrationsCompleted()...) + + if registrationsCompletedSet.Contains(clusterName) { + // Already done? + log.Infof("Registration of cluster %s using CRS %s already completed.", clusterName, initArtifactId) + return nil + } + + if !registrationsInitiatedSet.Contains(clusterName) { + return errors.Errorf("registration for cluster %s using cluster registration secret %s/%q not initiated", clusterName, crsMeta.GetId(), crsMeta.GetName()) + } + + _ = registrationsInitiatedSet.Remove(clusterName) + _ = registrationsCompletedSet.Add(clusterName) + updatedRegistrationsInitiated := uint64(len(registrationsInitiatedSet)) + updatedRegistrationsCompleted := uint64(len(registrationsCompletedSet)) + // Revoke CRS, if the limit for completed registrations is reached and if no registrations are currently in flight. + if updatedRegistrationsCompleted >= maxRegistrations && updatedRegistrationsInitiated == 0 { + crsMeta.IsRevoked = true + } + + crsMeta.RegistrationsInitiated = registrationsInitiatedSet.AsSlice() + crsMeta.RegistrationsCompleted = registrationsCompletedSet.AsSlice() + if err := w.store.Upsert(ctx, crsMeta); err != nil { + return errors.Wrapf(err, "updating meta data for cluster registration secret %q", crsMeta.GetId()) + } + + if crsMeta.GetIsRevoked() { + log.Infof("Marked CRS %s as revoked.", crsMeta.GetId()) + } + + return nil +} diff --git a/central/clusterinit/store/store_test_constructors.go b/central/clusterinit/store/store_test_constructors.go new file mode 100644 index 0000000000000..c4d2c3981761b --- /dev/null +++ b/central/clusterinit/store/store_test_constructors.go @@ -0,0 +1,14 @@ +package store + +import ( + "testing" + + pgStore "github.com/stackrox/rox/central/clusterinit/store/postgres" + "github.com/stackrox/rox/pkg/postgres" +) + +// GetTestPostgresDataStore provides a datastore connected to postgres for testing purposes. +func GetTestPostgresDataStore(_ testing.TB, pool postgres.DB) Store { + dbStore := pgStore.New(pool) + return NewStore(dbStore) +} diff --git a/central/clusterinit/store/tests/store_test.go b/central/clusterinit/store/tests/store_test.go index 5e1115de3dcb8..dd1498a856ff8 100644 --- a/central/clusterinit/store/tests/store_test.go +++ b/central/clusterinit/store/tests/store_test.go @@ -5,7 +5,10 @@ package tests import ( "context" "errors" + "fmt" + randpkg "math/rand/v2" "testing" + "time" "github.com/stackrox/rox/central/clusterinit/store" pgStore "github.com/stackrox/rox/central/clusterinit/store/postgres" @@ -13,7 +16,13 @@ import ( "github.com/stackrox/rox/pkg/postgres" "github.com/stackrox/rox/pkg/postgres/pgtest" "github.com/stackrox/rox/pkg/sac" + "github.com/stackrox/rox/pkg/uuid" "github.com/stretchr/testify/suite" + "google.golang.org/protobuf/types/known/timestamppb" +) + +var ( + rand = randpkg.New(randpkg.NewPCG(3, 14159)) ) func TestClusterInitStore(t *testing.T) { @@ -118,3 +127,104 @@ func (s *clusterInitStoreTestSuite) TestRevokeToken() { s.Equal(toReuseMetaName.GetName(), reused.GetName()) s.Equal(toRevokeMeta.GetName(), reused.GetName()) } + +// Tests auto revocation for a CRS with maxRegistrations == 0 (unlimited registrations). 
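Before the tests below, a brief sketch (not part of the patch) of the bookkeeping that InitiateClusterRegistration and MarkClusterRegistrationComplete perform for a registration-limited CRS; the store value, CRS ID, and cluster names are illustrative, and the flow mirrors the one-shot test that follows.

// Sketch: lifecycle of a CRS with MaxRegistrations == 1, using the Store interface above.
// Assumed imports: context, the pkg/errors package, and the clusterinit store package.
func oneShotCrsLifecycle(ctx context.Context, s store.Store, crsID string) error {
	// Sensor connects with the CRS certificate: "cluster-a" is recorded in RegistrationsInitiated.
	if err := s.InitiateClusterRegistration(ctx, crsID, "cluster-a"); err != nil {
		return err
	}
	// A second cluster cannot start registering: initiated + completed already equals MaxRegistrations.
	if err := s.InitiateClusterRegistration(ctx, crsID, "cluster-b"); err == nil {
		return errors.New("expected the registration limit to be enforced")
	}
	// Sensor reconnects with its issued service certificate: "cluster-a" moves to RegistrationsCompleted,
	// and the CRS is auto-revoked because the limit is reached and no registration is still in flight.
	return s.MarkClusterRegistrationComplete(ctx, crsID, "cluster-a")
}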
+ +func (s *clusterInitStoreTestSuite) TestCrsWithoutMaxRegistrations() { + clusterName := fmt.Sprintf("some-cluster-%s", uuid.NewV4().String()) + crsId := uuid.NewV4().String() + crsMeta := &storage.InitBundleMeta{ + Id: crsId, + Name: fmt.Sprintf("test-crs-unlimited-1-%d", rand.IntN(10000)), + CreatedAt: timestamppb.New(time.Now()), + Version: storage.InitBundleMeta_CRS, + } + err := s.store.Add(s.ctx, crsMeta) + s.Require().NoError(err, "adding CRS %s failed", crsId) + + err = s.store.InitiateClusterRegistration(s.ctx, crsId, clusterName) + s.NoErrorf(err, "recording initiated registration for CRS %s failed", crsId) + + crsMeta, err = s.store.Get(s.ctx, crsMeta.GetId()) + s.NoErrorf(err, "retrieving CRS %s failed", crsId) + s.Empty(crsMeta.GetRegistrationsInitiated(), "CRS %s has registrationsInitiated non-empty, even though registrations are unlimited", crsId) + + err = s.store.MarkClusterRegistrationComplete(s.ctx, crsId, clusterName) + s.NoErrorf(err, "recording completed registration for CRS %s failed", crsId) + s.Empty(crsMeta.GetRegistrationsInitiated(), "CRS %s has registrationsInitiated non-empty, even though registrations are unlimited", crsId) + s.Empty(crsMeta.GetRegistrationsCompleted(), "CRS %s has registrationsCompleted non-empty, even though registrations are unlimited", crsId) + s.Falsef(crsMeta.GetIsRevoked(), "CRS %s is revoked", crsId) +} + +// Tests auto revocation for a CRS with maxRegistrations == 1. + +func (s *clusterInitStoreTestSuite) TestCrsAutoRevocationOneShot() { + clusterName := fmt.Sprintf("some-cluster-%s", uuid.NewV4().String()) + crsId := uuid.NewV4().String() + crsMeta := &storage.InitBundleMeta{ + Id: crsId, + Name: fmt.Sprintf("test-crs-auto-revocation-1-%d", rand.IntN(10000)), + CreatedAt: timestamppb.New(time.Now()), + Version: storage.InitBundleMeta_CRS, + MaxRegistrations: 1, + } + err := s.store.Add(s.ctx, crsMeta) + s.Require().NoError(err, "adding CRS %s failed", crsId) + + err = s.store.InitiateClusterRegistration(s.ctx, crsId, clusterName) + s.NoErrorf(err, "recording initiated registration for CRS %s failed", crsId) + + err = s.store.InitiateClusterRegistration(s.ctx, crsId, clusterName) + s.Error(err, "cluster registration still possible") + + err = s.store.MarkClusterRegistrationComplete(s.ctx, crsId, clusterName) + s.NoErrorf(err, "recording completed registration for CRS %s failed", crsId) + + crsMeta, err = s.store.Get(s.ctx, crsId) + s.NoErrorf(err, "receiving CRS %s", crsId) + s.Truef(crsMeta.GetIsRevoked(), "CRS %s is not revoked", crsId) + +} + +// Tests auto revocation for a CRS with maxRegistrations > 1. 
+func (s *clusterInitStoreTestSuite) TestCrsAutoRevocationAfterTwoRegistrations() { + clusterName := fmt.Sprintf("some-cluster-%s", uuid.NewV4().String()) + crsId := uuid.NewV4().String() + crsMeta := &storage.InitBundleMeta{ + Id: crsId, + Name: fmt.Sprintf("test-crs-auto-revocation-2-%d", rand.IntN(10000)), + CreatedAt: timestamppb.New(time.Now()), + Version: storage.InitBundleMeta_CRS, + MaxRegistrations: 2, + } + err := s.store.Add(s.ctx, crsMeta) + s.Require().NoError(err, "adding CRS %s failed", crsId) + + err = s.store.InitiateClusterRegistration(s.ctx, crsId, clusterName) + s.NoErrorf(err, "recording initiated registration for CRS %s failed", crsId) + + err = s.store.InitiateClusterRegistration(s.ctx, crsId, clusterName) + s.NoError(err, "cluster registration not possible") + + err = s.store.MarkClusterRegistrationComplete(s.ctx, crsId, clusterName) + s.NoErrorf(err, "recording completed registration for CRS %s failed", crsId) + + crsMeta, err = s.store.Get(s.ctx, crsId) + s.NoErrorf(err, "receiving CRS %s", crsId) + s.Falsef(crsMeta.GetIsRevoked(), "CRS %s is not revoked", crsId) + + clusterName = fmt.Sprintf("some-cluster-%s", uuid.NewV4().String()) + + err = s.store.InitiateClusterRegistration(s.ctx, crsId, clusterName) + s.NoErrorf(err, "recording initiated registration for CRS %s failed", crsId) + + err = s.store.InitiateClusterRegistration(s.ctx, crsId, clusterName) + s.Error(err, "cluster registration still possible") + + err = s.store.MarkClusterRegistrationComplete(s.ctx, crsId, clusterName) + s.NoErrorf(err, "recording completed registration for CRS %s failed", crsId) + + crsMeta, err = s.store.Get(s.ctx, crsId) + s.NoErrorf(err, "receiving CRS %s", crsId) + s.Truef(crsMeta.GetIsRevoked(), "CRS %s is not revoked", crsId) +} diff --git a/central/graphql/resolvers/test_setup_utils.go b/central/graphql/resolvers/test_setup_utils.go index 56b485481dd8c..c15da3a9d0bd4 100644 --- a/central/graphql/resolvers/test_setup_utils.go +++ b/central/graphql/resolvers/test_setup_utils.go @@ -254,7 +254,7 @@ func CreateTestClusterDatastore(t testing.TB, testDB *pgtest.TestPostgres, ctrl connMgr.EXPECT().GetConnection(gomock.Any()).AnyTimes() datastore, err := clusterDataStore.New(storage, healthStorage, clusterCVEDS, nil, nil, namespaceDS, nil, nodeDataStore, nil, nil, - netFlows, netEntities, nil, nil, nil, connMgr, nil, ranking.ClusterRanker(), nil, nil) + netFlows, netEntities, nil, nil, nil, connMgr, nil, ranking.ClusterRanker(), nil, nil, nil) assert.NoError(t, err, "failed to create cluster datastore") return datastore } diff --git a/central/main.go b/central/main.go index 50f62b8a20211..28d9d20d74b89 100644 --- a/central/main.go +++ b/central/main.go @@ -43,6 +43,7 @@ import ( clusterService "github.com/stackrox/rox/central/cluster/service" "github.com/stackrox/rox/central/clusterinit/backend" clusterInitService "github.com/stackrox/rox/central/clusterinit/service" + clusterInitStore "github.com/stackrox/rox/central/clusterinit/store/singleton" clustersHelmConfig "github.com/stackrox/rox/central/clusters/helmconfig" clustersZip "github.com/stackrox/rox/central/clusters/zip" complianceDatastore "github.com/stackrox/rox/central/compliance/datastore" @@ -455,7 +456,7 @@ func servicesToRegister() []pkgGRPC.APIService { roleService.Singleton(), searchService.Singleton(), secretService.Singleton(), - sensorService.New(connection.ManagerSingleton(), all.Singleton(), clusterDataStore.Singleton(), installationStore.Singleton()), + sensorService.New(connection.ManagerSingleton(), 
all.Singleton(), clusterDataStore.Singleton(), installationStore.Singleton(), clusterInitStore.Singleton()), sensorUpgradeControlService.Singleton(), sensorUpgradeService.Singleton(), serviceAccountService.Singleton(), diff --git a/central/pruning/pruning_test.go b/central/pruning/pruning_test.go index 9313db7086fe1..5255c1c6bd5cc 100644 --- a/central/pruning/pruning_test.go +++ b/central/pruning/pruning_test.go @@ -13,6 +13,7 @@ import ( clusterDatastore "github.com/stackrox/rox/central/cluster/datastore" clusterPostgres "github.com/stackrox/rox/central/cluster/store/cluster/postgres" clusterHealthPostgres "github.com/stackrox/rox/central/cluster/store/clusterhealth/postgres" + clusterInitStoreMocks "github.com/stackrox/rox/central/clusterinit/store/mocks" compliancePrunerMocks "github.com/stackrox/rox/central/complianceoperator/v2/pruner/mocks" configDatastore "github.com/stackrox/rox/central/config/datastore" configDatastoreMocks "github.com/stackrox/rox/central/config/datastore/mocks" @@ -357,6 +358,7 @@ func (s *PruningTestSuite) generateClusterDataStructures() (configDatastore.Data clusterFlows := networkFlowDatastoreMocks.NewMockClusterDataStore(mockCtrl) flows := networkFlowDatastoreMocks.NewMockFlowDataStore(mockCtrl) clusterCVEs := clusterCVEDS.NewMockDataStore(mockCtrl) + clusterInitStore := clusterInitStoreMocks.NewMockStore(mockCtrl) // A bunch of these get called when a cluster is deleted flowsDataStore.EXPECT().CreateFlowStore(gomock.Any(), gomock.Any()).AnyTimes().Return(networkFlowDatastoreMocks.NewMockFlowDataStore(mockCtrl), nil) @@ -415,7 +417,8 @@ func (s *PruningTestSuite) generateClusterDataStructures() (configDatastore.Data notifierMock, ranking.NewRanker(), networkBaselineMgr, - compliancePruner) + compliancePruner, + clusterInitStore) require.NoError(s.T(), err) return mockConfigDatastore, deployments, clusterDataStore diff --git a/central/sensor/service/service_impl.go b/central/sensor/service/service_impl.go index 0c179ae841efe..49db7d21e6429 100644 --- a/central/sensor/service/service_impl.go +++ b/central/sensor/service/service_impl.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/pkg/errors" clusterDataStore "github.com/stackrox/rox/central/cluster/datastore" + clusterInitStore "github.com/stackrox/rox/central/clusterinit/store" installationStore "github.com/stackrox/rox/central/installation/store" "github.com/stackrox/rox/central/metrics/telemetry" "github.com/stackrox/rox/central/securedclustercertgen" @@ -45,19 +46,27 @@ var ( type serviceImpl struct { central.UnimplementedSensorServiceServer - manager connection.Manager - pf pipeline.Factory - clusters clusterDataStore.DataStore - installation installationStore.Store + manager connection.Manager + pf pipeline.Factory + clusters clusterDataStore.DataStore + installation installationStore.Store + clusterInitStore clusterInitStore.Store } // New creates a new Service using the given manager. 
-func New(manager connection.Manager, pf pipeline.Factory, clusters clusterDataStore.DataStore, installation installationStore.Store) Service { +func New( + manager connection.Manager, + pf pipeline.Factory, + clusters clusterDataStore.DataStore, + installation installationStore.Store, + clusterInitStore clusterInitStore.Store, +) Service { return &serviceImpl{ - manager: manager, - pf: pf, - clusters: clusters, - installation: installation, + manager: manager, + pf: pf, + clusters: clusters, + installation: installation, + clusterInitStore: clusterInitStore, } } @@ -169,7 +178,7 @@ func (s *serviceImpl) Communicate(server central.SensorService_CommunicateServer } return nil }); err != nil { - log.Errorf("Could not include certificate bundle in sensor hello message: %s", err) + log.Errorf("Could not include certificate bundle in sensor hello message: %s.", err) } if err := server.Send(&central.MsgToSensor{Msg: &central.MsgToSensor_Hello{Hello: centralHello}}); err != nil { @@ -179,20 +188,45 @@ func (s *serviceImpl) Communicate(server central.SensorService_CommunicateServer if svcType == storage.ServiceType_REGISTRANT_SERVICE { // Terminate connection which uses a CRS certificate at this point. + log.Infof("Terminating initial CRS flow from cluster %s (%s).", cluster.GetName(), cluster.GetId()) return nil } + // At this point the sensor is connecting with non-CRS service certificates. This could mean either init bundle certificates + // or freshly issued per-cluster service certificates. + + if cluster.GetHealthStatus().GetLastContact() == nil && cluster.GetInitBundleId() != "" { + // The call to MarkClusterRegistrationComplete also updates the revocation state of the CRS used for this + // cluster, if needed (no-op in the init bundle case). + log.Infof("First connection from cluster %s (%s) using a sensor service certificate.", cluster.GetName(), cluster.GetId()) + if err := s.clusterInitStore.MarkClusterRegistrationComplete(clusterDSSAC, cluster.GetInitBundleId(), cluster.GetName()); err != nil { + return errors.Wrapf(err, "updating completed-registrations counter for cluster registration secret %q", cluster.GetInitBundleId()) + } + } + if clusterInitArtifactId := cluster.GetInitBundleId(); clusterInitArtifactId != "" && svc.GetInitBundleId() == "" { + // Sensor has connected with a non-init certificate. + // At this point we can clear the cluster's InitBundleId, regardless of which init artifact type has been used. + // For CRS, this is the point after which we don't need it anymore for any sort of bookkeeping. + // For init bundles, this was previously done in `LookupOrCreateClusterFromConfig()`, now it is being done here, + // slightly later in the flow.
+ cluster.InitBundleId = "" + if err := s.clusters.UpdateCluster(clusterDSSAC, cluster); err != nil { + return errors.Wrapf(err, "clearing init artifact ID of cluster %s", cluster.GetName()) + } + log.Infof("Cleared init artifact ID (%s) of newly created cluster %s.", clusterInitArtifactId, cluster.GetName()) + } + if expiryStatus, err := getCertExpiryStatus(identity); err != nil { notBefore, notAfter := identity.ValidityPeriod() - log.Warnf("Failed to convert expiry status of sensor cert (NotBefore: %v, Expiry: %v) from cluster %s to proto: %v", + log.Warnf("Failed to convert expiry status of sensor cert (NotBefore: %v, Expiry: %v) from cluster %s to proto: %v.", notBefore, notAfter, cluster.GetId(), err) } else if expiryStatus != nil { if err := s.clusters.UpdateClusterCertExpiryStatus(clusterDSSAC, cluster.GetId(), expiryStatus); err != nil { - log.Warnf("Failed to update cluster expiry status for cluster %s: %v", cluster.GetId(), err) + log.Warnf("Failed to update cluster expiry status for cluster %s: %v.", cluster.GetId(), err) } } - log.Infof("Cluster %s (%s) has successfully connected to Central", cluster.GetName(), cluster.GetId()) + log.Infof("Cluster %s (%s) has successfully connected to Central.", cluster.GetName(), cluster.GetId()) return s.manager.HandleConnection(server.Context(), sensorHello, cluster, eventPipeline, server) } diff --git a/central/sensor/service/service_impl_test.go b/central/sensor/service/service_impl_test.go index 7f1c5baf7feaf..2d5c3ad4d14d5 100644 --- a/central/sensor/service/service_impl_test.go +++ b/central/sensor/service/service_impl_test.go @@ -1,15 +1,22 @@ +//go:build sql_integration + package service import ( "context" + "encoding/json" + "fmt" "io" + "os" "testing" "time" - clusterMock "github.com/stackrox/rox/central/cluster/datastore/mocks" + clusterDataStore "github.com/stackrox/rox/central/cluster/datastore" + clusterInitStore "github.com/stackrox/rox/central/clusterinit/store" installationMock "github.com/stackrox/rox/central/installation/store/mocks" "github.com/stackrox/rox/central/sensor/service/connection" connectionMock "github.com/stackrox/rox/central/sensor/service/connection/mocks" + "github.com/stackrox/rox/central/sensor/service/pipeline" pipelineMock "github.com/stackrox/rox/central/sensor/service/pipeline/mocks" "github.com/stackrox/rox/generated/internalapi/central" "github.com/stackrox/rox/generated/storage" @@ -18,12 +25,15 @@ import ( authnMock "github.com/stackrox/rox/pkg/grpc/authn/mocks" "github.com/stackrox/rox/pkg/grpc/authn/service" "github.com/stackrox/rox/pkg/mtls" - "github.com/stackrox/rox/pkg/mtls/testutils" + mtlsTestutils "github.com/stackrox/rox/pkg/mtls/testutils" + "github.com/stackrox/rox/pkg/postgres/pgtest" "github.com/stackrox/rox/pkg/protoassert" "github.com/stackrox/rox/pkg/protocompat" "github.com/stackrox/rox/pkg/sac" + pkgSearch "github.com/stackrox/rox/pkg/search" "github.com/stackrox/rox/pkg/utils" "github.com/stackrox/rox/pkg/uuid" + "github.com/stackrox/rox/pkg/version/testutils" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" "go.uber.org/mock/gomock" @@ -118,78 +128,256 @@ func TestIsCARotationSupported(t *testing.T) { } } -// CRS Test Suite. - -type crsTestSuite struct { +// Sensor Service Test Suite (primarily, but not exclusively, for CRS handshake). 
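Before the test suite, a compact recap (a sketch, not part of the patch) of the CRS-related branches that Communicate() now takes, in the order they run; the helper, its inputs, and its return values are illustrative only and use the storage types shown above.

// Sketch: the CRS-related steps added to Communicate(), in the order they run.
// This is a reading aid with simplified inputs, not the real control flow.
func crsHandshakeSteps(svcType storage.ServiceType, cluster *storage.Cluster, presentedInitBundleID string) []string {
	if svcType == storage.ServiceType_REGISTRANT_SERVICE {
		// CRS certificate: Central answers with the full certificate bundle and closes the stream.
		return []string{"send CentralHello with cert bundle", "terminate initial CRS flow"}
	}
	var steps []string
	if cluster.GetHealthStatus().GetLastContact() == nil && cluster.GetInitBundleId() != "" {
		// First connection with an issued sensor certificate: complete the registration
		// (updates the CRS bookkeeping and possibly revokes the CRS; no-op for init bundles).
		steps = append(steps, "MarkClusterRegistrationComplete")
	}
	if cluster.GetInitBundleId() != "" && presentedInitBundleID == "" {
		// Non-init certificate: the init artifact ID on the cluster is no longer needed and is cleared.
		steps = append(steps, "clear cluster.InitBundleId")
	}
	return append(steps, "HandleConnection")
}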
+type sensorServiceTestSuite struct { suite.Suite - mockCtrl *gomock.Controller - context context.Context + mockCtrl *gomock.Controller + internalContext context.Context + db *pgtest.TestPostgres + clusterInitStore clusterInitStore.Store + clusterDataStore clusterDataStore.DataStore } -func (s *crsTestSuite) SetupTest() { +func (s *sensorServiceTestSuite) SetupSuite() { + imageFlavor := "rhacs" + utils.Must(os.Setenv("ROX_IMAGE_FLAVOR", imageFlavor)) + testutils.SetExampleVersion(s.T()) + s.internalContext = sac.WithAllAccess(context.Background()) + s.db = pgtest.ForT(s.T()) + var err error + s.clusterDataStore, err = clusterDataStore.GetTestPostgresDataStore(s.T(), s.db.DB) + s.Require().NoError(err, "failed to create cluster data store") + s.clusterInitStore = clusterDataStore.IntrospectClusterInitStore(s.T(), s.clusterDataStore) + s.mockCtrl = gomock.NewController(s.T()) - s.context = sac.WithAllAccess(context.Background()) - utils.Should(testutils.LoadTestMTLSCerts(s.T())) + utils.Should(mtlsTestutils.LoadTestMTLSCerts(s.T())) +} + +func (s *sensorServiceTestSuite) SetupTest() { + log.Infof("Running test: %s", s.T().Name()) } -func TestCrs(t *testing.T) { - suite.Run(t, new(crsTestSuite)) +func TestClusterRegistration(t *testing.T) { + suite.Run(t, new(sensorServiceTestSuite)) } -func (s *crsTestSuite) TestCrsCentralReturnsAllServiceCertificates() { - sensorService, mockServer := newSensorService(s.context, s.mockCtrl) +func newClusterNames() (string, string, string) { + baseUuid := uuid.NewV4().String() + base := baseUuid[:9] + return base + "a", base + "b", base + "c" + +} + +func (s *sensorServiceTestSuite) TestCrsCentralReturnsAllServiceCertificates() { + _, crsMeta := newCrsMeta(0) + sensorService := newSensorService(s, crsMeta) + clusterName, _, _ := newClusterNames() + mockServer := newMockServerForCrsHandshake(s, crsMeta, sensorDeploymentIdentificationA, clusterName) - // First-time CRS cluster registration. - mockServer.prepareNewHandshake(sensorHello) err := sensorService.Communicate(mockServer) s.NoError(err) - s.Len(mockServer.clustersRegistered, 1, "expected exactly one registered cluster") centralHello := retrieveCentralHello(s, mockServer) - assertCertificateBundleComplete(s, centralHello.GetCertBundle()) -} + _ = centralHello -func assertCertificateBundleComplete(s *crsTestSuite, certBundle map[string]string) { - s.Len(certBundle, 15, "expected 15 entries (1 CA cert, 7 service certs, 7 service keys) in bundle") + assertCertificateBundleComplete(s, centralHello.GetCertBundle()) } -func (s *crsTestSuite) TestCrsFlowCanBeRepeated() { - sensorService, mockServer := newSensorService(s.context, s.mockCtrl) +func (s *sensorServiceTestSuite) TestCrsFlowCanBeRepeated() { + _, crsMeta := newCrsMeta(0) + sensorService := newSensorService(s, crsMeta) + clusterName, _, _ := newClusterNames() // First-time CRS cluster registration. - mockServer.prepareNewHandshake(sensorHello) + mockServer := newMockServerForCrsHandshake(s, crsMeta, sensorDeploymentIdentificationA, clusterName) err := sensorService.Communicate(mockServer) s.NoError(err) - s.Len(mockServer.clustersRegistered, 1, "expected exactly one registered cluster") // Initiating the CRS flow a second time should still work. 
- mockServer.prepareNewHandshake(sensorHello) + mockServer = newMockServerForCrsHandshake(s, crsMeta, sensorDeploymentIdentificationA, clusterName) err = sensorService.Communicate(mockServer) s.NoError(err) - s.Len(mockServer.clustersRegistered, 1, "expected exactly one registered cluster") - // Verify that we again got all certificates we need. - centralHello := retrieveCentralHello(s, mockServer) - assertCertificateBundleComplete(s, centralHello.GetCertBundle()) } -func (s *crsTestSuite) TestCrsFlowFailsAfterLastContact() { - sensorService, mockServer := newSensorService(s.context, s.mockCtrl) +func (s *sensorServiceTestSuite) TestCrsFlowFailsAfterRegistrationComplete() { + _, crsMeta := newCrsMeta(0) + sensorService := newSensorService(s, crsMeta) + clusterName, _, _ := newClusterNames() // CRS cluster registration. - mockServer.prepareNewHandshake(sensorHello) + mockServer := newMockServerForCrsHandshake(s, crsMeta, sensorDeploymentIdentificationA, clusterName) err := sensorService.Communicate(mockServer) s.NoError(err) - // Simulate a connection with service certificates has occurred by updating - // the LastContact field of a cluster. - mockServer.clustersRegistered[0].HealthStatus = &storage.ClusterHealthStatus{ - LastContact: timestamppb.Now(), - } + // Regular connect. + mockServer = newMockServerForRegularConnect(s, sensorDeploymentIdentificationA, clusterName) + err = sensorService.Communicate(mockServer) + s.NoError(err) // Initiating the CRS should fail now. - mockServer.prepareNewHandshake(sensorHello) + mockServer = newMockServerForCrsHandshake(s, crsMeta, sensorDeploymentIdentificationA, clusterName) err = sensorService.Communicate(mockServer) + s.Error(err, "CRS flow succeeded even after regular connect.") s.ErrorContains(err, "forbidden to use a Cluster Registration Certificate for already-existing cluster") - s.Error(err, "CRS flow succeeded even after LastContact field was updated.") +} + +func lookupCrs(s *sensorServiceTestSuite, crsId string) *storage.InitBundleMeta { + crsMeta, err := s.clusterInitStore.Get(s.internalContext, crsId) + s.Require().NoError(err) + return crsMeta +} + +func (s *sensorServiceTestSuite) TestClusterRegistrationWithOneShotCrs() { + crsMetaId, crsMeta := newCrsMeta(1) + sensorService := newSensorService(s, crsMeta) + clusterNameA, clusterNameB, _ := newClusterNames() + + mockServer := newMockServerForCrsHandshake(s, crsMeta, sensorDeploymentIdentificationA, clusterNameA) + + // CRS cluster registration. + err := sensorService.Communicate(mockServer) + s.NoError(err) + + // Verify that cluster registrations is initiated. + crsMeta = lookupCrs(s, crsMetaId) + registrationsInitiated := crsMeta.GetRegistrationsInitiated() + s.Len(registrationsInitiated, 1, "Unexpected number of initiated registrations for CRS") + assert.Containsf(s.T(), registrationsInitiated, clusterNameA, "registrationsInitiated (%v) does not contain %q", registrationsInitiated, clusterNameA) + + // Attempt another cluster registration. + mockServer = newMockServerForCrsHandshake(s, crsMeta, sensorDeploymentIdentificationA, clusterNameB) + err = sensorService.Communicate(mockServer) + s.Error(err) + + // Verify that no other cluster registration is initiated. + crsMeta = lookupCrs(s, crsMetaId) + registrationsInitiated = crsMeta.GetRegistrationsInitiated() + s.Len(registrationsInitiated, 1, "Unexpected number of initiated registrations for CRS after second registration attempt") + + // Execute a regular connect. 
+ mockServer = newMockServerForRegularConnect(s, sensorDeploymentIdentificationA, clusterNameA) + err = sensorService.Communicate(mockServer) + s.NoError(err) + + // Verify that the cluster registration is completed. + crsMeta = lookupCrs(s, crsMetaId) + registrationsInitiated = crsMeta.GetRegistrationsInitiated() + registrationsCompleted := crsMeta.GetRegistrationsCompleted() + s.Len(registrationsInitiated, 0, "Unexpected number of initiated registrations for CRS") + s.Len(registrationsCompleted, 1, "Unexpected number of completed registrations for CRS") + assert.Containsf(s.T(), registrationsCompleted, clusterNameA, "registrationsCompleted (%v) does not contain %q", registrationsCompleted, clusterNameA) + + // Verify CRS is revoked. + crsMeta = lookupCrs(s, crsMetaId) + s.True(crsMeta.GetIsRevoked(), "CRS is not revoked after one-shot use") +} + +func toPrettyJson(s *sensorServiceTestSuite, v any) string { + bytes, err := json.MarshalIndent(v, "|", " ") + s.Require().NoErrorf(err, "JSON marshalling of value %+v failed", v) + return string(bytes) +} + +func (s *sensorServiceTestSuite) TestClusterRegistrationWithTwoLimitCrs() { + crsMetaId, crsMeta := newCrsMeta(2) + sensorService := newSensorService(s, crsMeta) + clusterNameA, clusterNameB, clusterNameC := newClusterNames() + mockServer := newMockServerForCrsHandshake(s, crsMeta, sensorDeploymentIdentificationA, clusterNameA) + + // CRS cluster registration. + err := sensorService.Communicate(mockServer) + s.NoError(err) + + // Verify that the cluster registration is initiated. + crsMeta = lookupCrs(s, crsMetaId) + registrationsInitiated := crsMeta.GetRegistrationsInitiated() + s.Len(registrationsInitiated, 1, "Unexpected number of initiated registrations for CRS:\n%s\n", toPrettyJson(s, crsMeta)) + assert.Containsf(s.T(), registrationsInitiated, clusterNameA, "registrationsInitiated (%v) does not contain %q", registrationsInitiated, clusterNameA) + + // Attempt another cluster registration. + mockServer = newMockServerForCrsHandshake(s, crsMeta, sensorDeploymentIdentificationB, clusterNameB) + err = sensorService.Communicate(mockServer) + s.NoError(err) + + // Verify that the second cluster registration is initiated. + crsMeta = lookupCrs(s, crsMetaId) + registrationsInitiated = crsMeta.GetRegistrationsInitiated() + s.Len(registrationsInitiated, 2, "Unexpected number of initiated registrations for CRS") + assert.Containsf(s.T(), registrationsInitiated, clusterNameB, "registrationsInitiated (%v) does not contain %q", registrationsInitiated, clusterNameB) + + // Execute regular connects. + mockServer = newMockServerForRegularConnect(s, sensorDeploymentIdentificationA, clusterNameA) + err = sensorService.Communicate(mockServer) + s.NoError(err) + mockServer = newMockServerForRegularConnect(s, sensorDeploymentIdentificationB, clusterNameB) + err = sensorService.Communicate(mockServer) + s.NoError(err) + + // Verify that both cluster registrations are completed.
+ crsMeta = lookupCrs(s, crsMetaId) + registrationsInitiated = crsMeta.GetRegistrationsInitiated() + registrationsCompleted := crsMeta.GetRegistrationsCompleted() + s.Len(registrationsInitiated, 0, "Unexpected number of initiated registrations for CRS") + s.Len(registrationsCompleted, 2, "Unexpected number of completed registrations for CRS") + assert.Containsf(s.T(), registrationsCompleted, clusterNameA, "registrationsCompleted (%v) does not contain %q", registrationsCompleted, clusterNameA) + assert.Containsf(s.T(), registrationsCompleted, clusterNameB, "registrationsCompleted (%v) does not contain %q", registrationsCompleted, clusterNameB) + + // Attempt another cluster registration. + mockServer = newMockServerForCrsHandshake(s, crsMeta, sensorDeploymentIdentificationA, clusterNameC) + err = sensorService.Communicate(mockServer) + s.Error(err) + + // Verify that no other cluster registration has been recorded. + crsMeta = lookupCrs(s, crsMetaId) + registrationsInitiated = crsMeta.GetRegistrationsInitiated() + registrationsCompleted = crsMeta.GetRegistrationsCompleted() + s.Len(registrationsInitiated, 0, "Unexpected number of initiated registrations for CRS after third registration attempt") + s.Len(registrationsCompleted, 2, "Unexpected number of completed registrations for CRS after third registration attempt") + + // Verify CRS is revoked. + crsMeta = lookupCrs(s, crsMetaId) + s.True(crsMeta.GetIsRevoked(), "CRS is not revoked after registering two clusters") +} + +func (s *sensorServiceTestSuite) lookupClusterByName(name string) *storage.Cluster { + query := pkgSearch.NewQueryBuilder().AddExactMatches(pkgSearch.Cluster, name).ProtoQuery() + results, err := s.clusterDataStore.Search(s.internalContext, query) + s.NoError(err) + s.Lenf(results, 1, "unexpected number of search results when looking for cluster by name %s", name) + + resultIds := pkgSearch.ResultsToIDs(results) + s.Len(resultIds, 1) + + clusterId := resultIds[0] + cluster, ok, err := s.clusterDataStore.GetCluster(s.internalContext, clusterId) + s.NoErrorf(err, "failed to retrieve cluster %s (%s)", name, clusterId) + s.True(ok) + + return cluster +} + +func (s *sensorServiceTestSuite) TestClusterRegistrationWithInitBundle() { + initBundleId, initBundleMeta := newInitBundleMeta() + sensorService := newSensorService(s, initBundleMeta) + clusterNameA, _, _ := newClusterNames() + + mockServer := newMockServerForInitBundleHandshake(s, initBundleMeta, sensorDeploymentIdentificationA, clusterNameA) + + // Init Bundle cluster registration. + err := sensorService.Communicate(mockServer) + s.NoError(err) + + // Verify that init bundle is still associated with cluster. + cluster := s.lookupClusterByName(clusterNameA) + s.NotEmptyf(cluster.GetInitBundleId(), "cluster %s lost association to init bundle %s", clusterNameA, initBundleId) + + // Simulate regular connection with non-init certificate. + mockServer = newMockServerForRegularConnect(s, sensorDeploymentIdentificationA, clusterNameA) + err = sensorService.Communicate(mockServer) + s.NoError(err) + + // Verify that init bundle is not associated with cluster anymore. + cluster = s.lookupClusterByName(clusterNameA) + s.Emptyf(cluster.GetInitBundleId(), "cluster %s is still associated with init bundle %s", clusterNameA, initBundleId) } // Implementation of a simple mock server to be used in the CRS test suite.
@@ -201,24 +389,66 @@ type mockServer struct { clustersRegistered []*storage.Cluster } -func newMockServer(ctx context.Context, ctrl *gomock.Controller) *mockServer { - mockIdentity := authnMock.NewMockIdentity(ctrl) - mockIdentity.EXPECT().Service().AnyTimes().Return(&registrantIdentity) +func newMockServer(s *sensorServiceTestSuite, identity *storage.ServiceIdentity) *mockServer { + notBefore := time.Now().Add(-time.Minute) + notAfter := time.Now().Add(time.Hour) + + mockIdentity := authnMock.NewMockIdentity(s.mockCtrl) + mockIdentity.EXPECT().Service().AnyTimes().Return(identity) + mockIdentity.EXPECT().ValidityPeriod().AnyTimes().Return(notBefore, notAfter) md := metadata.Pairs(centralsensor.SensorHelloMetadataKey, "true") - ctx = authn.ContextWithIdentity(ctx, mockIdentity, nil) + ctx := authn.ContextWithIdentity(s.internalContext, mockIdentity, nil) ctx = metadata.NewIncomingContext(ctx, md) return &mockServer{ context: ctx, } } -func (s *mockServer) prepareNewHandshake(hello *central.SensorHello) { - s.msgsFromSensor = []*central.MsgFromSensor{ +func prepareHelloHandshake(m *mockServer, sensorDeploymentId *storage.SensorDeploymentIdentification, clusterName string) { + hello := &central.SensorHello{ + SensorVersion: "1.0", + DeploymentIdentification: sensorDeploymentId, + HelmManagedConfigInit: &central.HelmManagedConfigInit{ + ClusterName: clusterName, + ManagedBy: storage.ManagerType_MANAGER_TYPE_HELM_CHART, + }, + } + m.msgsFromSensor = []*central.MsgFromSensor{ { Msg: &central.MsgFromSensor_Hello{Hello: hello}, }, } - s.msgsToSensor = nil + m.msgsToSensor = nil +} + +func newMockServerForCrsHandshake(s *sensorServiceTestSuite, crsMeta *storage.InitBundleMeta, sensorDeploymentId *storage.SensorDeploymentIdentification, clusterName string) *mockServer { + identity := &storage.ServiceIdentity{ + Type: storage.ServiceType_REGISTRANT_SERVICE, + InitBundleId: crsMeta.GetId(), + } + + m := newMockServer(s, identity) + prepareHelloHandshake(m, sensorDeploymentId, clusterName) + return m +} + +func newMockServerForInitBundleHandshake(s *sensorServiceTestSuite, crsMeta *storage.InitBundleMeta, sensorDeploymentId *storage.SensorDeploymentIdentification, clusterName string) *mockServer { + identity := &storage.ServiceIdentity{ + Type: storage.ServiceType_SENSOR_SERVICE, + InitBundleId: crsMeta.GetId(), + } + + m := newMockServer(s, identity) + prepareHelloHandshake(m, sensorDeploymentId, clusterName) + return m +} + +func newMockServerForRegularConnect(s *sensorServiceTestSuite, sensorDeploymentId *storage.SensorDeploymentIdentification, clusterName string) *mockServer { + m := newMockServer(s, &storage.ServiceIdentity{ + Type: storage.ServiceType_SENSOR_SERVICE, + }) + prepareHelloHandshake(m, sensorDeploymentId, clusterName) + return m } func (s *mockServer) Context() context.Context { @@ -268,7 +498,7 @@ func newCluster(clusterName string, hello *central.SensorHello) *storage.Cluster } } -func retrieveCentralHello(s *crsTestSuite, server *mockServer) *central.CentralHello { +func retrieveCentralHello(s *sensorServiceTestSuite, server *mockServer) *central.CentralHello { s.NotEmpty(server.msgsToSensor, "no central response message") centralMsg := server.msgsToSensor[0] centralHello := centralMsg.GetHello() @@ -276,60 +506,93 @@ func retrieveCentralHello(s *crsTestSuite, server *mockServer) *central.CentralH return centralHello } -var registrantIdentity = storage.ServiceIdentity{ - Type: storage.ServiceType_REGISTRANT_SERVICE, -} - -var installInfo *storage.InstallationInfo = &storage.InstallationInfo{ -
Id: "some-central-id", -} - -var sensorDeploymentIdentification = &storage.SensorDeploymentIdentification{ +var sensorDeploymentIdentificationA = &storage.SensorDeploymentIdentification{ SystemNamespaceId: uuid.NewV4().String(), DefaultNamespaceId: uuid.NewV4().String(), - AppNamespace: "my-stackrox-namespace", + AppNamespace: "my-stackrox-namespace-a", AppNamespaceId: uuid.NewV4().String(), AppServiceaccountId: uuid.NewV4().String(), K8SNodeName: "my-node", } -var sensorHello = ¢ral.SensorHello{ - SensorVersion: "1.0", - DeploymentIdentification: sensorDeploymentIdentification, - HelmManagedConfigInit: ¢ral.HelmManagedConfigInit{ - ClusterName: "my-new-cluster", - ManagedBy: storage.ManagerType_MANAGER_TYPE_HELM_CHART, - }, +var sensorDeploymentIdentificationB = &storage.SensorDeploymentIdentification{ + SystemNamespaceId: uuid.NewV4().String(), + DefaultNamespaceId: uuid.NewV4().String(), + AppNamespace: "my-stackrox-namespace-b", + AppNamespaceId: uuid.NewV4().String(), + AppServiceaccountId: uuid.NewV4().String(), + K8SNodeName: "my-node", } -func newSensorService(ctx context.Context, ctrl *gomock.Controller) (Service, *mockServer) { - mockServer := newMockServer(ctx, ctrl) - - mockInstallation := installationMock.NewMockStore(ctrl) +func newSensorService(s *sensorServiceTestSuite, crsMeta *storage.InitBundleMeta) Service { + mockInstallation := installationMock.NewMockStore(s.mockCtrl) + installInfo := &storage.InstallationInfo{ + Id: "some-central-id", + } mockInstallation.EXPECT().Get(gomock.Any()).AnyTimes().Return(installInfo, true, nil) - mockConnetionManager := connectionMock.NewMockManager(ctrl) - mockConnetionManager.EXPECT().GetConnectionPreference(gomock.Any()).AnyTimes().Return(connection.Preferences{}) + mockConnectionManager := connectionMock.NewMockManager(s.mockCtrl) + mockConnectionManager.EXPECT(). + GetConnectionPreference(gomock.Any()). + AnyTimes().Return(connection.Preferences{}) + mockConnectionManager.EXPECT(). + HandleConnection(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + AnyTimes(). + DoAndReturn(mockHandleConnection(s.clusterDataStore)) - pipelineFactory := pipelineMock.NewMockFactory(ctrl) - pipeline := pipelineMock.NewMockClusterPipeline(ctrl) + pipelineFactory := pipelineMock.NewMockFactory(s.mockCtrl) + pipeline := pipelineMock.NewMockClusterPipeline(s.mockCtrl) pipeline.EXPECT().Capabilities().AnyTimes().Return([]centralsensor.CentralCapability{}) pipelineFactory.EXPECT().PipelineForCluster(gomock.Any(), gomock.Any()).AnyTimes().Return(pipeline, nil) - clustersDataStore := clusterMock.NewMockDataStore(ctrl) - clustersDataStore.EXPECT().LookupOrCreateClusterFromConfig( - gomock.Any(), // ctx - gomock.Any(), // clusterID - gomock.Any(), // bundleID - gomock.Any(), // sensorHello - ). - AnyTimes(). 
- DoAndReturn( - func(_ctx context.Context, clusterId, _bundleId string, hello *central.SensorHello) (*storage.Cluster, error) { - return mockServer.LookupOrCreateClusterFromConfig(clusterId, hello) - }, - ) + clusterInitStore := clusterDataStore.IntrospectClusterInitStore(s.T(), s.clusterDataStore) + + if crsMeta != nil { + err := clusterInitStore.Add(s.internalContext, crsMeta) + s.NoError(err, "failed adding dummy CRS to cluster init store") + s.T().Logf("Added dummy CRS with ID %q and name %q", crsMeta.GetId(), crsMeta.GetName()) + } - sensorService := New(mockConnetionManager, pipelineFactory, clustersDataStore, mockInstallation) - return sensorService, mockServer + sensorService := New(mockConnectionManager, pipelineFactory, s.clusterDataStore, mockInstallation, clusterInitStore) + return sensorService +} + +func newCrsMeta(maxRegistrations uint64) (string, *storage.InitBundleMeta) { + id := uuid.NewV4().String() + meta := &storage.InitBundleMeta{ + Id: id, + Name: fmt.Sprintf("name-%s", id), + CreatedAt: timestamppb.New(time.Now()), + ExpiresAt: timestamppb.New(time.Now().Add(10 * time.Minute)), + Version: storage.InitBundleMeta_CRS, + MaxRegistrations: maxRegistrations, + } + return meta.GetId(), meta +} + +func newInitBundleMeta() (string, *storage.InitBundleMeta) { + id := uuid.NewV4().String() + meta := &storage.InitBundleMeta{ + Id: id, + Name: fmt.Sprintf("init-bundle-%s", id), + CreatedAt: timestamppb.New(time.Now()), + ExpiresAt: timestamppb.New(time.Now().Add(10 * time.Minute)), + Version: storage.InitBundleMeta_INIT_BUNDLE, + } + return meta.GetId(), meta +} + +func assertCertificateBundleComplete(s *sensorServiceTestSuite, certBundle map[string]string) { + s.Len(certBundle, 15, "expected 15 entries (1 CA cert, 7 service certs, 7 service keys) in bundle") +} + +type HandleConnectionFunc = func(ctx context.Context, _ *central.SensorHello, cluster *storage.Cluster, _ pipeline.ClusterPipeline, _ central.SensorService_CommunicateServer) error + +func mockHandleConnection(clusterDataStore clusterDataStore.DataStore) HandleConnectionFunc { + return func(ctx context.Context, _ *central.SensorHello, cluster *storage.Cluster, _ pipeline.ClusterPipeline, _ central.SensorService_CommunicateServer) error { + cluster.HealthStatus = &storage.ClusterHealthStatus{ + LastContact: timestamppb.New(time.Now()), + } + return clusterDataStore.UpdateCluster(ctx, cluster) + } } diff --git a/generated/api/v1/cluster_init_service.pb.go b/generated/api/v1/cluster_init_service.pb.go index cb3f7c7398a21..495aa78b6fd85 100644 --- a/generated/api/v1/cluster_init_service.pb.go +++ b/generated/api/v1/cluster_init_service.pb.go @@ -110,14 +110,17 @@ func (x *InitBundleMeta) GetExpiresAt() *timestamppb.Timestamp { } type CRSMeta struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - CreatedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` - CreatedBy *storage.User `protobuf:"bytes,4,opt,name=created_by,json=createdBy,proto3" json:"created_by,omitempty"` - ExpiresAt *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Name string 
`protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + CreatedBy *storage.User `protobuf:"bytes,4,opt,name=created_by,json=createdBy,proto3" json:"created_by,omitempty"` + ExpiresAt *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` + MaxRegistrations uint64 `protobuf:"varint,7,opt,name=max_registrations,json=maxRegistrations,proto3" json:"max_registrations,omitempty"` + RegistrationsInitiated []string `protobuf:"bytes,8,rep,name=registrations_initiated,json=registrationsInitiated,proto3" json:"registrations_initiated,omitempty"` + RegistrationsCompleted []string `protobuf:"bytes,9,rep,name=registrations_completed,json=registrationsCompleted,proto3" json:"registrations_completed,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *CRSMeta) Reset() { @@ -185,6 +188,27 @@ func (x *CRSMeta) GetExpiresAt() *timestamppb.Timestamp { return nil } +func (x *CRSMeta) GetMaxRegistrations() uint64 { + if x != nil { + return x.MaxRegistrations + } + return 0 +} + +func (x *CRSMeta) GetRegistrationsInitiated() []string { + if x != nil { + return x.RegistrationsInitiated + } + return nil +} + +func (x *CRSMeta) GetRegistrationsCompleted() []string { + if x != nil { + return x.RegistrationsCompleted + } + return nil +} + type InitBundleGenResponse struct { state protoimpl.MessageState `protogen:"open.v1"` Meta *InitBundleMeta `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta,omitempty"` @@ -518,11 +542,12 @@ func (x *CRSGenRequest) GetName() string { } type CRSGenRequestExtended struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - ValidUntil *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=valid_until,json=validUntil,proto3" json:"valid_until,omitempty"` - ValidFor *durationpb.Duration `protobuf:"bytes,3,opt,name=valid_for,json=validFor,proto3" json:"valid_for,omitempty"` - MaxRegistrations int32 `protobuf:"varint,4,opt,name=max_registrations,json=maxRegistrations,proto3" json:"max_registrations,omitempty"` // Support for this is to be implemented in ROX-26769. 
+ state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + ValidUntil *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=valid_until,json=validUntil,proto3" json:"valid_until,omitempty"` + ValidFor *durationpb.Duration `protobuf:"bytes,3,opt,name=valid_for,json=validFor,proto3" json:"valid_for,omitempty"` + // 4 was int32 max_registrations + MaxRegistrations uint64 `protobuf:"varint,5,opt,name=max_registrations,json=maxRegistrations,proto3" json:"max_registrations,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -578,7 +603,7 @@ func (x *CRSGenRequestExtended) GetValidFor() *durationpb.Duration { return nil } -func (x *CRSGenRequestExtended) GetMaxRegistrations() int32 { +func (x *CRSGenRequestExtended) GetMaxRegistrations() uint64 { if x != nil { return x.MaxRegistrations } @@ -966,7 +991,7 @@ const file_api_v1_cluster_init_service_proto_rawDesc = "" + "expires_at\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampR\texpiresAt\x1a5\n" + "\x0fImpactedCluster\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x12\x0e\n" + - "\x02id\x18\x02 \x01(\tR\x02id\"\xd1\x01\n" + + "\x02id\x18\x02 \x01(\tR\x02id\"\xf0\x02\n" + "\aCRSMeta\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n" + "\x04name\x18\x02 \x01(\tR\x04name\x129\n" + @@ -975,7 +1000,10 @@ const file_api_v1_cluster_init_service_proto_rawDesc = "" + "\n" + "created_by\x18\x04 \x01(\v2\r.storage.UserR\tcreatedBy\x129\n" + "\n" + - "expires_at\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampR\texpiresAt\"\x94\x01\n" + + "expires_at\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampR\texpiresAt\x12+\n" + + "\x11max_registrations\x18\a \x01(\x04R\x10maxRegistrations\x127\n" + + "\x17registrations_initiated\x18\b \x03(\tR\x16registrationsInitiated\x127\n" + + "\x17registrations_completed\x18\t \x03(\tR\x16registrationsCompleted\"\x94\x01\n" + "\x15InitBundleGenResponse\x12&\n" + "\x04meta\x18\x01 \x01(\v2\x12.v1.InitBundleMetaR\x04meta\x12,\n" + "\x12helm_values_bundle\x18\x02 \x01(\fR\x10helmValuesBundle\x12%\n" + @@ -998,7 +1026,7 @@ const file_api_v1_cluster_init_service_proto_rawDesc = "" + "\vvalid_until\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\n" + "validUntil\x126\n" + "\tvalid_for\x18\x03 \x01(\v2\x19.google.protobuf.DurationR\bvalidFor\x12+\n" + - "\x11max_registrations\x18\x04 \x01(\x05R\x10maxRegistrations\"n\n" + + "\x11max_registrations\x18\x05 \x01(\x04R\x10maxRegistrations\"n\n" + "\x17InitBundleRevokeRequest\x12\x10\n" + "\x03ids\x18\x01 \x03(\tR\x03ids\x12A\n" + "\x1dconfirm_impacted_clusters_ids\x18\x02 \x03(\tR\x1aconfirmImpactedClustersIds\"$\n" + diff --git a/generated/api/v1/cluster_init_service.swagger.json b/generated/api/v1/cluster_init_service.swagger.json index dcb158df243d0..13f0c54ee0ea7 100644 --- a/generated/api/v1/cluster_init_service.swagger.json +++ b/generated/api/v1/cluster_init_service.swagger.json @@ -367,9 +367,9 @@ "type": "string" }, "maxRegistrations": { - "type": "integer", - "format": "int32", - "description": "Support for this is to be implemented in ROX-26769." 
+ "type": "string", + "format": "uint64", + "title": "4 was int32 max_registrations" } } }, @@ -404,6 +404,22 @@ "expiresAt": { "type": "string", "format": "date-time" + }, + "maxRegistrations": { + "type": "string", + "format": "uint64" + }, + "registrationsInitiated": { + "type": "array", + "items": { + "type": "string" + } + }, + "registrationsCompleted": { + "type": "array", + "items": { + "type": "string" + } } } }, diff --git a/generated/api/v1/cluster_init_service_vtproto.pb.go b/generated/api/v1/cluster_init_service_vtproto.pb.go index 0e87f89e3d1f4..e27e91d3beb38 100644 --- a/generated/api/v1/cluster_init_service_vtproto.pb.go +++ b/generated/api/v1/cluster_init_service_vtproto.pb.go @@ -86,6 +86,7 @@ func (m *CRSMeta) CloneVT() *CRSMeta { r.Name = m.Name r.CreatedAt = (*timestamppb.Timestamp)((*timestamppb1.Timestamp)(m.CreatedAt).CloneVT()) r.ExpiresAt = (*timestamppb.Timestamp)((*timestamppb1.Timestamp)(m.ExpiresAt).CloneVT()) + r.MaxRegistrations = m.MaxRegistrations if rhs := m.CreatedBy; rhs != nil { if vtpb, ok := interface{}(rhs).(interface{ CloneVT() *storage.User }); ok { r.CreatedBy = vtpb.CloneVT() @@ -93,6 +94,16 @@ func (m *CRSMeta) CloneVT() *CRSMeta { r.CreatedBy = proto.Clone(rhs).(*storage.User) } } + if rhs := m.RegistrationsInitiated; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.RegistrationsInitiated = tmpContainer + } + if rhs := m.RegistrationsCompleted; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.RegistrationsCompleted = tmpContainer + } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -519,6 +530,27 @@ func (this *CRSMeta) EqualVT(that *CRSMeta) bool { if !(*timestamppb1.Timestamp)(this.ExpiresAt).EqualVT((*timestamppb1.Timestamp)(that.ExpiresAt)) { return false } + if this.MaxRegistrations != that.MaxRegistrations { + return false + } + if len(this.RegistrationsInitiated) != len(that.RegistrationsInitiated) { + return false + } + for i, vx := range this.RegistrationsInitiated { + vy := that.RegistrationsInitiated[i] + if vx != vy { + return false + } + } + if len(this.RegistrationsCompleted) != len(that.RegistrationsCompleted) { + return false + } + for i, vx := range this.RegistrationsCompleted { + vy := that.RegistrationsCompleted[i] + if vx != vy { + return false + } + } return string(this.unknownFields) == string(that.unknownFields) } @@ -1109,6 +1141,29 @@ func (m *CRSMeta) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.RegistrationsCompleted) > 0 { + for iNdEx := len(m.RegistrationsCompleted) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RegistrationsCompleted[iNdEx]) + copy(dAtA[i:], m.RegistrationsCompleted[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RegistrationsCompleted[iNdEx]))) + i-- + dAtA[i] = 0x4a + } + } + if len(m.RegistrationsInitiated) > 0 { + for iNdEx := len(m.RegistrationsInitiated) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RegistrationsInitiated[iNdEx]) + copy(dAtA[i:], m.RegistrationsInitiated[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RegistrationsInitiated[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.MaxRegistrations != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MaxRegistrations)) + i-- + dAtA[i] = 0x38 + } if m.ExpiresAt != nil { size, err := (*timestamppb1.Timestamp)(m.ExpiresAt).MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -1518,7 +1573,7 @@ func 
(m *CRSGenRequestExtended) MarshalToSizedBufferVT(dAtA []byte) (int, error) if m.MaxRegistrations != 0 { i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MaxRegistrations)) i-- - dAtA[i] = 0x20 + dAtA[i] = 0x28 } if m.ValidFor != nil { size, err := (*durationpb1.Duration)(m.ValidFor).MarshalToSizedBufferVT(dAtA[:i]) @@ -1949,6 +2004,21 @@ func (m *CRSMeta) SizeVT() (n int) { l = (*timestamppb1.Timestamp)(m.ExpiresAt).SizeVT() n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } + if m.MaxRegistrations != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MaxRegistrations)) + } + if len(m.RegistrationsInitiated) > 0 { + for _, s := range m.RegistrationsInitiated { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RegistrationsCompleted) > 0 { + for _, s := range m.RegistrationsCompleted { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } n += len(m.unknownFields) return n } @@ -2805,6 +2875,89 @@ func (m *CRSMeta) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRegistrations", wireType) + } + m.MaxRegistrations = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxRegistrations |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegistrationsInitiated", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RegistrationsInitiated = append(m.RegistrationsInitiated, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegistrationsCompleted", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RegistrationsCompleted = append(m.RegistrationsCompleted, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -3657,7 +3810,7 @@ func (m *CRSGenRequestExtended) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex - case 4: + case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field MaxRegistrations", wireType) } @@ -3671,7 +3824,7 @@ func (m *CRSGenRequestExtended) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxRegistrations |= int32(b&0x7F) << shift + m.MaxRegistrations |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5007,6 +5160,97 
@@ func (m *CRSMeta) UnmarshalVTUnsafe(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRegistrations", wireType) + } + m.MaxRegistrations = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxRegistrations |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegistrationsInitiated", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var stringValue string + if intStringLen > 0 { + stringValue = unsafe.String(&dAtA[iNdEx], intStringLen) + } + m.RegistrationsInitiated = append(m.RegistrationsInitiated, stringValue) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegistrationsCompleted", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var stringValue string + if intStringLen > 0 { + stringValue = unsafe.String(&dAtA[iNdEx], intStringLen) + } + m.RegistrationsCompleted = append(m.RegistrationsCompleted, stringValue) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -5859,7 +6103,7 @@ func (m *CRSGenRequestExtended) UnmarshalVTUnsafe(dAtA []byte) error { return err } iNdEx = postIndex - case 4: + case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field MaxRegistrations", wireType) } @@ -5873,7 +6117,7 @@ func (m *CRSGenRequestExtended) UnmarshalVTUnsafe(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxRegistrations |= int32(b&0x7F) << shift + m.MaxRegistrations |= uint64(b&0x7F) << shift if b < 0x80 { break } diff --git a/generated/storage/cluster_init.pb.go b/generated/storage/cluster_init.pb.go index 2d59ee65afea3..e145a4a0f8d52 100644 --- a/generated/storage/cluster_init.pb.go +++ b/generated/storage/cluster_init.pb.go @@ -69,16 +69,19 @@ func (InitBundleMeta_InitBundleVersion) EnumDescriptor() ([]byte, []int) { } type InitBundleMeta struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty" sql:"pk"` // @gotags: sql:"pk" - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - CreatedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` - CreatedBy *User 
`protobuf:"bytes,4,opt,name=created_by,json=createdBy,proto3" json:"created_by,omitempty"` - IsRevoked bool `protobuf:"varint,5,opt,name=is_revoked,json=isRevoked,proto3" json:"is_revoked,omitempty"` - ExpiresAt *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` - Version InitBundleMeta_InitBundleVersion `protobuf:"varint,7,opt,name=version,proto3,enum=storage.InitBundleMeta_InitBundleVersion" json:"version,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty" sql:"pk"` // @gotags: sql:"pk" + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + CreatedBy *User `protobuf:"bytes,4,opt,name=created_by,json=createdBy,proto3" json:"created_by,omitempty"` + IsRevoked bool `protobuf:"varint,5,opt,name=is_revoked,json=isRevoked,proto3" json:"is_revoked,omitempty"` + ExpiresAt *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` + Version InitBundleMeta_InitBundleVersion `protobuf:"varint,7,opt,name=version,proto3,enum=storage.InitBundleMeta_InitBundleVersion" json:"version,omitempty"` + MaxRegistrations uint64 `protobuf:"varint,8,opt,name=max_registrations,json=maxRegistrations,proto3" json:"max_registrations,omitempty"` + RegistrationsInitiated []string `protobuf:"bytes,9,rep,name=registrations_initiated,json=registrationsInitiated,proto3" json:"registrations_initiated,omitempty"` + RegistrationsCompleted []string `protobuf:"bytes,10,rep,name=registrations_completed,json=registrationsCompleted,proto3" json:"registrations_completed,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *InitBundleMeta) Reset() { @@ -160,11 +163,32 @@ func (x *InitBundleMeta) GetVersion() InitBundleMeta_InitBundleVersion { return InitBundleMeta_INIT_BUNDLE } +func (x *InitBundleMeta) GetMaxRegistrations() uint64 { + if x != nil { + return x.MaxRegistrations + } + return 0 +} + +func (x *InitBundleMeta) GetRegistrationsInitiated() []string { + if x != nil { + return x.RegistrationsInitiated + } + return nil +} + +func (x *InitBundleMeta) GetRegistrationsCompleted() []string { + if x != nil { + return x.RegistrationsCompleted + } + return nil +} + var File_storage_cluster_init_proto protoreflect.FileDescriptor const file_storage_cluster_init_proto_rawDesc = "" + "\n" + - "\x1astorage/cluster_init.proto\x12\astorage\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x12storage/user.proto\"\xeb\x02\n" + + "\x1astorage/cluster_init.proto\x12\astorage\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x12storage/user.proto\"\x8a\x04\n" + "\x0eInitBundleMeta\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n" + "\x04name\x18\x02 \x01(\tR\x04name\x129\n" + @@ -176,7 +200,11 @@ const file_storage_cluster_init_proto_rawDesc = "" + "is_revoked\x18\x05 \x01(\bR\tisRevoked\x129\n" + "\n" + "expires_at\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR\texpiresAt\x12C\n" + - "\aversion\x18\a \x01(\x0e2).storage.InitBundleMeta.InitBundleVersionR\aversion\"-\n" + + "\aversion\x18\a \x01(\x0e2).storage.InitBundleMeta.InitBundleVersionR\aversion\x12+\n" + + "\x11max_registrations\x18\b \x01(\x04R\x10maxRegistrations\x127\n" + + "\x17registrations_initiated\x18\t 
\x03(\tR\x16registrationsInitiated\x127\n" + + "\x17registrations_completed\x18\n" + + " \x03(\tR\x16registrationsCompleted\"-\n" + "\x11InitBundleVersion\x12\x0f\n" + "\vINIT_BUNDLE\x10\x00\x12\a\n" + "\x03CRS\x10\x01B.\n" + diff --git a/generated/storage/cluster_init_vtproto.pb.go b/generated/storage/cluster_init_vtproto.pb.go index 976f5263afb4a..d03ffce3f8e32 100644 --- a/generated/storage/cluster_init_vtproto.pb.go +++ b/generated/storage/cluster_init_vtproto.pb.go @@ -34,6 +34,17 @@ func (m *InitBundleMeta) CloneVT() *InitBundleMeta { r.IsRevoked = m.IsRevoked r.ExpiresAt = (*timestamppb.Timestamp)((*timestamppb1.Timestamp)(m.ExpiresAt).CloneVT()) r.Version = m.Version + r.MaxRegistrations = m.MaxRegistrations + if rhs := m.RegistrationsInitiated; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.RegistrationsInitiated = tmpContainer + } + if rhs := m.RegistrationsCompleted; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.RegistrationsCompleted = tmpContainer + } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -72,6 +83,27 @@ func (this *InitBundleMeta) EqualVT(that *InitBundleMeta) bool { if this.Version != that.Version { return false } + if this.MaxRegistrations != that.MaxRegistrations { + return false + } + if len(this.RegistrationsInitiated) != len(that.RegistrationsInitiated) { + return false + } + for i, vx := range this.RegistrationsInitiated { + vy := that.RegistrationsInitiated[i] + if vx != vy { + return false + } + } + if len(this.RegistrationsCompleted) != len(that.RegistrationsCompleted) { + return false + } + for i, vx := range this.RegistrationsCompleted { + vy := that.RegistrationsCompleted[i] + if vx != vy { + return false + } + } return string(this.unknownFields) == string(that.unknownFields) } @@ -112,6 +144,29 @@ func (m *InitBundleMeta) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.RegistrationsCompleted) > 0 { + for iNdEx := len(m.RegistrationsCompleted) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RegistrationsCompleted[iNdEx]) + copy(dAtA[i:], m.RegistrationsCompleted[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RegistrationsCompleted[iNdEx]))) + i-- + dAtA[i] = 0x52 + } + } + if len(m.RegistrationsInitiated) > 0 { + for iNdEx := len(m.RegistrationsInitiated) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RegistrationsInitiated[iNdEx]) + copy(dAtA[i:], m.RegistrationsInitiated[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RegistrationsInitiated[iNdEx]))) + i-- + dAtA[i] = 0x4a + } + } + if m.MaxRegistrations != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.MaxRegistrations)) + i-- + dAtA[i] = 0x40 + } if m.Version != 0 { i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Version)) i-- @@ -206,6 +261,21 @@ func (m *InitBundleMeta) SizeVT() (n int) { if m.Version != 0 { n += 1 + protohelpers.SizeOfVarint(uint64(m.Version)) } + if m.MaxRegistrations != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.MaxRegistrations)) + } + if len(m.RegistrationsInitiated) > 0 { + for _, s := range m.RegistrationsInitiated { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.RegistrationsCompleted) > 0 { + for _, s := range m.RegistrationsCompleted { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } n += len(m.unknownFields) return n } @@ -450,6 +520,89 @@ func (m 
*InitBundleMeta) UnmarshalVT(dAtA []byte) error { break } } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRegistrations", wireType) + } + m.MaxRegistrations = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxRegistrations |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegistrationsInitiated", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RegistrationsInitiated = append(m.RegistrationsInitiated, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegistrationsCompleted", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RegistrationsCompleted = append(m.RegistrationsCompleted, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -720,6 +873,97 @@ func (m *InitBundleMeta) UnmarshalVTUnsafe(dAtA []byte) error { break } } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRegistrations", wireType) + } + m.MaxRegistrations = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxRegistrations |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegistrationsInitiated", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var stringValue string + if intStringLen > 0 { + stringValue = unsafe.String(&dAtA[iNdEx], intStringLen) + } + m.RegistrationsInitiated = append(m.RegistrationsInitiated, stringValue) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegistrationsCompleted", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var stringValue string + if intStringLen > 0 { + stringValue = unsafe.String(&dAtA[iNdEx], intStringLen) + } + m.RegistrationsCompleted = append(m.RegistrationsCompleted, stringValue) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) diff --git a/proto/api/v1/cluster_init_service.proto b/proto/api/v1/cluster_init_service.proto index 4c411dd395b13..c90dacf510f59 100644 --- a/proto/api/v1/cluster_init_service.proto +++ b/proto/api/v1/cluster_init_service.proto @@ -31,6 +31,9 @@ message CRSMeta { google.protobuf.Timestamp created_at = 3; storage.User created_by = 4; google.protobuf.Timestamp expires_at = 5; + uint64 max_registrations = 7; + repeated string registrations_initiated = 8; + repeated string registrations_completed = 9; } message InitBundleGenResponse { @@ -68,7 +71,8 @@ message CRSGenRequestExtended { string name = 1; google.protobuf.Timestamp valid_until = 2; google.protobuf.Duration valid_for = 3; - int32 max_registrations = 4; // Support for this is to be implemented in ROX-26769. + // 4 was int32 max_registrations + uint64 max_registrations = 5; } message InitBundleRevokeRequest { diff --git a/proto/storage/cluster_init.proto b/proto/storage/cluster_init.proto index b00a9ab0d8db6..cb44cdbb7f9f9 100644 --- a/proto/storage/cluster_init.proto +++ b/proto/storage/cluster_init.proto @@ -20,4 +20,7 @@ message InitBundleMeta { bool is_revoked = 5; google.protobuf.Timestamp expires_at = 6; InitBundleVersion version = 7; + uint64 max_registrations = 8; + repeated string registrations_initiated = 9; + repeated string registrations_completed = 10; } diff --git a/proto/storage/proto.lock b/proto/storage/proto.lock index ec549f53db7ea..25f88851c5de8 100644 --- a/proto/storage/proto.lock +++ b/proto/storage/proto.lock @@ -2873,6 +2873,23 @@ "id": 7, "name": "version", "type": "InitBundleVersion" + }, + { + "id": 8, + "name": "max_registrations", + "type": "uint64" + }, + { + "id": 9, + "name": "registrations_initiated", + "type": "string", + "is_repeated": true + }, + { + "id": 10, + "name": "registrations_completed", + "type": "string", + "is_repeated": true } ] } diff --git a/roxctl/central/crs/generate.go b/roxctl/central/crs/generate.go index 698ce48fd7c83..1714011849049 100644 --- a/roxctl/central/crs/generate.go +++ b/roxctl/central/crs/generate.go @@ -24,7 +24,7 @@ import ( // file specified by `outFilename` (if it is non-empty) or to stdout (if `outFilename` is empty). 
func generateCRS(cliEnvironment environment.Environment, name string, outFilename string, timeout time.Duration, retryTimeout time.Duration, - validFor time.Duration, validUntil time.Time, + validFor time.Duration, validUntil time.Time, maxClusters uint64, ) error { var err error var outFile *os.File @@ -55,11 +55,11 @@ func generateCRS(cliEnvironment environment.Environment, name string, } var resp *v1.CRSGenResponse - if validFor != 0 || !validUntil.IsZero() { - resp, err = generateCrsExtended(ctx, svc, name, validFor, validUntil) + if validFor != 0 || !validUntil.IsZero() || maxClusters != 0 { + resp, err = generateCrsExtended(ctx, svc, name, validFor, validUntil, maxClusters) if err != nil { if errStatus, ok := status.FromError(err); ok && errStatus.Code() == codes.Unimplemented { - cliEnvironment.Logger().ErrfLn("generating a CRS with custom expiration times requires a newer Central") + cliEnvironment.Logger().ErrfLn("generating a CRS with extended expiration settings requires a newer Central") return errors.Wrap(err, "missing extended CRS support in Central") } return errors.Wrap(err, "generating new CRS with extended settings") @@ -79,11 +79,14 @@ func generateCRS(cliEnvironment environment.Environment, name string, cliEnvironment.Logger().InfofLn("Successfully generated new CRS") cliEnvironment.Logger().InfofLn("") - cliEnvironment.Logger().InfofLn(" Name: %s", meta.GetName()) - cliEnvironment.Logger().InfofLn(" Created at: %s", meta.GetCreatedAt().AsTime().Format(time.RFC3339)) - cliEnvironment.Logger().InfofLn(" Expires at: %s", meta.GetExpiresAt().AsTime().Format(time.RFC3339)) - cliEnvironment.Logger().InfofLn(" Created By: %s", getPrettyUser(meta.GetCreatedBy())) - cliEnvironment.Logger().InfofLn(" ID: %s", meta.GetId()) + cliEnvironment.Logger().InfofLn(" Name: %s", meta.GetName()) + cliEnvironment.Logger().InfofLn(" Created at: %s", meta.GetCreatedAt().AsTime().Format(time.RFC3339)) + cliEnvironment.Logger().InfofLn(" Expires at: %s", meta.GetExpiresAt().AsTime().Format(time.RFC3339)) + cliEnvironment.Logger().InfofLn(" Created By: %s", getPrettyUser(meta.GetCreatedBy())) + if meta.GetMaxRegistrations() > 0 { + cliEnvironment.Logger().InfofLn(" Registration limit: %d clusters", meta.GetMaxRegistrations()) + } + cliEnvironment.Logger().InfofLn(" ID: %s", meta.GetId()) _, err = outWriter.Write(crs) if err != nil { @@ -96,7 +99,7 @@ func generateCRS(cliEnvironment environment.Environment, name string, } } - cliEnvironment.Logger().InfofLn("Then CRS needs to be stored securely, since it contains secrets.") + cliEnvironment.Logger().InfofLn("The CRS needs to be stored securely, since it contains secrets.") cliEnvironment.Logger().InfofLn("It is not possible to retrieve previously generated CRSs.") return nil } @@ -107,6 +110,7 @@ func generateCrsExtended( name string, validFor time.Duration, validUntil time.Time, + maxClusters uint64, ) (*v1.CRSGenResponse, error) { req := v1.CRSGenRequestExtended{Name: name} if validFor != 0 { @@ -115,6 +119,9 @@ func generateCrsExtended( if !validUntil.IsZero() { req.ValidUntil = timestamppb.New(validUntil) } + if maxClusters != 0 { + req.MaxRegistrations = maxClusters + } crs, err := svc.GenerateCRSExtended(ctx, &req) return crs, errors.Wrap(err, "generating CRS extended") @@ -125,6 +132,7 @@ func generateCommand(cliEnvironment environment.Environment) *cobra.Command { var outputFile string var validFor string var validUntil string + var maxClusters uint64 c := &cobra.Command{ Use: "generate ", @@ -154,11 +162,12 @@ func 
generateCommand(cliEnvironment environment.Environment) *cobra.Command { return errors.Wrap(err, "Invalid validity timestamp specified using `--valid-until'") } } - return generateCRS(cliEnvironment, name, outputFile, flags.Timeout(cmd), flags.RetryTimeout(cmd), validForDuration, validUntilTime) + return generateCRS(cliEnvironment, name, outputFile, flags.Timeout(cmd), flags.RetryTimeout(cmd), validForDuration, validUntilTime, maxClusters) }, } c.PersistentFlags().StringVarP(&validFor, "valid-for", "", "", "Specify validity duration for the new CRS (e.g. \"10m\", \"1d\").") c.PersistentFlags().StringVarP(&validUntil, "valid-until", "", "", "Specify validity as an RFC3339 timestamp for the new CRS.") + c.PersistentFlags().Uint64Var(&maxClusters, "max-clusters", 0, "Specify maximum number of clusters which can be registered with the new CRS (0 means no limit).") c.PersistentFlags().StringVarP(&outputFile, "output", "o", "", "File to be used for storing the newly generated CRS (- for stdout).") c.MarkFlagsMutuallyExclusive("valid-for", "valid-until") diff --git a/roxctl/maincommand/command_tree_debug.yaml b/roxctl/maincommand/command_tree_debug.yaml index fcf91324776a5..2627fd05ec1b9 100644 --- a/roxctl/maincommand/command_tree_debug.yaml +++ b/roxctl/maincommand/command_tree_debug.yaml @@ -87,6 +87,7 @@ central: - use-current-k8s-context generate: PERSISTENT_FLAGS: + - max-clusters - output - valid-for - valid-until diff --git a/roxctl/maincommand/command_tree_release.yaml b/roxctl/maincommand/command_tree_release.yaml index a0df123162ebf..b4eee418abcaa 100644 --- a/roxctl/maincommand/command_tree_release.yaml +++ b/roxctl/maincommand/command_tree_release.yaml @@ -87,6 +87,7 @@ central: - use-current-k8s-context generate: PERSISTENT_FLAGS: + - max-clusters - output - valid-for - valid-until diff --git a/tests/e2e/run-scanner-v4-install.bats b/tests/e2e/run-scanner-v4-install.bats index 64532729ae84d..a5e2db4b28ab2 100755 --- a/tests/e2e/run-scanner-v4-install.bats +++ b/tests/e2e/run-scanner-v4-install.bats @@ -522,6 +522,19 @@ EOT "$MAIN_IMAGE_TAG" "" \ "$secured_cluster_name" "$ROX_ADMIN_PASSWORD" "$central_endpoint" + ###################### + _begin "verifying-crs-revoked" + echo "Retrieving CRS listing..." + run "${ORCH_CMD}" &1 | grep -- --debug= >/dev/null } + +embed_crs_file() { + cat < Date: Tue, 17 Feb 2026 17:07:42 +0530 Subject: [PATCH 208/232] ROX-33156: Add new OOTB policy category for file activity monitoring (#19044) --- CHANGELOG.md | 4 +++- pkg/defaults/categories/files/file_activity_monitoring.json | 5 +++++ 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 pkg/defaults/categories/files/file_activity_monitoring.json diff --git a/CHANGELOG.md b/CHANGELOG.md index aae2259246213..df239ab307afc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,5 @@ + # Changelog This file helps upstream users learn about what is new in a release. @@ -12,8 +13,9 @@ Changes should still be described appropriately in JIRA/doc input pages, for inc ## [NEXT RELEASE] ### Added Features -- ROX-24311: Detection and enforcement for pods/attach Kubernetes event +- ROX-24311: Detection and enforcement for pods/attach Kubernetes event. - ROX-33098 (Tech Preview): Effective path and Actual Path have been combined into a single File Path policy criterion. +- ROX-33156 (Tech Preview): A new default policy category called "File Activity Monitoring" is now available. 
### Removed Features diff --git a/pkg/defaults/categories/files/file_activity_monitoring.json b/pkg/defaults/categories/files/file_activity_monitoring.json new file mode 100644 index 0000000000000..45fd6bddf521f --- /dev/null +++ b/pkg/defaults/categories/files/file_activity_monitoring.json @@ -0,0 +1,5 @@ +{ + "id": "342636c2-9780-4b77-a741-0e68566a02b2", + "name":"File Activity Monitoring", + "isDefault": true +} \ No newline at end of file From 8b70896b08db3d2b3e997595b94eef929222cd52 Mon Sep 17 00:00:00 2001 From: Stephan Hesselmann Date: Tue, 17 Feb 2026 14:09:30 +0100 Subject: [PATCH 209/232] ROX-30992: log error when deletion fails (#19043) --- central/declarativeconfig/manager_impl.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/central/declarativeconfig/manager_impl.go b/central/declarativeconfig/manager_impl.go index ab0db50c96d51..133d90d7d61e9 100644 --- a/central/declarativeconfig/manager_impl.go +++ b/central/declarativeconfig/manager_impl.go @@ -304,7 +304,7 @@ func (m *managerImpl) doDeletion(transformedMessagesByHandler map[string]protoMe // Otherwise, the reason why the deletion failed will not be visible to users while the resource may still // exist. if err != nil { - log.Warnf("The following IDs failed deletion: [%s]", strings.Join(failedDeletionIDs, ",")) + log.Warnf("Failed to delete resources [%s]: %s", strings.Join(failedDeletionIDs, ","), err) allProtoIDsToSkip = append(allProtoIDsToSkip, failedDeletionIDs...) failureInDeletion = true } From 64a4705674b90193971f28fed1f4e339ca8f3106 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Feb 2026 16:38:18 +0100 Subject: [PATCH 210/232] chore(deps): bump github.com/go-git/go-git/v5 from 5.13.1 to 5.16.5 in /tests/performance/scale (#18929) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- tests/performance/scale/go.mod | 23 ++++++-------- tests/performance/scale/go.sum | 56 +++++++++++++++------------------- 2 files changed, 34 insertions(+), 45 deletions(-) diff --git a/tests/performance/scale/go.mod b/tests/performance/scale/go.mod index cb84cae4f476b..b268f04f568cf 100644 --- a/tests/performance/scale/go.mod +++ b/tests/performance/scale/go.mod @@ -4,7 +4,7 @@ go 1.25 require ( github.com/cloud-bulldozer/go-commons v1.0.11 - github.com/go-git/go-git/v5 v5.13.1 + github.com/go-git/go-git/v5 v5.16.5 github.com/hashicorp/go-multierror v1.1.1 github.com/spf13/cobra v1.7.0 go.yaml.in/yaml/v3 v3.0.3 @@ -13,24 +13,24 @@ require ( require ( dario.cat/mergo v1.0.0 // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/ProtonMail/go-crypto v1.1.3 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/ProtonMail/go-crypto v1.1.6 // indirect github.com/cloudflare/circl v1.6.1 // indirect - github.com/cyphar/filepath-securejoin v0.3.6 // indirect + github.com/cyphar/filepath-securejoin v0.4.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect github.com/emirpasic/gods v1.18.1 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect - github.com/go-git/go-billy/v5 v5.6.1 // indirect + github.com/go-git/go-billy/v5 v5.6.2 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/gogo/protobuf 
v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/go-cmp v0.6.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.3.0 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect @@ -44,21 +44,18 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/pjbgf/sha1cd v0.3.0 // indirect + github.com/pjbgf/sha1cd v0.3.2 // indirect github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect - github.com/skeema/knownhosts v1.3.0 // indirect + github.com/skeema/knownhosts v1.3.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect golang.org/x/crypto v0.45.0 // indirect - golang.org/x/mod v0.29.0 // indirect golang.org/x/net v0.47.0 // indirect golang.org/x/oauth2 v0.27.0 // indirect - golang.org/x/sync v0.18.0 // indirect golang.org/x/sys v0.38.0 // indirect golang.org/x/term v0.37.0 // indirect golang.org/x/text v0.31.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.38.0 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect diff --git a/tests/performance/scale/go.sum b/tests/performance/scale/go.sum index 8148567cd11e4..132e3ef32d8b0 100644 --- a/tests/performance/scale/go.sum +++ b/tests/performance/scale/go.sum @@ -1,10 +1,10 @@ dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/ProtonMail/go-crypto v1.1.3 h1:nRBOetoydLeUb4nHajyO2bKqMLfWQ/ZPwkXqXxPxCFk= -github.com/ProtonMail/go-crypto v1.1.3/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/ProtonMail/go-crypto v1.1.6 h1:ZcV+Ropw6Qn0AX9brlQLAUXfqLBc7Bl+f/DmNxpLfdw= +github.com/ProtonMail/go-crypto v1.1.6/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= @@ -15,13 +15,13 @@ github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 
-github.com/cyphar/filepath-securejoin v0.3.6 h1:4d9N5ykBnSp5Xn2JkhocYDkOpURL/18CYMpo6xB9uWM= -github.com/cyphar/filepath-securejoin v0.3.6/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= +github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= +github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/elazarl/goproxy v1.2.3 h1:xwIyKHbaP5yfT6O9KIeYJR5549MXRQkoQMRXGztz8YQ= -github.com/elazarl/goproxy v1.2.3/go.mod h1:YfEbZtqP4AetfO6d40vWchF3znWX7C7Vd6ZMfdL8z64= +github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o= +github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE= github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= @@ -30,12 +30,12 @@ github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= -github.com/go-git/go-billy/v5 v5.6.1 h1:u+dcrgaguSSkbjzHwelEjc0Yj300NUevrrPphk/SoRA= -github.com/go-git/go-billy/v5 v5.6.1/go.mod h1:0AsLr1z2+Uksi4NlElmMblP5rPcDZNRCD8ujZCRR2BE= +github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM= +github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= -github.com/go-git/go-git/v5 v5.13.1 h1:DAQ9APonnlvSWpvolXWIuV6Q6zXy2wHbN4cVlNR5Q+M= -github.com/go-git/go-git/v5 v5.13.1/go.mod h1:qryJB4cSBoq3FRoBRf5A77joojuBcmPJ0qu3XXXVixc= +github.com/go-git/go-git/v5 v5.16.5 h1:mdkuqblwr57kVfXri5TTH+nMFLNUxIj9Z7F5ykFbw5s= +github.com/go-git/go-git/v5 v5.16.5/go.mod h1:QOMLpNf1qxuSY4StA/ArOdfFR2TrKEjJiye2kel2m+M= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -49,17 +49,15 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEe github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/protobuf v1.5.0/go.mod 
h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -106,20 +104,20 @@ github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= -github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= -github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= +github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4= +github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/skeema/knownhosts v1.3.0 h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0LY= -github.com/skeema/knownhosts v1.3.0/go.mod 
h1:sPINvnADmT/qYH1kfv+ePMmOBTH6Tbl7b5LvTDjFK7M= +github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8= +github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -151,8 +149,6 @@ golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= -golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -165,8 +161,6 @@ golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= -golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -198,8 +192,6 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 8230d7c2681a5ce534f78a4a7b522e5127097398 Mon Sep 17 00:00:00 2001 From: Stephan Hesselmann Date: Tue, 17 Feb 2026 18:39:25 +0100 Subject: [PATCH 211/232] ROX-33191: use proxy.Transport in auth providers (#19058) --- .../authproviders/idputil/http_clients.go | 37 +++++++++++++++++++ .../oidc/internal/endpoint/helper.go | 2 +- .../oidc/internal/endpoint/helper_test.go | 14 +++---- .../oidc/internal/endpoint/http_clients.go | 16 
++++++++ .../internal/endpoint/insecure_http_client.go | 14 ------- pkg/auth/authproviders/saml/http_clients.go | 16 ++++++++ .../saml/insecure_http_client.go | 14 ------- pkg/auth/authproviders/saml/metadata.go | 2 +- 8 files changed, 78 insertions(+), 37 deletions(-) create mode 100644 pkg/auth/authproviders/idputil/http_clients.go create mode 100644 pkg/auth/authproviders/oidc/internal/endpoint/http_clients.go delete mode 100644 pkg/auth/authproviders/oidc/internal/endpoint/insecure_http_client.go create mode 100644 pkg/auth/authproviders/saml/http_clients.go delete mode 100644 pkg/auth/authproviders/saml/insecure_http_client.go diff --git a/pkg/auth/authproviders/idputil/http_clients.go b/pkg/auth/authproviders/idputil/http_clients.go new file mode 100644 index 0000000000000..c4ba63bd037dc --- /dev/null +++ b/pkg/auth/authproviders/idputil/http_clients.go @@ -0,0 +1,37 @@ +package idputil + +import ( + "crypto/tls" + "net/http" + "time" + + "github.com/stackrox/rox/pkg/httputil/proxy" +) + +const ( + // defaultTimeout is the timeout for HTTP requests to external IdPs. + // This prevents hanging calls while still allowing enough time for typical + // authentication flows. + defaultTimeout = 30 * time.Second +) + +// NewHTTPClient creates a proxy-aware HTTP client for secure IdP connections. +// The client includes a timeout to prevent hanging calls to external IdPs. +func NewHTTPClient() *http.Client { + return &http.Client{ + Transport: proxy.RoundTripper(), + Timeout: defaultTimeout, + } +} + +// NewInsecureHTTPClient creates a proxy-aware HTTP client that skips TLS verification. +// This should only be used when the IdP URL contains the "+insecure" scheme suffix. +// The client includes a timeout to prevent hanging calls to external IdPs. +func NewInsecureHTTPClient() *http.Client { + return &http.Client{ + Transport: proxy.RoundTripper( + proxy.WithTLSConfig(&tls.Config{InsecureSkipVerify: true}), + ), + Timeout: defaultTimeout, + } +} diff --git a/pkg/auth/authproviders/oidc/internal/endpoint/helper.go b/pkg/auth/authproviders/oidc/internal/endpoint/helper.go index 4a563d8992a53..f000c856d86b3 100644 --- a/pkg/auth/authproviders/oidc/internal/endpoint/helper.go +++ b/pkg/auth/authproviders/oidc/internal/endpoint/helper.go @@ -96,7 +96,7 @@ func NewHelper(issuer string) (*Helper, error) { if stringutils.ConsumeSuffix(&urlForDiscovery.Scheme, "+insecure") { httpClient = insecureHTTPClient } else { - httpClient = http.DefaultClient + httpClient = defaultHTTPClient } h := &Helper{ diff --git a/pkg/auth/authproviders/oidc/internal/endpoint/helper_test.go b/pkg/auth/authproviders/oidc/internal/endpoint/helper_test.go index 6147f8687b7c1..67aad524ce8a9 100644 --- a/pkg/auth/authproviders/oidc/internal/endpoint/helper_test.go +++ b/pkg/auth/authproviders/oidc/internal/endpoint/helper_test.go @@ -78,8 +78,8 @@ func TestHelper_HTTPClient(t *testing.T) { }{ { name: "default", - fields: fields{httpClient: http.DefaultClient}, - want: http.DefaultClient, + fields: fields{httpClient: defaultHTTPClient}, + want: defaultHTTPClient, }, { name: "insecure", @@ -212,7 +212,7 @@ func TestNewHelper(t *testing.T) { Fragment: "e&f", }, canonicalIssuer: "https://a/b/bb?c&d#e&f", - httpClient: http.DefaultClient, + httpClient: defaultHTTPClient, urlForDiscovery: "https://a/b/bb", }, }, @@ -229,7 +229,7 @@ func TestNewHelper(t *testing.T) { Fragment: "e&f", }, canonicalIssuer: "https://a/b%2Fbb?c&d#e&f", - httpClient: http.DefaultClient, + httpClient: defaultHTTPClient, urlForDiscovery: "https://a/b%2Fbb", 
}, }, @@ -261,7 +261,7 @@ func TestNewHelper(t *testing.T) { Fragment: "e&f", }, canonicalIssuer: "https://a/b?c&d#e&f", - httpClient: http.DefaultClient, + httpClient: defaultHTTPClient, urlForDiscovery: "https://a/b", }, }, @@ -287,7 +287,7 @@ func TestNewHelper(t *testing.T) { Fragment: "e&f", }, canonicalIssuer: "https://data:99999999?c&d#e&f", - httpClient: http.DefaultClient, + httpClient: defaultHTTPClient, urlForDiscovery: "https://data:99999999", }, }, @@ -306,7 +306,7 @@ func TestNewHelper(t *testing.T) { Fragment: "e&f", }, canonicalIssuer: "https://data:?c&d#e&f", - httpClient: http.DefaultClient, + httpClient: defaultHTTPClient, urlForDiscovery: "https://data:", }, }, diff --git a/pkg/auth/authproviders/oidc/internal/endpoint/http_clients.go b/pkg/auth/authproviders/oidc/internal/endpoint/http_clients.go new file mode 100644 index 0000000000000..27bc3055b5f45 --- /dev/null +++ b/pkg/auth/authproviders/oidc/internal/endpoint/http_clients.go @@ -0,0 +1,16 @@ +package endpoint + +import ( + "github.com/stackrox/rox/pkg/auth/authproviders/idputil" +) + +var ( + // defaultHTTPClient is a proxy-aware HTTP client for secure OIDC connections. + // It includes a timeout to prevent hanging calls to external IdPs. + defaultHTTPClient = idputil.NewHTTPClient() + + // insecureHTTPClient is a proxy-aware HTTP client that skips TLS verification. + // This is used when the issuer URL contains the "+insecure" scheme suffix. + // It includes a timeout to prevent hanging calls to external IdPs. + insecureHTTPClient = idputil.NewInsecureHTTPClient() +) diff --git a/pkg/auth/authproviders/oidc/internal/endpoint/insecure_http_client.go b/pkg/auth/authproviders/oidc/internal/endpoint/insecure_http_client.go deleted file mode 100644 index 8e39f250a117a..0000000000000 --- a/pkg/auth/authproviders/oidc/internal/endpoint/insecure_http_client.go +++ /dev/null @@ -1,14 +0,0 @@ -package endpoint - -import ( - "crypto/tls" - "net/http" - - "github.com/stackrox/rox/pkg/httputil/proxy" -) - -var insecureHTTPClient = &http.Client{ - Transport: proxy.RoundTripper( - proxy.WithTLSConfig(&tls.Config{InsecureSkipVerify: true}), - ), -} diff --git a/pkg/auth/authproviders/saml/http_clients.go b/pkg/auth/authproviders/saml/http_clients.go new file mode 100644 index 0000000000000..f377ec215484e --- /dev/null +++ b/pkg/auth/authproviders/saml/http_clients.go @@ -0,0 +1,16 @@ +package saml + +import ( + "github.com/stackrox/rox/pkg/auth/authproviders/idputil" +) + +var ( + // defaultHTTPClient is a proxy-aware HTTP client for secure SAML connections. + // It includes a timeout to prevent hanging calls to external IdPs. + defaultHTTPClient = idputil.NewHTTPClient() + + // insecureHTTPClient is a proxy-aware HTTP client that skips TLS verification. + // This is used when the metadata URL contains the "+insecure" scheme suffix. + // It includes a timeout to prevent hanging calls to external IdPs. 
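A minimal usage sketch for the two clients defined here, assuming the same "+insecure" scheme handling that helper.go and metadata.go apply elsewhere in this patch (issuerURL is an illustrative *url.URL, not a name taken from the diff):

    client := idputil.NewHTTPClient()
    if stringutils.ConsumeSuffix(&issuerURL.Scheme, "+insecure") {
        client = idputil.NewInsecureHTTPClient()
    }
    resp, err := client.Get(issuerURL.String())

Both constructors return a plain *http.Client, so existing call sites only swap out the client variable they already use.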
+ insecureHTTPClient = idputil.NewInsecureHTTPClient() +) diff --git a/pkg/auth/authproviders/saml/insecure_http_client.go b/pkg/auth/authproviders/saml/insecure_http_client.go deleted file mode 100644 index f1c52640da174..0000000000000 --- a/pkg/auth/authproviders/saml/insecure_http_client.go +++ /dev/null @@ -1,14 +0,0 @@ -package saml - -import ( - "crypto/tls" - "net/http" - - "github.com/stackrox/rox/pkg/httputil/proxy" -) - -var insecureHTTPClient = &http.Client{ - Transport: proxy.RoundTripper( - proxy.WithTLSConfig(&tls.Config{InsecureSkipVerify: true}), - ), -} diff --git a/pkg/auth/authproviders/saml/metadata.go b/pkg/auth/authproviders/saml/metadata.go index 481e58e5d5533..4dd31a538b195 100644 --- a/pkg/auth/authproviders/saml/metadata.go +++ b/pkg/auth/authproviders/saml/metadata.go @@ -31,7 +31,7 @@ func fetchIDPMetadata(ctx context.Context, url string) (string, *types.IDPSSODes return "", nil, errors.Wrap(err, "could not create HTTP request") } - httpClient := http.DefaultClient + httpClient := defaultHTTPClient if stringutils.ConsumeSuffix(&request.URL.Scheme, "+insecure") { httpClient = insecureHTTPClient } From 843afe3379c514749660958f4f8e87290d5a3692 Mon Sep 17 00:00:00 2001 From: Marcin Owsiany Date: Wed, 18 Feb 2026 06:14:33 +0100 Subject: [PATCH 212/232] ROX-33114: operator helm chart generation (#19055) Co-authored-by: StackRox PR Fixxxer --- operator/Makefile | 14 ++- operator/PROJECT | 3 + .../rhacs-operator.clusterserviceversion.yaml | 3 +- operator/config/manager/manager.yaml | 3 +- operator/tools/kubebuilder/go.mod | 36 ++++++ operator/tools/kubebuilder/go.sum | 113 ++++++++++++++++++ operator/tools/kubebuilder/noop.go | 3 + operator/tools/kubebuilder/tool.go | 8 ++ scripts/ci/jobs/check-generated.sh | 20 ++++ 9 files changed, 200 insertions(+), 3 deletions(-) create mode 100644 operator/tools/kubebuilder/go.mod create mode 100644 operator/tools/kubebuilder/go.sum create mode 100644 operator/tools/kubebuilder/noop.go create mode 100644 operator/tools/kubebuilder/tool.go diff --git a/operator/Makefile b/operator/Makefile index cc3b0777593fe..41fd03d136a46 100644 --- a/operator/Makefile +++ b/operator/Makefile @@ -193,6 +193,7 @@ include $(PROJECT_DIR)/../make/gotools.mk $(call go-tool, CONTROLLER_GEN, sigs.k8s.io/controller-tools/cmd/controller-gen, tools/controller-gen) $(call go-tool, ENVTEST, sigs.k8s.io/controller-runtime/tools/setup-envtest, tools/envtest) $(call go-tool, KUSTOMIZE, sigs.k8s.io/kustomize/kustomize/v5, tools/kustomize) +$(call go-tool, KUBEBUILDER, sigs.k8s.io/kubebuilder/v4, tools/kubebuilder) $(call go-tool, KUTTL, github.com/kudobuilder/kuttl/cmd/kubectl-kuttl, tools/kuttl) $(call go-tool, YQ, github.com/mikefarah/yq/v4, tools/yq) @@ -208,6 +209,9 @@ OPERATOR_SDK_VERSION = $(shell cd tools/operator-sdk; go list -m -f '{{ .Version .PHONY: kustomize kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. +.PHONY: kubebuilder +kubebuilder: $(KUBEBUILDER) ## Download kubebuilder locally if necessary. + .PHONY: controller-gen controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. @@ -228,7 +232,7 @@ yq: $(YQ) ## Download yq. .PHONY: parent-proto-generate parent-proto-generate: ## Make sure ../generated directory has up-to-date content that this operator (transitively) depends upon. - $(MAKE) -C .. proto-generated-srcs + [[ $${ROX_OPERATOR_SKIP_PROTO_GENERATED_SRCS:-false} = true ]] || $(MAKE) -C .. 
proto-generated-srcs .PHONY: manifests manifests: parent-proto-generate controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. @@ -315,6 +319,14 @@ build-installer: manifests generate kustomize ## Generate a consolidated YAML wi mkdir -p dist $(KUSTOMIZE) build build/dist > dist/install.yaml +.PHONY: chart +chart: kubebuilder manifests ## Generate a helm chart with all necessary resources. +# The dependency above makes sure protos are up to date, so we can skip this time-consuming process below +# by specifying the SKIP env var. Otherwise each target that kubebuilder invokes (and there is a bunch) would regen protos. + ROX_OPERATOR_SKIP_PROTO_GENERATED_SRCS=true $(KUBEBUILDER) edit --plugins=helm/v2-alpha --force + sed -i'.bak' -e 's,0.1.0,$(VERSION),g' dist/chart/Chart.yaml + rm -f dist/chart/Chart.yaml.bak + .PHONY: build build: manifests generate fmt vet ## Build operator local binary. ../scripts/go-build-file.sh ./cmd/main.go bin/manager diff --git a/operator/PROJECT b/operator/PROJECT index 6c95471ff71b0..0d297bd10cc76 100644 --- a/operator/PROJECT +++ b/operator/PROJECT @@ -6,6 +6,9 @@ domain: stackrox.io layout: - go.kubebuilder.io/v4 plugins: + helm.kubebuilder.io/v2-alpha: + manifests: dist/install.yaml + output: dist manifests.sdk.operatorframework.io/v2: {} scorecard.sdk.operatorframework.io/v2: {} projectName: rhacs-operator diff --git a/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml b/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml index c901c6988ed61..51aec6b332aa8 100644 --- a/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml +++ b/operator/bundle/manifests/rhacs-operator.clusterserviceversion.yaml @@ -1989,8 +1989,8 @@ spec: spec: containers: - args: - - --health-probe-bind-address=:8081 - --metrics-bind-address=0.0.0.0:8443 + - --health-probe-bind-address=:8081 - --leader-elect env: - name: RELATED_IMAGE_MAIN @@ -2010,6 +2010,7 @@ spec: divisor: '0' resource: limits.memory image: controller:latest + imagePullPolicy: IfNotPresent livenessProbe: httpGet: path: /healthz diff --git a/operator/config/manager/manager.yaml b/operator/config/manager/manager.yaml index 79b822da9624f..3a31cb5c6c2ca 100644 --- a/operator/config/manager/manager.yaml +++ b/operator/config/manager/manager.yaml @@ -55,8 +55,8 @@ spec: # type: RuntimeDefault containers: - args: - - "--health-probe-bind-address=:8081" - "--metrics-bind-address=0.0.0.0:8443" + - "--health-probe-bind-address=:8081" - --leader-elect env: - name: RELATED_IMAGE_MAIN @@ -75,6 +75,7 @@ spec: containerName: manager resource: limits.memory image: controller:latest + imagePullPolicy: IfNotPresent name: manager securityContext: allowPrivilegeEscalation: false diff --git a/operator/tools/kubebuilder/go.mod b/operator/tools/kubebuilder/go.mod new file mode 100644 index 0000000000000..ccb2a16d0c41d --- /dev/null +++ b/operator/tools/kubebuilder/go.mod @@ -0,0 +1,36 @@ +module github.com/stackrox/rox/operator/tools/kubebuilder + +go 1.25.7 + +require sigs.k8s.io/kubebuilder/v4 v4.12.0 + +require ( + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/gobuffalo/flect v1.0.3 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // 
indirect + github.com/spf13/afero v1.15.0 // indirect + github.com/spf13/cobra v1.10.2 // indirect + github.com/spf13/pflag v1.0.10 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/mod v0.33.0 // indirect + golang.org/x/net v0.50.0 // indirect + golang.org/x/sync v0.19.0 // indirect + golang.org/x/text v0.34.0 // indirect + golang.org/x/tools v0.42.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + k8s.io/apimachinery v0.35.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20260127142750-a19766b6e2d4 // indirect + k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2 // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.2 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect +) diff --git a/operator/tools/kubebuilder/go.sum b/operator/tools/kubebuilder/go.sum new file mode 100644 index 0000000000000..1c127cd0535c6 --- /dev/null +++ b/operator/tools/kubebuilder/go.sum @@ -0,0 +1,113 @@ +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4= +github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 h1:z2ogiKUYzX5Is6zr/vP9vJGqPwcdqsWjOt+V8J7+bTc= +github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI= +github.com/h2non/gock v1.2.0 h1:K6ol8rfrRkUOefooBC8elXoaNGYkpp7y2qcxGG6BzUE= +github.com/h2non/gock v1.2.0/go.mod h1:tNhoxHYW2W42cYkYb1WqzdbYIieALC99kpYr7rH/BQk= +github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw= +github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/json-iterator/go 
v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/onsi/ginkgo/v2 v2.28.1 h1:S4hj+HbZp40fNKuLUQOYLDgZLwNUVn19N3Atb98NCyI= +github.com/onsi/ginkgo/v2 v2.28.1/go.mod h1:CLtbVInNckU3/+gC8LzkGUb9oF+e8W8TdUsxPwvdOgE= +github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28= +github.com/onsi/gomega v1.39.1/go.mod h1:hL6yVALoTOxeWudERyfppUcZXjMwIMLnuSfruD2lcfg= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= +golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60= +golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= +golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= +golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= +golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k= +golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +helm.sh/helm/v3 v3.20.0 h1:2M+0qQwnbI1a2CxN7dbmfsWHg/MloeaFMnZCY56as50= +helm.sh/helm/v3 v3.20.0/go.mod h1:rTavWa0lagZOxGfdhu4vgk1OjH2UYCnrDKE2PVC4N0o= +k8s.io/apimachinery v0.35.1 h1:yxO6gV555P1YV0SANtnTjXYfiivaTPvCTKX6w6qdDsU= +k8s.io/apimachinery v0.35.1/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20260127142750-a19766b6e2d4 h1:HhDfevmPS+OalTjQRKbTHppRIz01AWi8s45TMXStgYY= +k8s.io/kube-openapi v0.0.0-20260127142750-a19766b6e2d4/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= +k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2 h1:AZYQSJemyQB5eRxqcPky+/7EdBj0xi3g0ZcxxJ7vbWU= +k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/kubebuilder/v4 v4.12.0 h1:dDbD+mDp5NlnzY0P9ywqWbGgjDLxVCwJFDzkafN12pI= +sigs.k8s.io/kubebuilder/v4 v4.12.0/go.mod 
h1:Kh2SGCA+0tEkpgMQsXFH3+CdL2JHxndBCnWCL0cAYik= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.2 h1:kwVWMx5yS1CrnFWA/2QHyRVJ8jM6dBA80uLmm0wJkk8= +sigs.k8s.io/structured-merge-diff/v6 v6.3.2/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/operator/tools/kubebuilder/noop.go b/operator/tools/kubebuilder/noop.go new file mode 100644 index 0000000000000..9c8f4ae861c09 --- /dev/null +++ b/operator/tools/kubebuilder/noop.go @@ -0,0 +1,3 @@ +package main + +// See https://github.com/stackrox/stackrox/pull/10125#discussion_r1507387689 diff --git a/operator/tools/kubebuilder/tool.go b/operator/tools/kubebuilder/tool.go new file mode 100644 index 0000000000000..02b73790f768c --- /dev/null +++ b/operator/tools/kubebuilder/tool.go @@ -0,0 +1,8 @@ +//go:build tools +// +build tools + +package main + +import ( + _ "sigs.k8s.io/kubebuilder/v4" +) diff --git a/scripts/ci/jobs/check-generated.sh b/scripts/ci/jobs/check-generated.sh index be3cbd7067bb3..7f79983738e2a 100755 --- a/scripts/ci/jobs/check-generated.sh +++ b/scripts/ci/jobs/check-generated.sh @@ -84,11 +84,31 @@ function check-operator-generated-files-up-to-date() { make -C operator/ manifests echo 'Checking for diffs after making generate and manifests...' git diff --exit-code HEAD + make -C operator/ bundle echo 'Checking for diffs after making bundle...' echo 'If this fails, check if the invocation of the normalize-metadata.py script in operator/Makefile' echo 'needs to change due to formatting changes in the generated files.' git diff --exit-code HEAD + + # For as long as the helm chart kubebuilder plugin is alpha, we want to check that kubebuilder bumps do not surprise + # us with unexpected divergence compared to the (more seasoned and predictable) manifest output. + make -C operator/ chart + echo 'Expanding the operator helm chart...' + helm template --namespace rhacs-operator-system rhacs-operator ./operator/dist/chart/ > operator/dist/chart.yaml + echo 'Downloading yq...' + make -C operator/ yq + yq=$(make --no-print-directory --silent -C operator/ which-yq) + echo 'Normalizing the manifests...' + # Reorder resources in the files, strip comments, pretty print, and remove expected differences: + # - "resource-policy: keep" on the CRDs in the chart + # - namespace resource in the manifest + $yq -P ea '[.] | sort_by(.kind, .metadata.name) | del(.[].metadata.annotations.["helm.sh/resource-policy"]) | .[] | splitDoc | ... comments=""' \ + operator/dist/chart.yaml > operator/dist/chart-sorted.yaml + $yq -P ea '[.] | sort_by(.kind, .metadata.name) | filter(.kind != "Namespace") | .[] | splitDoc | ... comments=""' \ + operator/dist/install.yaml > operator/dist/install-sorted.yaml + echo 'Checking for differences between normalized operator manifest and normalized and expanded operator helm chart...' 
+ diff -U 10 operator/dist/install-sorted.yaml operator/dist/chart-sorted.yaml } export -f check-operator-generated-files-up-to-date bash -c check-operator-generated-files-up-to-date || { From 1d5828e86dd5c7a2fe42077baeec4353ff51d75b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Feb 2026 01:55:30 -0700 Subject: [PATCH 213/232] chore(deps): bump google.golang.org/api from 0.266.0 to 0.267.0 (#19079) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 308304ca62acd..94e54f1aae533 100644 --- a/go.mod +++ b/go.mod @@ -150,7 +150,7 @@ require ( golang.org/x/time v0.14.0 golang.org/x/tools v0.42.0 golang.stackrox.io/grpc-http1 v0.5.1 - google.golang.org/api v0.266.0 + google.golang.org/api v0.267.0 google.golang.org/genproto v0.0.0-20260128011058-8636f8732409 google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20 google.golang.org/grpc v1.79.1 diff --git a/go.sum b/go.sum index 39a4cb9724b0f..f7da417b81df5 100644 --- a/go.sum +++ b/go.sum @@ -2177,8 +2177,8 @@ google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdr google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= -google.golang.org/api v0.266.0 h1:hco+oNCf9y7DmLeAtHJi/uBAY7n/7XC9mZPxu1ROiyk= -google.golang.org/api v0.266.0/go.mod h1:Jzc0+ZfLnyvXma3UtaTl023TdhZu6OMBP9tJ+0EmFD0= +google.golang.org/api v0.267.0 h1:w+vfWPMPYeRs8qH1aYYsFX68jMls5acWl/jocfLomwE= +google.golang.org/api v0.267.0/go.mod h1:Jzc0+ZfLnyvXma3UtaTl023TdhZu6OMBP9tJ+0EmFD0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= From 9fb3fd0e02308588f032b9e9eaafea8ca27c0dbe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Feb 2026 02:27:39 -0700 Subject: [PATCH 214/232] chore(deps): bump the aws-sdk-go-v2 group with 3 updates (#19078) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 94e54f1aae533..b4c5f0eafc2ff 100644 --- a/go.mod +++ b/go.mod @@ -26,10 +26,10 @@ require ( github.com/adhocore/gronx v1.19.6 github.com/andygrunwald/go-jira v1.17.0 github.com/aws/aws-sdk-go-v2 v1.41.1 - github.com/aws/aws-sdk-go-v2/config v1.32.7 - github.com/aws/aws-sdk-go-v2/credentials v1.19.7 + github.com/aws/aws-sdk-go-v2/config v1.32.8 + github.com/aws/aws-sdk-go-v2/credentials v1.19.8 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.0 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.1 github.com/aws/aws-sdk-go-v2/service/ecr v1.55.2 github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0 github.com/aws/aws-sdk-go-v2/service/securityhub v1.67.4 @@ -243,7 +243,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 // indirect 
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.14 // indirect github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.11.0 // indirect github.com/aymerick/douceur v0.2.0 // indirect github.com/beevik/etree v1.5.0 // indirect diff --git a/go.sum b/go.sum index f7da417b81df5..94e079032a47d 100644 --- a/go.sum +++ b/go.sum @@ -289,14 +289,14 @@ github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6ce github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4= -github.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY= -github.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY= -github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8= -github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw= +github.com/aws/aws-sdk-go-v2/config v1.32.8 h1:iu+64gwDKEoKnyTQskSku72dAwggKI5sV6rNvgSMpMs= +github.com/aws/aws-sdk-go-v2/config v1.32.8/go.mod h1:MI2XvA+qDi3i9AJxX1E2fu730syEBzp/jnXrjxuHwgI= +github.com/aws/aws-sdk-go-v2/credentials v1.19.8 h1:Jp2JYH1lRT3KhX4mshHPvVYsR5qqRec3hGvEarNYoR0= +github.com/aws/aws-sdk-go-v2/credentials v1.19.8/go.mod h1:fZG9tuvyVfxknv1rKibIz3DobRaFw1Poe8IKtXB3XYY= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.0 h1:MpkX8EjkwuvyuX9B7+Zgk5M4URb2WQ84Y6jM81n5imw= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.0/go.mod h1:4V9Pv5sFfMPWQF0Q0zYN6BlV/504dFGaTeogallRqQw= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.1 h1:IbWiN670htmBioc+Zj32vSpJgQ2+OYSlvTvfQ1nCORQ= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.1/go.mod h1:tw/B596EUhBWDFGdDGuLC21fVU4A3s4/5Efy8S39W18= github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U= github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik= @@ -327,8 +327,8 @@ github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4 github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M= github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk= github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.14 h1:0jbJeuEHlwKJ9PfXtpSFc4MF+WIWORdhN1n30ITZGFM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.14/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo= 
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ= github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ= github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= From cb825fe3596afe56b9d0c1dd53479dff06cd751c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Feb 2026 03:49:13 -0700 Subject: [PATCH 215/232] chore(deps): bump github.com/mikefarah/yq/v4 from 4.52.2 to 4.52.4 in /operator/tools/yq (#19076) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- operator/tools/yq/go.mod | 12 ++++++------ operator/tools/yq/go.sum | 24 ++++++++++++------------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/operator/tools/yq/go.mod b/operator/tools/yq/go.mod index 7efb0bae0eb3f..4d907908c2f29 100644 --- a/operator/tools/yq/go.mod +++ b/operator/tools/yq/go.mod @@ -2,7 +2,7 @@ module github.com/stackrox/rox/operator/tools/yq go 1.25 -require github.com/mikefarah/yq/v4 v4.52.2 +require github.com/mikefarah/yq/v4 v4.52.4 require ( github.com/a8m/envsubst v1.4.3 // indirect @@ -29,11 +29,11 @@ require ( github.com/yuin/gopher-lua v1.1.1 // indirect github.com/zclconf/go-cty v1.17.0 // indirect go.yaml.in/yaml/v4 v4.0.0-rc.3 // indirect - golang.org/x/mod v0.31.0 // indirect - golang.org/x/net v0.49.0 // indirect + golang.org/x/mod v0.33.0 // indirect + golang.org/x/net v0.50.0 // indirect golang.org/x/sync v0.19.0 // indirect - golang.org/x/sys v0.40.0 // indirect - golang.org/x/text v0.33.0 // indirect - golang.org/x/tools v0.40.0 // indirect + golang.org/x/sys v0.41.0 // indirect + golang.org/x/text v0.34.0 // indirect + golang.org/x/tools v0.41.0 // indirect gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 // indirect ) diff --git a/operator/tools/yq/go.sum b/operator/tools/yq/go.sum index 39724fd59ef75..d83c1be045670 100644 --- a/operator/tools/yq/go.sum +++ b/operator/tools/yq/go.sum @@ -44,8 +44,8 @@ github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHP github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mikefarah/yq/v4 v4.52.2 h1:g38MGUsWO4y6Te1tzZy3fk6hZ9xknRHLzBaXVoqfAyI= -github.com/mikefarah/yq/v4 v4.52.2/go.mod h1:05ytoLM9RqcBCI73V3lqDL1gbQ29mEe573IslI9ibU8= +github.com/mikefarah/yq/v4 v4.52.4 h1:wZlxBMjyKCzzQjL0u6a3zToKuyE7OdJr4OtLBtwph4Q= +github.com/mikefarah/yq/v4 v4.52.4/go.mod h1:8QwgSgDsmt4LCbfwvGUAh5oWSukRRuVJ8Gj98zJ/45o= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= @@ -72,19 +72,19 @@ github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmB go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= go.yaml.in/yaml/v4 v4.0.0-rc.3 h1:3h1fjsh1CTAPjW7q/EMe+C8shx5d8ctzZTrLcs/j8Go= go.yaml.in/yaml/v4 v4.0.0-rc.3/go.mod h1:aZqd9kCMsGL7AuUv/m/PvWLdg5sjJsZ4oHDEnfPPfY0= -golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= -golang.org/x/mod v0.31.0/go.mod 
h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= -golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o= -golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8= +golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= +golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60= +golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= -golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= -golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= -golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= -golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= +golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= +golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= +golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= +golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= +golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 h1:6D+BvnJ/j6e222UW8s2qTSe3wGBtvo0MbVQG/c5k8RE= gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog= From a8dd21c920e62693a8f09fbe7e196b1d563679f7 Mon Sep 17 00:00:00 2001 From: Khushboo Sancheti <42253461+clickboo@users.noreply.github.com> Date: Wed, 18 Feb 2026 18:04:36 +0530 Subject: [PATCH 216/232] chore(be): Refactoring policy unit tests (#19059) --- .../default_policies_builtin_test.go | 1637 +++++++ pkg/booleanpolicy/default_policies_test.go | 4162 ----------------- pkg/booleanpolicy/deployment_policies_test.go | 1161 ----- pkg/booleanpolicy/image_criteria_test.go | 522 +++ pkg/booleanpolicy/network_criteria_test.go | 180 + ...policies_test.go => node_criteria_test.go} | 183 +- pkg/booleanpolicy/policies_helpers_test.go | 411 ++ pkg/booleanpolicy/runtime_criteria_test.go | 1674 +++++++ pkg/booleanpolicy/workload_criteria_test.go | 958 ++++ tools/allowed-large-files | 3 +- 10 files changed, 5435 insertions(+), 5456 deletions(-) create mode 100644 pkg/booleanpolicy/default_policies_builtin_test.go delete mode 100644 pkg/booleanpolicy/default_policies_test.go delete mode 100644 pkg/booleanpolicy/deployment_policies_test.go create mode 100644 pkg/booleanpolicy/image_criteria_test.go create mode 100644 pkg/booleanpolicy/network_criteria_test.go rename pkg/booleanpolicy/{node_policies_test.go => node_criteria_test.go} (53%) create mode 100644 pkg/booleanpolicy/policies_helpers_test.go create mode 100644 pkg/booleanpolicy/runtime_criteria_test.go create mode 100644 pkg/booleanpolicy/workload_criteria_test.go diff --git a/pkg/booleanpolicy/default_policies_builtin_test.go 
b/pkg/booleanpolicy/default_policies_builtin_test.go new file mode 100644 index 0000000000000..a53df91e89b73 --- /dev/null +++ b/pkg/booleanpolicy/default_policies_builtin_test.go @@ -0,0 +1,1637 @@ +package booleanpolicy + +import ( + "fmt" + "regexp" + "testing" + "time" + + "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/booleanpolicy/fieldnames" + "github.com/stackrox/rox/pkg/features" + "github.com/stackrox/rox/pkg/fixtures" + policyUtils "github.com/stackrox/rox/pkg/policies" + "github.com/stackrox/rox/pkg/protoassert" + "github.com/stackrox/rox/pkg/protoconv" + "github.com/stackrox/rox/pkg/readable" + "github.com/stackrox/rox/pkg/set" + "github.com/stackrox/rox/pkg/signatures" + "github.com/stackrox/rox/pkg/sliceutils" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type DefaultPoliciesTestSuite struct { + basePoliciesTestSuite +} + +func TestDefaultPolicies(t *testing.T) { + t.Setenv(features.CVEFixTimestampCriteria.EnvVar(), "true") + suite.Run(t, new(DefaultPoliciesTestSuite)) +} + +func (suite *DefaultPoliciesTestSuite) TestNoDuplicatePolicyIDs() { + ids := set.NewStringSet() + for _, p := range suite.defaultPolicies { + suite.True(ids.Add(p.GetId())) + } +} + +func (suite *DefaultPoliciesTestSuite) TestRuntimePolicyFieldsCompile() { + for _, p := range suite.defaultPolicies { + if policyUtils.AppliesAtRunTime(p) { + checkRegexCompiles(p.GetPolicySections(), fieldnames.ProcessName) + checkRegexCompiles(p.GetPolicySections(), fieldnames.ProcessArguments) + checkRegexCompiles(p.GetPolicySections(), fieldnames.ProcessAncestor) + } + } +} + +func checkRegexCompiles(sections []*storage.PolicySection, fieldname string) { + for _, s := range sections { + for _, g := range s.GetPolicyGroups() { + if g.GetFieldName() == fieldname { + if policyVals := g.GetValues(); len(policyVals) > 0 { + for _, policyVal := range policyVals { + if v := policyVal.GetValue(); v != "" { + regexp.MustCompile(v) + } + } + } + } + } + } +} + +func (suite *DefaultPoliciesTestSuite) TestDefaultPolicies() { + fixtureDep := fixtures.GetDeployment() + fixturesImages := fixtures.DeploymentImages() + + suite.addDepAndImages(fixtureDep, fixturesImages...) 
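The long setup that follows repeats one pattern: build an image fixture (a literal storage.Image, imageWithLayers, or imageWithComponents), wrap it in a deployment, and register both with the suite so the default policies can later be evaluated against them. A minimal sketch of that pattern, using the helpers exactly as they are used later in this function:

    img := imageWithComponents([]*storage.EmbeddedImageScanComponent{
        {Name: "curl", Version: "1.3"},
    })
    dep := deploymentWithImageAnyID(img)
    suite.addDepAndImages(dep, img)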
+ + nginx110 := &storage.Image{ + Id: "SHANGINX110", + Name: &storage.ImageName{ + Registry: "docker.io", + Remote: "library/nginx", + Tag: "1.10", + FullName: "docker.io/library/nginx:1.10", + }, + } + + nginx110Dep := deploymentWithImage("nginx110", nginx110) + suite.addDepAndImages(nginx110Dep, nginx110) + + oldScannedTime := time.Now().Add(-31 * 24 * time.Hour) + + oldScannedImage := &storage.Image{ + Id: "SHAOLDSCANNED", + Name: &storage.ImageName{ + FullName: "docker.io/stackrox/old-scanned-image:0.1", + }, + Scan: &storage.ImageScan{ + ScanTime: protoconv.ConvertTimeToTimestamp(oldScannedTime), + }, + } + oldScannedDep := deploymentWithImage("oldscanned", oldScannedImage) + suite.addDepAndImages(oldScannedDep, oldScannedImage) + + addDockerFileImg := imageWithLayers([]*storage.ImageLayer{ + { + Instruction: "ADD", + Value: "deploy.sh", + }, + { + Instruction: "RUN", + Value: "deploy.sh", + }, + }) + addDockerFileDep := deploymentWithImageAnyID(addDockerFileImg) + suite.addDepAndImages(addDockerFileDep, addDockerFileImg) + + imagePort22Image := imageWithLayers([]*storage.ImageLayer{ + { + Instruction: "EXPOSE", + Value: "22/tcp", + }, + }) + imagePort22Dep := deploymentWithImageAnyID(imagePort22Image) + suite.addDepAndImages(imagePort22Dep, imagePort22Image) + + insecureCMDImage := imageWithLayers([]*storage.ImageLayer{ + { + Instruction: "CMD", + Value: "do an insecure thing", + }, + }) + + insecureCMDDep := deploymentWithImageAnyID(insecureCMDImage) + suite.addDepAndImages(insecureCMDDep, insecureCMDImage) + + runSecretsImage := imageWithLayers([]*storage.ImageLayer{ + { + Instruction: "VOLUME", + Value: "/run/secrets", + }, + }) + runSecretsArrayImage := imageWithLayers([]*storage.ImageLayer{ + { + Instruction: "VOLUME", + Value: "[/run/secrets]", + }, + }) + runSecretsListImage := imageWithLayers([]*storage.ImageLayer{ + { + Instruction: "VOLUME", + Value: "/var/something /run/secrets", + }, + }) + runSecretsArrayListImage := imageWithLayers([]*storage.ImageLayer{ + { + Instruction: "VOLUME", + Value: "[/var/something /run/secrets]", + }, + }) + runSecretsDep := deploymentWithImageAnyID(runSecretsImage) + runSecretsArrayDep := deploymentWithImageAnyID(runSecretsArrayImage) + runSecretsListDep := deploymentWithImageAnyID(runSecretsListImage) + runSecretsArrayListDep := deploymentWithImageAnyID(runSecretsArrayListImage) + suite.addDepAndImages(runSecretsDep, runSecretsImage) + suite.addDepAndImages(runSecretsArrayDep, runSecretsArrayImage) + suite.addDepAndImages(runSecretsListDep, runSecretsListImage) + suite.addDepAndImages(runSecretsArrayListDep, runSecretsArrayListImage) + + oldImageCreationTime := time.Now().Add(-100 * 24 * time.Hour) + oldCreatedImage := &storage.Image{ + Id: "SHA:OLDCREATEDIMAGE", + Name: &storage.ImageName{ + FullName: "docker.io/stackrox/old-image:0.1", + }, + Metadata: &storage.ImageMetadata{ + V1: &storage.V1Metadata{ + Created: protoconv.ConvertTimeToTimestamp(oldImageCreationTime), + }, + }, + } + oldImageDep := deploymentWithImage("oldimagedep", oldCreatedImage) + suite.addDepAndImages(oldImageDep, oldCreatedImage) + + apkImage := imageWithComponents([]*storage.EmbeddedImageScanComponent{ + {Name: "apk-tools", Version: "1.2"}, + {Name: "asfa", Version: "1.5"}, + }) + apkDep := deploymentWithImageAnyID(apkImage) + suite.addDepAndImages(apkDep, apkImage) + + curlImage := imageWithComponents([]*storage.EmbeddedImageScanComponent{ + {Name: "curl", Version: "1.3"}, + {Name: "curlwithextra", Version: "0.9"}, + }) + curlDep := 
deploymentWithImageAnyID(curlImage) + suite.addDepAndImages(curlDep, curlImage) + + componentDeps := make(map[string]*storage.Deployment) + for _, component := range []string{"apt", "dnf", "wget"} { + img := imageWithComponents([]*storage.EmbeddedImageScanComponent{ + {Name: component}, + }) + dep := deploymentWithImageAnyID(img) + suite.addDepAndImages(dep, img) + componentDeps[component] = dep + } + + heartbleedDep := &storage.Deployment{ + Id: "HEARTBLEEDDEPID", + Containers: []*storage.Container{ + { + Name: "nginx", + SecurityContext: &storage.SecurityContext{Privileged: true}, + Image: &storage.ContainerImage{Id: "HEARTBLEEDDEPSHA"}, + }, + }, + } + suite.addDepAndImages(heartbleedDep, &storage.Image{ + Id: "HEARTBLEEDDEPSHA", + Name: &storage.ImageName{FullName: "heartbleed"}, + Scan: &storage.ImageScan{ + Components: []*storage.EmbeddedImageScanComponent{ + {Name: "heartbleed", Version: "1.2", Vulns: []*storage.EmbeddedVulnerability{ + {Cve: "CVE-2014-0160", Link: "https://heartbleed", Cvss: 6, SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{FixedBy: "v1.2"}}, + }}, + }, + }, + }) + + requiredImageLabel := &storage.Deployment{ + Id: "requiredImageLabel", + Containers: []*storage.Container{ + { + Name: "REQUIREDIMAGELABEL", + Image: &storage.ContainerImage{Id: "requiredImageLabelImage"}, + }, + }, + } + suite.addDepAndImages(requiredImageLabel, &storage.Image{ + Id: "requiredImageLabelImage", + Name: &storage.ImageName{ + FullName: "docker.io/stackrox/required-image:0.1", + }, + Metadata: &storage.ImageMetadata{ + V1: &storage.V1Metadata{ + Labels: map[string]string{ + "required-label": "required-value", + }, + }, + }, + }) + + shellshockImage := imageWithComponents([]*storage.EmbeddedImageScanComponent{ + {Name: "shellshock", Version: "1.2", Vulns: []*storage.EmbeddedVulnerability{ + {Cve: "CVE-2014-6271", Link: "https://shellshock", Cvss: 6}, + {Cve: "CVE-ARBITRARY", Link: "https://notshellshock"}, + }}, + }) + shellshockDep := deploymentWithImageAnyID(shellshockImage) + suite.addDepAndImages(shellshockDep, shellshockImage) + + suppressedShellshockImage := imageWithComponents([]*storage.EmbeddedImageScanComponent{ + {Name: "shellshock", Version: "1.2", Vulns: []*storage.EmbeddedVulnerability{ + {Cve: "CVE-2014-6271", Link: "https://shellshock", Cvss: 6, Suppressed: true}, + {Cve: "CVE-ARBITRARY", Link: "https://notshellshock"}, + }}, + }) + suppressedShellShockDep := deploymentWithImageAnyID(suppressedShellshockImage) + suite.addDepAndImages(suppressedShellShockDep, suppressedShellshockImage) + + strutsImage := imageWithComponents([]*storage.EmbeddedImageScanComponent{ + {Name: "struts", Version: "1.2", Vulns: []*storage.EmbeddedVulnerability{ + {Cve: "CVE-2017-5638", Link: "https://struts", Cvss: 8, Severity: storage.VulnerabilitySeverity_IMPORTANT_VULNERABILITY_SEVERITY, SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{FixedBy: "v1.3"}}, + }}, + {Name: "OTHER", Version: "1.3", Vulns: []*storage.EmbeddedVulnerability{ + {Cve: "CVE-1223-451", Link: "https://cvefake"}, + }}, + }) + strutsDep := deploymentWithImageAnyID(strutsImage) + suite.addDepAndImages(strutsDep, strutsImage) + + strutsImageSuppressed := imageWithComponents([]*storage.EmbeddedImageScanComponent{ + {Name: "struts", Version: "1.2", Vulns: []*storage.EmbeddedVulnerability{ + {Cve: "CVE-2017-5638", Link: "https://struts", Suppressed: true, Cvss: 8, Severity: storage.VulnerabilitySeverity_IMPORTANT_VULNERABILITY_SEVERITY, SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{FixedBy: "v1.3"}}, + }}, + {Name: 
"OTHER", Version: "1.3", Vulns: []*storage.EmbeddedVulnerability{ + {Cve: "CVE-1223-451", Link: "https://cvefake"}, + }}, + }) + strutsDepSuppressed := deploymentWithImageAnyID(strutsImageSuppressed) + suite.addDepAndImages(strutsDepSuppressed, strutsImageSuppressed) + + // When image is pull out, the deferred field is set based upon the legacy suppressed field. Therefore, both are set. + // However, here we are specifically testing whether detection is taking the new vulnerability state field into + // account by not setting the suppressed field. + structImageWithDeferredVulns := imageWithComponents([]*storage.EmbeddedImageScanComponent{ + {Name: "deferred-struts", Version: "1.2", Vulns: []*storage.EmbeddedVulnerability{ + {Cve: "CVE-2017-5638", Link: "https://struts", State: storage.VulnerabilityState_DEFERRED, Cvss: 8, Severity: storage.VulnerabilitySeverity_IMPORTANT_VULNERABILITY_SEVERITY, SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{FixedBy: "v1.3"}}, + {Cve: "CVE-2017-FP", Link: "https://struts", State: storage.VulnerabilityState_FALSE_POSITIVE, Cvss: 8, Severity: storage.VulnerabilitySeverity_IMPORTANT_VULNERABILITY_SEVERITY, SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{FixedBy: "v1.3"}}, + {Cve: "CVE-2017-FAKE", Link: "https://struts", Cvss: 8, Severity: storage.VulnerabilitySeverity_IMPORTANT_VULNERABILITY_SEVERITY, SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{FixedBy: "v1.3"}}, + }}, + }) + structDepWithDeferredVulns := deploymentWithImageAnyID(structImageWithDeferredVulns) + suite.addDepAndImages(structDepWithDeferredVulns, structImageWithDeferredVulns) + + depWithNonSeriousVulnsImage := imageWithComponents([]*storage.EmbeddedImageScanComponent{ + {Name: "NOSERIOUS", Version: "2.3", Vulns: []*storage.EmbeddedVulnerability{ + {Cve: "CVE-1234-5678", Link: "https://abcdefgh"}, + {Cve: "CVE-5678-1234", Link: "https://lmnopqrst"}, + }}, + }) + depWithNonSeriousVulns := deploymentWithImageAnyID(depWithNonSeriousVulnsImage) + suite.addDepAndImages(depWithNonSeriousVulns, depWithNonSeriousVulnsImage) + + dockerSockDep := &storage.Deployment{ + Id: "DOCKERSOCDEP", + Containers: []*storage.Container{ + { + Name: "dockersock", + Volumes: []*storage.Volume{ + {Source: "/var/run/docker.sock", Name: "DOCKERSOCK", Type: "HostPath", ReadOnly: true}, + {Source: "NOTDOCKERSOCK"}, + }}, + }, + } + suite.addDepAndImages(dockerSockDep) + + crioSockDep := &storage.Deployment{ + Id: "CRIOSOCDEP", + Containers: []*storage.Container{ + { + Name: "criosock", + Volumes: []*storage.Volume{ + {Source: "/run/crio/crio.sock", Name: "CRIOSOCK", Type: "HostPath", ReadOnly: true}, + {Source: "NOTCRIORSOCK"}, + }}, + }, + } + suite.addDepAndImages(crioSockDep) + + containerPort22Dep := &storage.Deployment{ + Id: "CONTAINERPORT22DEP", + Ports: []*storage.PortConfig{ + {Protocol: "TCP", ContainerPort: 22}, + {Protocol: "UDP", ContainerPort: 4125}, + }, + } + suite.addDepAndImages(containerPort22Dep) + + secretEnvDep := &storage.Deployment{ + Id: "SECRETENVDEP", + Containers: []*storage.Container{ + { + Name: "secretenv", + Config: &storage.ContainerConfig{ + Env: []*storage.ContainerConfig_EnvironmentConfig{ + {Key: "THIS_IS_SECRET_VAR", Value: "stealthmode", EnvVarSource: storage.ContainerConfig_EnvironmentConfig_RAW}, + {Key: "HOME", Value: "/home/stackrox"}, + }, + }}, + }, + } + suite.addDepAndImages(secretEnvDep) + + secretEnvSrcUnsetDep := &storage.Deployment{ + Id: "SECRETENVSRCUNSETDEP", + Containers: []*storage.Container{ + { + Name: "secretenvsrcunset", + Config: 
&storage.ContainerConfig{ + Env: []*storage.ContainerConfig_EnvironmentConfig{ + {Key: "THIS_IS_SECRET_VAR", Value: "stealthmode"}, + }, + }}, + }, + } + suite.addDepAndImages(secretEnvSrcUnsetDep) + + secretKeyRefDep := &storage.Deployment{ + Id: "SECRETKEYREFDEP", + Containers: []*storage.Container{ + {Config: &storage.ContainerConfig{ + Env: []*storage.ContainerConfig_EnvironmentConfig{ + {Key: "THIS_IS_SECRET_VAR", EnvVarSource: storage.ContainerConfig_EnvironmentConfig_SECRET_KEY}, + {Key: "HOME", Value: "/home/stackrox"}, + }, + }}, + }, + } + suite.addDepAndImages(secretKeyRefDep) + + // Fake deployment that shouldn't match anything, just to make sure + // that none of our queries will accidentally match it. + suite.addDepAndImages(&storage.Deployment{Id: "FAKEID", Name: "FAKENAME"}) + + depWithGoodEmailAnnotation := &storage.Deployment{ + Id: "GOODEMAILDEPID", + Annotations: map[string]string{ + "email": "vv@stackrox.com", + }, + } + suite.addDepAndImages(depWithGoodEmailAnnotation) + + depWithOwnerAnnotation := &storage.Deployment{ + Id: "OWNERANNOTATIONDEP", + Annotations: map[string]string{ + "owner": "IOWNTHIS", + "blah": "Blah", + }, + } + suite.addDepAndImages(depWithOwnerAnnotation) + + depWithOwnerLabel := &storage.Deployment{ + Id: "OWNERLABELDEP", + Labels: map[string]string{ + "owner": "IOWNTHIS", + "blah": "Blah", + }, + } + suite.addDepAndImages(depWithOwnerLabel) + + depWitharbitraryAnnotations := &storage.Deployment{ + Id: "ARBITRARYANNOTATIONDEPID", + Annotations: map[string]string{ + "emailnot": "vv@stackrox.com", + "notemail": "vv@stackrox.com", + "ownernot": "vv", + "nowner": "vv", + }, + } + suite.addDepAndImages(depWitharbitraryAnnotations) + + depWithBadEmailAnnotation := &storage.Deployment{ + Id: "BADEMAILDEPID", + Annotations: map[string]string{ + "email": "NOTANEMAIL", + }, + } + suite.addDepAndImages(depWithBadEmailAnnotation) + + sysAdminDep := &storage.Deployment{ + Id: "SYSADMINDEPID", + Containers: []*storage.Container{ + { + Name: "cap-sys", + SecurityContext: &storage.SecurityContext{ + AddCapabilities: []string{"SYS_ADMIN"}, + }, + }, + }, + } + suite.addDepAndImages(sysAdminDep) + + depWithAllResourceLimitsRequestsSpecified := &storage.Deployment{ + Id: "ALLRESOURCESANDLIMITSDEP", + Containers: []*storage.Container{ + {Resources: &storage.Resources{ + CpuCoresRequest: 0.1, + CpuCoresLimit: 0.3, + MemoryMbLimit: 100, + MemoryMbRequest: 1251, + }}, + }, + } + suite.addDepAndImages(depWithAllResourceLimitsRequestsSpecified) + + depWithEnforcementBypassAnnotation := &storage.Deployment{ + Id: "ENFORCEMENTBYPASS", + Annotations: map[string]string{ + "admission.stackrox.io/break-glass": "ticket-1234", + "some-other": "annotation", + }, + } + suite.addDepAndImages(depWithEnforcementBypassAnnotation) + + hostMountDep := &storage.Deployment{ + Id: "HOSTMOUNT", + Containers: []*storage.Container{ + { + Name: "hostmount", + Volumes: []*storage.Volume{ + {Source: "/etc/passwd", Name: "HOSTMOUNT", Type: "HostPath"}, + {Source: "/var/lib/kubelet", Name: "KUBELET", Type: "HostPath", ReadOnly: true}, + }}, + }, + } + suite.addDepAndImages(hostMountDep) + + hostPIDDep := &storage.Deployment{ + Id: "HOSTPID", + HostPid: true, + } + suite.addDepAndImages(hostPIDDep) + + hostIPCDep := &storage.Deployment{ + Id: "HOSTIPC", + HostIpc: true, + } + suite.addDepAndImages(hostIPCDep) + + imgWithFixedByEmpty := suite.addImage(imageWithComponents([]*storage.EmbeddedImageScanComponent{ + {Name: "EXplicitlyEmptyFixedBy", Version: "2.3", Vulns: []*storage.EmbeddedVulnerability{ 
+ {Cve: "CVE-1234-5678", Cvss: 8, Link: "https://abcdefgh", SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{}}, + }}, + })) + + imgWithFixedByEmptyOnlyForSome := suite.addImage(imageWithComponents([]*storage.EmbeddedImageScanComponent{ + {Name: "EXplicitlyEmptyFixedBy", Version: "2.3", Vulns: []*storage.EmbeddedVulnerability{ + {Cve: "CVE-1234-5678", Cvss: 8, Severity: storage.VulnerabilitySeverity_CRITICAL_VULNERABILITY_SEVERITY, Link: "https://abcdefgh", SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{}}, + }}, + {Name: "Normal", Version: "2.3", Vulns: []*storage.EmbeddedVulnerability{ + {Cve: "CVE-5612-1245", Cvss: 8, Severity: storage.VulnerabilitySeverity_CRITICAL_VULNERABILITY_SEVERITY, Link: "https://abcdefgh", SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{FixedBy: "actually_fixable"}}, + }}, + })) + + rootUserImage := &storage.Image{ + Id: "SHA:ROOTUSERIMAGE", + Name: &storage.ImageName{ + FullName: "docker.io/stackrox/rootuser:0.1", + }, + Metadata: &storage.ImageMetadata{ + V1: &storage.V1Metadata{ + User: "root", + }, + }, + } + depWithRootUser := deploymentWithImageAnyID(rootUserImage) + suite.addDepAndImages(depWithRootUser, rootUserImage) + + updateInstructionImage := imageWithLayers([]*storage.ImageLayer{ + { + Instruction: "RUN", + Value: "apt-get update", + }, + }) + depWithUpdate := deploymentWithImageAnyID(updateInstructionImage) + suite.addDepAndImages(depWithUpdate, updateInstructionImage) + + restrictedHostPortDep := &storage.Deployment{ + Id: "RESTRICTEDHOSTPORT", + Ports: []*storage.PortConfig{ + { + ExposureInfos: []*storage.PortConfig_ExposureInfo{ + { + NodePort: 22, + }, + }, + }, + }, + } + + suite.addDepAndImages(restrictedHostPortDep) + + mountPropagationDep := &storage.Deployment{ + Id: "MOUNTPROPAGATIONDEP", + Containers: []*storage.Container{ + { + Id: "MOUNTPROPAGATIONCONTAINER", + Volumes: []*storage.Volume{ + { + Name: "ThisMountIsOnFire", + MountPropagation: storage.Volume_BIDIRECTIONAL, + }, + }, + }, + }, + } + suite.addDepAndImages(mountPropagationDep) + + noSeccompProfileDep := &storage.Deployment{ + Id: "NOSECCOMPPROFILEDEP", + Containers: []*storage.Container{ + { + SecurityContext: &storage.SecurityContext{ + SeccompProfile: &storage.SecurityContext_SeccompProfile{ + Type: storage.SecurityContext_SeccompProfile_UNCONFINED, + }, + }, + }, + }, + } + suite.addDepAndImages(noSeccompProfileDep) + + hostNetworkDep := &storage.Deployment{ + Id: "HOSTNETWORK", + HostNetwork: true, + } + suite.addDepAndImages(hostNetworkDep) + + noAppArmorProfileDep := &storage.Deployment{ + Id: "NOAPPARMORPROFILEDEP", + Containers: []*storage.Container{ + { + Name: "No AppArmor Profile", + Config: &storage.ContainerConfig{ + AppArmorProfile: "unconfined", + }, + }, + }, + } + suite.addDepAndImages(noAppArmorProfileDep) + + // Images "made by Red Hat" - coming from Red Hat registries or Red Hat remotes in quay.io + registryAccessRedhatComUnverifiedImg := suite.imageWithSignatureVerificationResults("registry.access.redhat.com/redhat/ubi8:latest", + []*storage.ImageSignatureVerificationResult{ + { + VerifierId: signatures.DefaultRedHatSignatureIntegration.GetId(), + Status: storage.ImageSignatureVerificationResult_FAILED_VERIFICATION, + }, + }, + ) + registryRedHatIoUnverifiedImg := suite.imageWithSignatureVerificationResults("registry.redhat.io/redhat/ubi8:latest", + []*storage.ImageSignatureVerificationResult{ + { + VerifierId: signatures.DefaultRedHatSignatureIntegration.GetId(), + Status: storage.ImageSignatureVerificationResult_FAILED_VERIFICATION, + }, + 
}, + ) + + quayOCPReleaseUnverifiedImg := suite.imageWithSignatureVerificationResults("quay.io/openshift-release-dev/ocp-release:latest", + []*storage.ImageSignatureVerificationResult{ + { + VerifierId: signatures.DefaultRedHatSignatureIntegration.GetId(), + Status: storage.ImageSignatureVerificationResult_FAILED_VERIFICATION, + }, + }, + ) + quayOCPArtDevUnverifiedImg := suite.imageWithSignatureVerificationResults("quay.io/openshift-release-dev/ocp-v4.0-art-dev:latest", + []*storage.ImageSignatureVerificationResult{ + { + VerifierId: signatures.DefaultRedHatSignatureIntegration.GetId(), + Status: storage.ImageSignatureVerificationResult_FAILED_VERIFICATION, + }, + }, + ) + + suite.addImage(registryAccessRedhatComUnverifiedImg) + suite.addImage(registryRedHatIoUnverifiedImg) + suite.addImage(quayOCPReleaseUnverifiedImg) + suite.addImage(quayOCPArtDevUnverifiedImg) + + // Index processes + bashLineage := []string{"/bin/bash"} + fixtureDepAptIndicator := suite.addIndicator(fixtureDep.GetId(), "apt", "", "/usr/bin/apt", bashLineage, 1) + sysAdminDepAptIndicator := suite.addIndicator(sysAdminDep.GetId(), "apt", "install blah", "/usr/bin/apt", bashLineage, 1) + + kubeletIndicator := suite.addIndicator(containerPort22Dep.GetId(), "curl", "-v -k -SL https://12.13.14.15:10250", "/bin/curl", bashLineage, 1) + kubeletIndicator2 := suite.addIndicator(containerPort22Dep.GetId(), "wget", "https://heapster.kube-system/metrics", "/bin/wget", bashLineage, 1) + kubeletIndicator3 := suite.addIndicator(containerPort22Dep.GetId(), "curl", "https://12.13.14.15:10250 -v -k", "/bin/curl", bashLineage, 1) + + crontabIndicator := suite.addIndicator(containerPort22Dep.GetId(), "crontab", "1 2 3 4 5 6", "/bin/crontab", bashLineage, 1) + + nmapIndicatorfixtureDep1 := suite.addIndicator(fixtureDep.GetId(), "nmap", "blah", "/usr/bin/nmap", bashLineage, 1) + nmapIndicatorfixtureDep2 := suite.addIndicator(fixtureDep.GetId(), "nmap", "blah2", "/usr/bin/nmap", bashLineage, 1) + nmapIndicatorNginx110Dep := suite.addIndicator(nginx110Dep.GetId(), "nmap", "", "/usr/bin/nmap", bashLineage, 1) + + ifconfigIndicatorfixtureDep1 := suite.addIndicator(fixtureDep.GetId(), "ifconfig", "blah", "/sbin/ifconfig", bashLineage, 1) + ifconfigIndicatorfixtureDep2 := suite.addIndicator(fixtureDep.GetId(), "ifconfig", "blah2", "/usr/bin/ifconfig", bashLineage, 1) + ipIndicatorfixtureDep := suite.addIndicator(fixtureDep.GetId(), "ip", "", "/sbin/ip", bashLineage, 1) + arpIndicatorfixtureDep := suite.addIndicator(fixtureDep.GetId(), "arp", "", "/usr/sbin/arp", bashLineage, 1) + ifconfigIndicatorNginx110Dep := suite.addIndicator(nginx110Dep.GetId(), "ifconfig", "", "/sbin/ifconfig", bashLineage, 1) + ipIndicatorNginx110Dep := suite.addIndicator(nginx110Dep.GetId(), "ip", "", "/sbin/ip", bashLineage, 1) + arpIndicatorNginx110Dep := suite.addIndicator(nginx110Dep.GetId(), "arp", "", "/usr/sbin/arp", bashLineage, 1) + // These two should not match for the Network Management Execution policy. 
See ROX-6011 + suite.addIndicator(fixtureDep.GetId(), "pip", "", "/usr/bin/pip", bashLineage, 1) + suite.addIndicator(nginx110Dep.GetId(), "pip", "", "/usr/bin/pip", bashLineage, 1) + + javaLineage := []string{"/bin/bash", "/mnt/scripts/run_server.sh", "/bin/java"} + fixtureDepJavaIndicator := suite.addIndicator(fixtureDep.GetId(), "/bin/bash", "-attack", "/bin/bash", javaLineage, 0) + + deploymentTestCases := []testCase{ + { + policyName: "Latest tag", + expectedViolations: map[string][]*storage.Alert_Violation{ + fixtureDep.GetId(): { + { + Message: "Container 'supervulnerable' has image with tag 'latest'", + }, + }, + }, + }, + { + policyName: "Alpine Linux Package Manager (apk) in Image", + expectedViolations: map[string][]*storage.Alert_Violation{ + apkDep.GetId(): { + { + Message: "Container 'ASFASF' includes component 'apk-tools' (version 1.2)", + }, + }, + }, + }, + { + policyName: "Ubuntu Package Manager in Image", + expectedViolations: map[string][]*storage.Alert_Violation{ + componentDeps["apt"].GetId(): { + { + Message: "Container 'ASFASF' includes component 'apt'", + }, + }, + }, + }, + { + policyName: "Curl in Image", + expectedViolations: map[string][]*storage.Alert_Violation{ + curlDep.GetId(): { + { + Message: "Container 'ASFASF' includes component 'curl' (version 1.3)", + }, + }, + }, + }, + { + policyName: "Red Hat Package Manager in Image", + expectedViolations: map[string][]*storage.Alert_Violation{ + componentDeps["dnf"].GetId(): { + { + Message: "Container 'ASFASF' includes component 'dnf'", + }, + }, + }, + }, + { + policyName: "Wget in Image", + expectedViolations: map[string][]*storage.Alert_Violation{ + componentDeps["wget"].GetId(): { + { + Message: "Container 'ASFASF' includes component 'wget'", + }, + }, + }, + }, + { + policyName: "Mount Container Runtime Socket", + expectedViolations: map[string][]*storage.Alert_Violation{ + dockerSockDep.GetId(): { + { + Message: "Read-only volume 'DOCKERSOCK' has source '/var/run/docker.sock' and type 'HostPath'", + }, + }, + crioSockDep.GetId(): { + { + Message: "Read-only volume 'CRIOSOCK' has source '/run/crio/crio.sock' and type 'HostPath'", + }, + }, + }, + }, + { + policyName: "90-Day Image Age", + expectedViolations: map[string][]*storage.Alert_Violation{ + oldImageDep.GetId(): { + { + Message: fmt.Sprintf("Container 'oldimage' has image created at %s (UTC)", readable.Time(oldImageCreationTime)), + }, + }, + }, + }, + { + policyName: "30-Day Scan Age", + expectedViolations: map[string][]*storage.Alert_Violation{ + oldScannedDep.GetId(): { + { + Message: fmt.Sprintf("Container 'oldscannedimage' has image last scanned at %s (UTC)", readable.Time(oldScannedTime)), + }, + }, + }, + }, + { + policyName: "Secure Shell (ssh) Port Exposed in Image", + expectedViolations: map[string][]*storage.Alert_Violation{ + imagePort22Dep.GetId(): { + { + Message: "Dockerfile line 'EXPOSE 22/tcp' found in container 'ASFASF'", + }, + }, + }, + }, + { + policyName: "Secure Shell (ssh) Port Exposed", + expectedViolations: map[string][]*storage.Alert_Violation{ + containerPort22Dep.GetId(): { + { + Message: "Exposed port 22/TCP is present", + }, + }, + }, + }, + { + policyName: "Privileged Container", + expectedViolations: map[string][]*storage.Alert_Violation{ + fixtureDep.GetId(): { + { + Message: "Container 'nginx110container' is privileged", + }, + }, + heartbleedDep.GetId(): { + { + Message: "Container 'nginx' is privileged", + }, + }, + }, + }, + { + policyName: "Container using read-write root filesystem", + expectedViolations: 
map[string][]*storage.Alert_Violation{ + heartbleedDep.GetId(): { + { + Message: "Container 'nginx' uses a read-write root filesystem", + }, + }, + fixtureDep.GetId(): { + { + Message: "Container 'nginx110container' uses a read-write root filesystem", + }, + }, + sysAdminDep.GetId(): { + { + Message: "Container 'cap-sys' uses a read-write root filesystem", + }, + }, + noSeccompProfileDep.GetId(): { + { + Message: "Container uses a read-write root filesystem", + }, + }, + }, + }, + { + policyName: "Insecure specified in CMD", + expectedViolations: map[string][]*storage.Alert_Violation{ + insecureCMDDep.GetId(): { + { + Message: "Dockerfile line 'CMD do an insecure thing' found in container 'ASFASF'", + }, + }, + }, + }, + { + policyName: "Improper Usage of Orchestrator Secrets Volume", + expectedViolations: map[string][]*storage.Alert_Violation{ + runSecretsDep.GetId(): { + { + Message: "Dockerfile line 'VOLUME /run/secrets' found in container 'ASFASF'", + }, + }, + runSecretsArrayDep.GetId(): { + { + Message: "Dockerfile line 'VOLUME [/run/secrets]' found in container 'ASFASF'", + }, + }, + runSecretsListDep.GetId(): { + { + Message: "Dockerfile line 'VOLUME /var/something /run/secrets' found in container 'ASFASF'", + }, + }, + runSecretsArrayListDep.GetId(): { + { + Message: "Dockerfile line 'VOLUME [/var/something /run/secrets]' found in container 'ASFASF'", + }, + }, + }, + }, + { + policyName: "Images with no scans", + shouldNotMatch: map[string]struct{}{ + // These deployments have scans on their images. + fixtureDep.GetId(): {}, + oldScannedDep.GetId(): {}, + heartbleedDep.GetId(): {}, + apkDep.GetId(): {}, + curlDep.GetId(): {}, + componentDeps["apt"].GetId(): {}, + componentDeps["dnf"].GetId(): {}, + componentDeps["wget"].GetId(): {}, + shellshockDep.GetId(): {}, + suppressedShellShockDep.GetId(): {}, + strutsDep.GetId(): {}, + strutsDepSuppressed.GetId(): {}, + structDepWithDeferredVulns.GetId(): {}, + depWithNonSeriousVulns.GetId(): {}, + // The rest of the deployments have no images! 
+ "FAKEID": {}, + containerPort22Dep.GetId(): {}, + dockerSockDep.GetId(): {}, + crioSockDep.GetId(): {}, + secretEnvDep.GetId(): {}, + secretEnvSrcUnsetDep.GetId(): {}, + secretKeyRefDep.GetId(): {}, + depWithOwnerAnnotation.GetId(): {}, + depWithOwnerLabel.GetId(): {}, + depWithGoodEmailAnnotation.GetId(): {}, + depWithBadEmailAnnotation.GetId(): {}, + depWitharbitraryAnnotations.GetId(): {}, + sysAdminDep.GetId(): {}, + depWithAllResourceLimitsRequestsSpecified.GetId(): {}, + depWithEnforcementBypassAnnotation.GetId(): {}, + hostMountDep.GetId(): {}, + restrictedHostPortDep.GetId(): {}, + hostPIDDep.GetId(): {}, + hostIPCDep.GetId(): {}, + mountPropagationDep.GetId(): {}, + noSeccompProfileDep.GetId(): {}, + hostNetworkDep.GetId(): {}, + noAppArmorProfileDep.GetId(): {}, + }, + sampleViolationForMatched: "Image in container '%s' has not been scanned", + }, + { + policyName: "Required Annotation: Email", + shouldNotMatch: map[string]struct{}{ + depWithGoodEmailAnnotation.GetId(): {}, + }, + sampleViolationForMatched: "Required annotation not found (key = 'email', value = '[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+')", + }, + { + policyName: "Required Label: Owner/Team", + shouldNotMatch: map[string]struct{}{ + depWithOwnerLabel.GetId(): {}, + fixtureDep.GetId(): {}, + }, + sampleViolationForMatched: "Required label not found (key = 'owner|team', value = '.+')", + }, + { + policyName: "Required Annotation: Owner/Team", + shouldNotMatch: map[string]struct{}{ + depWithOwnerAnnotation.GetId(): {}, + fixtureDep.GetId(): {}, + }, + sampleViolationForMatched: "Required annotation not found (key = 'owner|team', value = '.+')", + }, + { + policyName: "CAP_SYS_ADMIN capability added", + expectedViolations: map[string][]*storage.Alert_Violation{ + sysAdminDep.GetId(): { + { + Message: "Container 'cap-sys' adds capability SYS_ADMIN", + }, + }, + fixtureDep.GetId(): { + { + Message: "Container 'nginx110container' adds capability SYS_ADMIN", + }, + }, + }, + }, + { + policyName: "Apache Struts: CVE-2017-5638", + expectedViolations: map[string][]*storage.Alert_Violation{ + strutsDep.GetId(): { + { + Message: "CVE-2017-5638 (CVSS 8) (severity Important) found in component 'struts' (version 1.2) in container 'ASFASF'", + }, + }, + // CVE-2017-5638 is deferred in `deferred-struct`, hence no violation. 
+ }, + }, + { + policyName: "No CPU request or memory limit specified", + expectedViolations: map[string][]*storage.Alert_Violation{ + fixtureDep.GetId(): { + {Message: "Memory limit set to 0 MB for container 'nginx110container'"}, + }, + }, + }, + { + policyName: "Environment Variable Contains Secret", + expectedViolations: map[string][]*storage.Alert_Violation{ + secretEnvDep.GetId(): { + { + Message: "Environment variable 'THIS_IS_SECRET_VAR' is present in container 'secretenv'", + }, + }, + }, + }, + { + policyName: "Secret Mounted as Environment Variable", + expectedViolations: map[string][]*storage.Alert_Violation{ + secretKeyRefDep.GetId(): { + { + Message: "Environment variable 'THIS_IS_SECRET_VAR' is present and references a Secret", + }, + }, + }, + }, + { + policyName: "Fixable CVSS >= 6 and Privileged", + expectedViolations: map[string][]*storage.Alert_Violation{ + heartbleedDep.GetId(): { + { + Message: "Container 'nginx' is privileged", + }, + { + Message: "Fixable CVE-2014-0160 (CVSS 6) (severity Unknown) found in component 'heartbleed' (version 1.2) in container 'nginx', resolved by version v1.2", + }, + }, + }, + }, + { + policyName: "Fixable CVSS >= 7", + expectedViolations: map[string][]*storage.Alert_Violation{ + strutsDep.GetId(): { + { + Message: "Fixable CVE-2017-5638 (CVSS 8) (severity Important) found in component 'struts' (version 1.2) in container 'ASFASF', resolved by version v1.3", + }, + }, + structDepWithDeferredVulns.GetId(): { + { + Message: "Fixable CVE-2017-FAKE (CVSS 8) (severity Important) found in component 'deferred-struts' (version 1.2) in container 'ASFASF', resolved by version v1.3", + }, + }, + }, + }, + { + policyName: "Fixable Severity at least Important", + expectedViolations: map[string][]*storage.Alert_Violation{ + strutsDep.GetId(): { + { + Message: "Fixable CVE-2017-5638 (CVSS 8) (severity Important) found in component 'struts' (version 1.2) in container 'ASFASF', resolved by version v1.3", + }, + }, + structDepWithDeferredVulns.GetId(): { + { + Message: "Fixable CVE-2017-FAKE (CVSS 8) (severity Important) found in component 'deferred-struts' (version 1.2) in container 'ASFASF', resolved by version v1.3", + }, + }, + }, + }, + { + policyName: "ADD Command used instead of COPY", + expectedViolations: map[string][]*storage.Alert_Violation{ + addDockerFileDep.GetId(): { + { + Message: "Dockerfile line 'ADD deploy.sh' found in container 'ASFASF'", + }, + }, + fixtureDep.GetId(): { + { + Message: "Dockerfile line 'ADD FILE:blah' found in container 'nginx110container'", + }, + { + Message: "Dockerfile line 'ADD file:4eedf861fb567fffb2694b65ebd...' 
found in container 'supervulnerable'", + }, + }, + }, + }, + { + policyName: "nmap Execution", + expectedProcessViolations: map[string][]*storage.ProcessIndicator{ + fixtureDep.GetId(): {nmapIndicatorfixtureDep1, nmapIndicatorfixtureDep2}, + nginx110Dep.GetId(): {nmapIndicatorNginx110Dep}, + }, + }, + { + policyName: "Process Targeting Cluster Kubelet Endpoint", + expectedProcessViolations: map[string][]*storage.ProcessIndicator{ + containerPort22Dep.GetId(): {kubeletIndicator, kubeletIndicator2, kubeletIndicator3}, + }, + }, + { + policyName: "crontab Execution", + expectedProcessViolations: map[string][]*storage.ProcessIndicator{ + containerPort22Dep.GetId(): {crontabIndicator}, + }, + }, + { + policyName: "Ubuntu Package Manager Execution", + expectedProcessViolations: map[string][]*storage.ProcessIndicator{ + fixtureDep.GetId(): {fixtureDepAptIndicator}, + sysAdminDep.GetId(): {sysAdminDepAptIndicator}, + }, + }, + { + policyName: "Process with UID 0", + expectedProcessViolations: map[string][]*storage.ProcessIndicator{ + fixtureDep.GetId(): {fixtureDepJavaIndicator}, + }, + }, + { + policyName: "Shell Spawned by Java Application", + expectedProcessViolations: map[string][]*storage.ProcessIndicator{ + fixtureDep.GetId(): {fixtureDepJavaIndicator}, + }, + }, + { + policyName: "Network Management Execution", + expectedProcessViolations: map[string][]*storage.ProcessIndicator{ + fixtureDep.GetId(): {ifconfigIndicatorfixtureDep1, ifconfigIndicatorfixtureDep2, ipIndicatorfixtureDep, arpIndicatorfixtureDep}, + nginx110Dep.GetId(): {ifconfigIndicatorNginx110Dep, ipIndicatorNginx110Dep, arpIndicatorNginx110Dep}, + }, + }, + { + policyName: "Emergency Deployment Annotation", + expectedViolations: map[string][]*storage.Alert_Violation{ + depWithEnforcementBypassAnnotation.GetId(): { + {Message: "Disallowed annotations found: admission.stackrox.io/break-glass=ticket-1234"}, + }, + }, + }, + { + policyName: "Mounting Sensitive Host Directories", + expectedViolations: map[string][]*storage.Alert_Violation{ + hostMountDep.GetId(): { + {Message: "Read-only volume 'KUBELET' has source '/var/lib/kubelet' and type 'HostPath'"}, + {Message: "Writable volume 'HOSTMOUNT' has source '/etc/passwd' and type 'HostPath'"}, + }, + dockerSockDep.GetId(): { + {Message: "Read-only volume 'DOCKERSOCK' has source '/var/run/docker.sock' and type 'HostPath'"}, + }, + }, + }, + { + policyName: writableHostMountPolicyName, + expectedViolations: map[string][]*storage.Alert_Violation{ + hostMountDep.GetId(): { + {Message: "Writable volume 'HOSTMOUNT' has source '/etc/passwd' and type 'HostPath'"}, + }, + }, + }, + { + policyName: "Docker CIS 4.1: Ensure That a User for the Container Has Been Created", + expectedViolations: map[string][]*storage.Alert_Violation{ + depWithRootUser.GetId(): { + { + Message: "Container 'rootuser' has image with user 'root'", + }, + }, + }, + }, + { + policyName: "Docker CIS 4.7: Alert on Update Instruction", + expectedViolations: map[string][]*storage.Alert_Violation{ + depWithUpdate.GetId(): { + { + Message: "Dockerfile line 'RUN apt-get update' found in container 'ASFASF'", + }, + }, + }, + }, + { + policyName: "Docker CIS 5.7: Ensure privileged ports are not mapped within containers", + expectedViolations: map[string][]*storage.Alert_Violation{ + restrictedHostPortDep.GetId(): { + { + Message: "Exposed node port 22 is present", + }, + }, + }, + }, + { + policyName: "Docker CIS 5.15: Ensure that the host's process namespace is not shared", + expectedViolations: 
map[string][]*storage.Alert_Violation{ + hostPIDDep.GetId(): { + {Message: "Deployment uses the host's process ID namespace"}, + }, + }, + }, + { + policyName: "Docker CIS 5.16: Ensure that the host's IPC namespace is not shared", + expectedViolations: map[string][]*storage.Alert_Violation{ + hostIPCDep.GetId(): { + {Message: "Deployment uses the host's IPC namespace"}, + }, + }, + }, + { + policyName: "Docker CIS 5.19: Ensure mount propagation mode is not enabled", + expectedViolations: map[string][]*storage.Alert_Violation{ + mountPropagationDep.GetId(): { + {Message: "Writable volume 'ThisMountIsOnFire' has mount propagation 'bidirectional'"}, + }, + }, + }, + { + policyName: "Docker CIS 5.21: Ensure the default seccomp profile is not disabled", + expectedViolations: map[string][]*storage.Alert_Violation{ + noSeccompProfileDep.GetId(): { + {Message: "Container has Seccomp profile type 'unconfined'"}, + }, + }, + }, + { + policyName: "Docker CIS 5.9 and 5.20: Ensure that the host's network namespace is not shared", + expectedViolations: map[string][]*storage.Alert_Violation{ + hostNetworkDep.GetId(): { + {Message: "Deployment uses the host's network namespace"}, + }, + }, + }, + { + policyName: "Docker CIS 5.1 Ensure that, if applicable, an AppArmor Profile is enabled", + expectedViolations: map[string][]*storage.Alert_Violation{ + noAppArmorProfileDep.GetId(): { + {Message: "Container 'No AppArmor Profile' has AppArmor profile type 'unconfined'"}, + }, + }, + }, + { + policyName: "Docker CIS 4.4: Ensure images are scanned and rebuilt to include security patches", + allowUnvalidatedViolations: true, + expectedViolations: map[string][]*storage.Alert_Violation{ + strutsDep.GetId(): { + { + Message: "Fixable CVE-2017-5638 (CVSS 8) (severity Important) found in component 'struts' (version 1.2) in container 'ASFASF', resolved by version v1.3", + }, + }, + heartbleedDep.GetId(): { + { + Message: "Fixable CVE-2014-0160 (CVSS 6) (severity Unknown) found in component 'heartbleed' (version 1.2) in container 'nginx', resolved by version v1.2", + }, + }, + fixtureDep.GetId(): { + { + Message: "Fixable CVE-2014-6200 (CVSS 5) (severity Moderate) found in component 'name' (version 1.2.3.4) in container 'supervulnerable', resolved by version abcdefg", + }, + }, + fixtures.LightweightDeployment().GetId(): { + { + Message: "Fixable CVE-2014-6200 (CVSS 5) (severity Moderate) found in component 'name' (version 1.2.3.4) in container 'supervulnerable', resolved by version abcdefg", + }, + }, + structDepWithDeferredVulns.GetId(): { + { + Message: "Fixable CVE-2017-FAKE (CVSS 8) (severity Important) found in component 'deferred-struts' (version 1.2) in container 'ASFASF', resolved by version v1.3", + }, + }, + }, + }, + { + policyName: anyHostPathPolicyName, + expectedViolations: map[string][]*storage.Alert_Violation{ + dockerSockDep.GetId(): { + {Message: "Read-only volume 'DOCKERSOCK' has source '/var/run/docker.sock' and type 'HostPath'"}, + }, + crioSockDep.GetId(): { + {Message: "Read-only volume 'CRIOSOCK' has source '/run/crio/crio.sock' and type 'HostPath'"}, + }, + hostMountDep.GetId(): { + {Message: "Read-only volume 'KUBELET' has source '/var/lib/kubelet' and type 'HostPath'"}, + {Message: "Writable volume 'HOSTMOUNT' has source '/etc/passwd' and type 'HostPath'"}, + }, + }, + }, + } + + for _, c := range deploymentTestCases { + p := suite.MustGetPolicy(c.policyName) + suite.T().Run(fmt.Sprintf("%s (on deployments)", c.policyName), func(t *testing.T) { + if len(c.shouldNotMatch) == 0 { + 
assert.True(t, (c.expectedViolations != nil) != (c.expectedProcessViolations != nil), "Every test case must "+ + "contain exactly one of expectedViolations and expectedProcessViolations") + } else { + assert.Nil(t, c.expectedViolations, "Cannot specify shouldNotMatch AND expectedViolations") + assert.Nil(t, c.expectedProcessViolations, "Cannot specify shouldNotMatch AND expectedProcessViolations") + } + + m, err := BuildDeploymentMatcher(p) + require.NoError(t, err) + + if c.expectedProcessViolations != nil { + processMatcher, err := BuildDeploymentWithProcessMatcher(p) + require.NoError(t, err) + for deploymentID, processes := range c.expectedProcessViolations { + expectedProcesses := set.NewStringSet(sliceutils.Map(processes, func(p *storage.ProcessIndicator) string { + return p.GetId() + })...) + deployment := suite.deployments[deploymentID] + + for _, process := range suite.deploymentsToIndicators[deploymentID] { + match := getViolationsWithAndWithoutCaching(t, func(cache *CacheReceptacle) (Violations, error) { + return processMatcher.MatchDeploymentWithProcess(nil, enhancedDeployment(deployment, suite.getImagesForDeployment(deployment)), process, false) + }) + require.NoError(t, err) + if expectedProcesses.Contains(process.GetId()) { + assert.NotNil(t, match.ProcessViolation, "process %+v should match", process) + } else { + assert.Nil(t, match.ProcessViolation, "process %+v should not match", process) + } + } + } + return + } + + actualViolations := make(map[string][]*storage.Alert_Violation) + for id, deployment := range suite.deployments { + violationsForDep := getViolationsWithAndWithoutCaching(t, func(cache *CacheReceptacle) (Violations, error) { + return m.MatchDeployment(cache, enhancedDeployment(deployment, suite.getImagesForDeployment(deployment))) + }) + assert.Nil(t, violationsForDep.ProcessViolation) + if alertViolations := violationsForDep.AlertViolations; len(alertViolations) > 0 { + actualViolations[id] = alertViolations + } + } + if len(c.shouldNotMatch) > 0 { + for shouldNotMatchID := range c.shouldNotMatch { + assert.Contains(t, suite.deployments, shouldNotMatchID) + assert.NotContains(t, actualViolations, shouldNotMatchID) + } + for id := range suite.deployments { + if _, shouldNotMatch := c.shouldNotMatch[id]; !shouldNotMatch { + assert.Contains(t, actualViolations, id) + + // TODO(rc) update for BPL and check all sampleViolationForMatched + if c.policyName == "Images with no scans" { + if len(suite.deployments[id].GetContainers()) == 1 { + msg := fmt.Sprintf(c.sampleViolationForMatched, suite.deployments[id].GetContainers()[0].GetName()) + protoassert.SlicesEqual(t, actualViolations[id], []*storage.Alert_Violation{{Message: msg}}) + } + } + } + } + return + } + + for id := range suite.deployments { + violations, expected := c.expectedViolations[id] + if expected { + assert.Contains(t, actualViolations, id) + + if c.allowUnvalidatedViolations { + assert.NotEmpty(t, violations) + for _, violation := range violations { + protoassert.SliceContains(t, actualViolations[id], violation) + } + } else { + protoassert.SlicesEqual(t, violations, actualViolations[id]) + } + } else { + assert.NotContains(t, actualViolations, id) + } + } + + }) + } + + imageTestCases := []testCase{ + { + policyName: "Latest tag", + expectedViolations: map[string][]*storage.Alert_Violation{ + fixtureDep.GetContainers()[1].GetImage().GetId(): { + {Message: "Image has tag 'latest'"}, + }, + }, + }, + { + policyName: "Alpine Linux Package Manager (apk) in Image", + expectedViolations: 
map[string][]*storage.Alert_Violation{ + suite.imageIDFromDep(apkDep): { + { + Message: "Image includes component 'apk-tools' (version 1.2)", + }, + }, + }, + }, + { + policyName: "Ubuntu Package Manager in Image", + expectedViolations: map[string][]*storage.Alert_Violation{ + suite.imageIDFromDep(componentDeps["apt"]): { + { + Message: "Image includes component 'apt'", + }, + }, + }, + }, + { + policyName: "Curl in Image", + expectedViolations: map[string][]*storage.Alert_Violation{ + suite.imageIDFromDep(curlDep): { + { + Message: "Image includes component 'curl' (version 1.3)", + }, + }, + }, + }, + { + policyName: "Red Hat Package Manager in Image", + expectedViolations: map[string][]*storage.Alert_Violation{ + suite.imageIDFromDep(componentDeps["dnf"]): { + { + Message: "Image includes component 'dnf'", + }, + }, + }, + }, + { + policyName: "Wget in Image", + expectedViolations: map[string][]*storage.Alert_Violation{ + suite.imageIDFromDep(componentDeps["wget"]): { + { + Message: "Image includes component 'wget'", + }, + }, + }, + }, + { + policyName: "90-Day Image Age", + expectedViolations: map[string][]*storage.Alert_Violation{ + suite.imageIDFromDep(oldImageDep): { + { + Message: fmt.Sprintf("Image was created at %s (UTC)", readable.Time(oldImageCreationTime)), + }, + }, + }, + }, + { + policyName: "30-Day Scan Age", + expectedViolations: map[string][]*storage.Alert_Violation{ + suite.imageIDFromDep(oldScannedDep): { + { + Message: fmt.Sprintf("Image was last scanned at %s (UTC)", readable.Time(oldScannedTime)), + }, + }, + }, + }, + { + policyName: "Secure Shell (ssh) Port Exposed in Image", + expectedViolations: map[string][]*storage.Alert_Violation{ + suite.imageIDFromDep(imagePort22Dep): { + { + Message: "Dockerfile line 'EXPOSE 22/tcp' found", + }, + }, + }, + }, + { + policyName: "Insecure specified in CMD", + expectedViolations: map[string][]*storage.Alert_Violation{ + suite.imageIDFromDep(insecureCMDDep): { + { + Message: "Dockerfile line 'CMD do an insecure thing' found", + }, + }, + }, + }, + { + policyName: "Improper Usage of Orchestrator Secrets Volume", + expectedViolations: map[string][]*storage.Alert_Violation{ + suite.imageIDFromDep(runSecretsDep): { + { + Message: "Dockerfile line 'VOLUME /run/secrets' found", + }, + }, + suite.imageIDFromDep(runSecretsArrayDep): { + { + Message: "Dockerfile line 'VOLUME [/run/secrets]' found", + }, + }, + suite.imageIDFromDep(runSecretsListDep): { + { + Message: "Dockerfile line 'VOLUME /var/something /run/secrets' found", + }, + }, + suite.imageIDFromDep(runSecretsArrayListDep): { + { + Message: "Dockerfile line 'VOLUME [/var/something /run/secrets]' found", + }, + }, + }, + }, + { + policyName: "Images with no scans", + shouldNotMatch: map[string]struct{}{ + oldScannedImage.GetId(): {}, + suite.imageIDFromDep(heartbleedDep): {}, + apkImage.GetId(): {}, + curlImage.GetId(): {}, + suite.imageIDFromDep(componentDeps["apt"]): {}, + suite.imageIDFromDep(componentDeps["dnf"]): {}, + suite.imageIDFromDep(componentDeps["wget"]): {}, + shellshockImage.GetId(): {}, + suppressedShellshockImage.GetId(): {}, + strutsImage.GetId(): {}, + strutsImageSuppressed.GetId(): {}, + structImageWithDeferredVulns.GetId(): {}, + depWithNonSeriousVulnsImage.GetId(): {}, + fixtureDep.GetContainers()[0].GetImage().GetId(): {}, + fixtureDep.GetContainers()[1].GetImage().GetId(): {}, + suite.imageIDFromDep(oldScannedDep): {}, + imgWithFixedByEmpty.GetId(): {}, + imgWithFixedByEmptyOnlyForSome.GetId(): {}, + }, + sampleViolationForMatched: "Image has not been 
scanned", + expectedViolations: map[string][]*storage.Alert_Violation{}, + }, + { + policyName: "Apache Struts: CVE-2017-5638", + expectedViolations: map[string][]*storage.Alert_Violation{ + suite.imageIDFromDep(strutsDep): { + { + Message: "CVE-2017-5638 (CVSS 8) (severity Important) found in component 'struts' (version 1.2)", + }, + }, + }, + }, + { + policyName: "Fixable CVSS >= 7", + expectedViolations: map[string][]*storage.Alert_Violation{ + suite.imageIDFromDep(strutsDep): { + { + Message: "Fixable CVE-2017-5638 (CVSS 8) (severity Important) found in component 'struts' (version 1.2), resolved by version v1.3", + }, + }, + imgWithFixedByEmptyOnlyForSome.GetId(): { + { + Message: "Fixable CVE-5612-1245 (CVSS 8) (severity Critical) found in component 'Normal' (version 2.3), resolved by version actually_fixable", + }, + }, + }, + }, + { + policyName: "Fixable Severity at least Important", + expectedViolations: map[string][]*storage.Alert_Violation{ + suite.imageIDFromDep(strutsDep): { + { + Message: "Fixable CVE-2017-5638 (CVSS 8) (severity Important) found in component 'struts' (version 1.2), resolved by version v1.3", + }, + }, + imgWithFixedByEmptyOnlyForSome.GetId(): { + { + Message: "Fixable CVE-5612-1245 (CVSS 8) (severity Critical) found in component 'Normal' (version 2.3), resolved by version actually_fixable", + }, + }, + }, + }, + { + policyName: "ADD Command used instead of COPY", + expectedViolations: map[string][]*storage.Alert_Violation{ + suite.imageIDFromDep(addDockerFileDep): { + { + Message: "Dockerfile line 'ADD deploy.sh' found", + }, + }, + fixtureDep.GetContainers()[0].GetImage().GetId(): { + { + Message: "Dockerfile line 'ADD FILE:blah' found", + }, + }, + fixtureDep.GetContainers()[1].GetImage().GetId(): { + { + Message: "Dockerfile line 'ADD file:4eedf861fb567fffb2694b65ebd...' found", + }, + }, + }, + }, + { + policyName: "Required Image Label", + shouldNotMatch: map[string]struct{}{ + "requiredImageLabelImage": {}, + }, + sampleViolationForMatched: "Required label not found (found labels: )", + }, + + { + // We can only test that the policy triggers for unverified images. The "shouldNotMatch" field cannot be + // used to verify that signed images don't trigger violations, because then the logic expects that all + // other images (not listed in shouldNotMatch) trigger a violation; and in this case only unsigned images + // in Red Hat registries trigger violations - any other unsigned images are fine and should not trigger. 
+ policyName: "Red Hat images must be signed by a Red Hat release key", + expectedViolations: map[string][]*storage.Alert_Violation{ + registryRedHatIoUnverifiedImg.GetId(): { + { + Message: "Image has registry 'registry.redhat.io'", + }, + { + Message: "Image signature is not verified by the specified signature integration(s).", + }, + }, + registryAccessRedhatComUnverifiedImg.GetId(): { + { + Message: "Image has registry 'registry.access.redhat.com'", + }, + { + Message: "Image signature is not verified by the specified signature integration(s).", + }, + }, + quayOCPReleaseUnverifiedImg.GetId(): { + { + Message: "Image has registry 'quay.io' and remote 'openshift-release-dev/ocp-release'", + }, + { + Message: "Image signature is not verified by the specified signature integration(s).", + }, + }, + quayOCPArtDevUnverifiedImg.GetId(): { + { + Message: "Image has registry 'quay.io' and remote 'openshift-release-dev/ocp-v4.0-art-dev'", + }, + { + Message: "Image signature is not verified by the specified signature integration(s).", + }, + }, + }, + }, + } + + for _, c := range imageTestCases { + p := suite.MustGetPolicy(c.policyName) + suite.T().Run(fmt.Sprintf("%s (on images)", c.policyName), func(t *testing.T) { + assert.Nil(t, c.expectedProcessViolations) + + m, err := BuildImageMatcher(p) + require.NoError(t, err) + + actualViolations := make(map[string][]*storage.Alert_Violation) + for id, image := range suite.images { + violationsForImg := getViolationsWithAndWithoutCaching(t, func(cache *CacheReceptacle) (Violations, error) { + return m.MatchImage(cache, image) + }) + suite.Nil(violationsForImg.ProcessViolation) + if alertViolations := violationsForImg.AlertViolations; len(alertViolations) > 0 { + actualViolations[id] = alertViolations + } + } + + for id, violations := range c.expectedViolations { + assert.Contains(t, actualViolations, id) + protoassert.SlicesEqual(t, violations, actualViolations[id]) + } + if len(c.shouldNotMatch) > 0 { + if c.policyName == "Required Image Label" { + for id, image := range suite.images { + if image.GetMetadata() == nil { + c.shouldNotMatch[id] = struct{}{} + } + } + } + + for shouldNotMatchID := range c.shouldNotMatch { + assert.Contains(t, suite.images, shouldNotMatchID, "%s is not a known image id in the suite", shouldNotMatchID) + assert.NotContains(t, actualViolations, shouldNotMatchID) + } + + for id := range suite.images { + if _, shouldNotMatch := c.shouldNotMatch[id]; !shouldNotMatch { + assert.Contains(t, actualViolations, id) + protoassert.SlicesEqual(t, actualViolations[id], []*storage.Alert_Violation{{Message: c.sampleViolationForMatched}}) + } + } + } + }) + } +} diff --git a/pkg/booleanpolicy/default_policies_test.go b/pkg/booleanpolicy/default_policies_test.go deleted file mode 100644 index 30d3a641f3856..0000000000000 --- a/pkg/booleanpolicy/default_policies_test.go +++ /dev/null @@ -1,4162 +0,0 @@ -package booleanpolicy - -import ( - "fmt" - "regexp" - "strconv" - "strings" - "testing" - "time" - - "github.com/stackrox/rox/generated/storage" - "github.com/stackrox/rox/pkg/booleanpolicy/augmentedobjs" - "github.com/stackrox/rox/pkg/booleanpolicy/fieldnames" - "github.com/stackrox/rox/pkg/booleanpolicy/policyversion" - "github.com/stackrox/rox/pkg/booleanpolicy/violationmessages/printer" - "github.com/stackrox/rox/pkg/defaults/policies" - "github.com/stackrox/rox/pkg/features" - "github.com/stackrox/rox/pkg/fixtures" - "github.com/stackrox/rox/pkg/images/types" - imgUtils "github.com/stackrox/rox/pkg/images/utils" - 
"github.com/stackrox/rox/pkg/kubernetes" - policyUtils "github.com/stackrox/rox/pkg/policies" - "github.com/stackrox/rox/pkg/protoassert" - "github.com/stackrox/rox/pkg/protocompat" - "github.com/stackrox/rox/pkg/protoconv" - "github.com/stackrox/rox/pkg/protoutils" - "github.com/stackrox/rox/pkg/readable" - "github.com/stackrox/rox/pkg/set" - "github.com/stackrox/rox/pkg/signatures" - "github.com/stackrox/rox/pkg/sliceutils" - "github.com/stackrox/rox/pkg/uuid" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" -) - -const ( - writableHostMountPolicyName = "Writeable Host Mount" - anyHostPathPolicyName = "Any Host Path" -) - -func changeName(p *storage.Policy, newName string) *storage.Policy { - p.Name = newName - return p -} - -func enhancedDeployment(dep *storage.Deployment, images []*storage.Image) EnhancedDeployment { - return EnhancedDeployment{ - Deployment: dep, - Images: images, - NetworkPoliciesApplied: &augmentedobjs.NetworkPoliciesApplied{ - HasIngressNetworkPolicy: true, - HasEgressNetworkPolicy: true, - }, - } -} - -func enhancedDeploymentWithNetworkPolicies(dep *storage.Deployment, images []*storage.Image, netpolApplied *augmentedobjs.NetworkPoliciesApplied) EnhancedDeployment { - return EnhancedDeployment{ - Deployment: dep, - Images: images, - NetworkPoliciesApplied: netpolApplied, - } -} - -func TestDefaultPolicies(t *testing.T) { - t.Setenv(features.CVEFixTimestampCriteria.EnvVar(), "true") - suite.Run(t, new(DefaultPoliciesTestSuite)) -} - -type DefaultPoliciesTestSuite struct { - suite.Suite - - defaultPolicies map[string]*storage.Policy - customPolicies map[string]*storage.Policy - - deployments map[string]*storage.Deployment - images map[string]*storage.Image - deploymentsToImages map[string][]*storage.Image - deploymentsToIndicators map[string][]*storage.ProcessIndicator -} - -func (suite *DefaultPoliciesTestSuite) SetupSuite() { - defaultPolicies, err := policies.DefaultPolicies() - suite.Require().NoError(err) - - suite.defaultPolicies = make(map[string]*storage.Policy, len(defaultPolicies)) - for _, p := range defaultPolicies { - suite.defaultPolicies[p.GetName()] = p - } - - suite.customPolicies = make(map[string]*storage.Policy) - for _, customPolicy := range []*storage.Policy{ - changeName(policyWithSingleKeyValue(fieldnames.WritableHostMount, "true", false), writableHostMountPolicyName), - changeName(policyWithSingleKeyValue(fieldnames.VolumeType, "hostpath", false), anyHostPathPolicyName), - } { - suite.customPolicies[customPolicy.GetName()] = customPolicy - } -} - -func (suite *DefaultPoliciesTestSuite) TearDownSuite() {} - -func (suite *DefaultPoliciesTestSuite) SetupTest() { - suite.deployments = make(map[string]*storage.Deployment) - suite.images = make(map[string]*storage.Image) - suite.deploymentsToImages = make(map[string][]*storage.Image) - suite.deploymentsToIndicators = make(map[string][]*storage.ProcessIndicator) -} - -func (suite *DefaultPoliciesTestSuite) imageIDFromDep(deployment *storage.Deployment) string { - suite.Require().Len(deployment.GetContainers(), 1, "This function only supports deployments with exactly one container") - id := deployment.GetContainers()[0].GetImage().GetId() - suite.NotEmpty(id, "Deployment '%s' had no image id", protocompat.MarshalTextString(deployment)) - return id -} - -func (suite *DefaultPoliciesTestSuite) TestNVDCVSSCriteria() { - heartbleedDep := &storage.Deployment{ - Id: "HEARTBLEEDDEPID", - Containers: []*storage.Container{ - { - Name: 
"nginx", - SecurityContext: &storage.SecurityContext{Privileged: true}, - Image: &storage.ContainerImage{Id: "HEARTBLEEDDEPSHA"}, - }, - }, - } - - ts := time.Now().AddDate(0, 0, -5) - protoTs, err := protocompat.ConvertTimeToTimestampOrError(ts) - require.NoError(suite.T(), err) - - suite.addDepAndImages(heartbleedDep, &storage.Image{ - Id: "HEARTBLEEDDEPSHA", - Name: &storage.ImageName{FullName: "heartbleed"}, - Scan: &storage.ImageScan{ - Components: []*storage.EmbeddedImageScanComponent{ - {Name: "heartbleed", Version: "1.2", Vulns: []*storage.EmbeddedVulnerability{ - {Cve: "CVE-2014-0160", Link: "https://heartbleed", Cvss: 6, NvdCvss: 8, SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{FixedBy: "v1.2"}, - FirstImageOccurrence: protoTs}, - }}, - }, - }, - }) - - nvdCvssPolicyGroup := &storage.PolicyGroup{ - FieldName: fieldnames.NvdCvss, - Values: []*storage.PolicyValue{ - { - Value: "> 6", - }, - }, - } - - policy := policyWithGroups(storage.EventSource_NOT_APPLICABLE, nvdCvssPolicyGroup) - - deployment := suite.deployments["HEARTBLEEDDEPID"] - depMatcher, err := BuildDeploymentMatcher(policy) - require.NoError(suite.T(), err) - violations, err := depMatcher.MatchDeployment(nil, enhancedDeployment(deployment, suite.getImagesForDeployment(deployment))) - require.Len(suite.T(), violations.AlertViolations, 1) - require.NoError(suite.T(), err) - require.Contains(suite.T(), violations.AlertViolations[0].GetMessage(), "NVD CVSS") - -} - -func (suite *DefaultPoliciesTestSuite) TestFixableAndImageFirstOccurenceCriteria() { - heartbleedDep := &storage.Deployment{ - Id: "HEARTBLEEDDEPID", - Containers: []*storage.Container{ - { - Name: "nginx", - SecurityContext: &storage.SecurityContext{Privileged: true}, - Image: &storage.ContainerImage{Id: "HEARTBLEEDDEPSHA"}, - }, - }, - } - - ts := time.Now().AddDate(0, 0, -5) - protoTs, err := protocompat.ConvertTimeToTimestampOrError(ts) - require.NoError(suite.T(), err) - - suite.addDepAndImages(heartbleedDep, &storage.Image{ - Id: "HEARTBLEEDDEPSHA", - Name: &storage.ImageName{FullName: "heartbleed"}, - Scan: &storage.ImageScan{ - Components: []*storage.EmbeddedImageScanComponent{ - {Name: "heartbleed", Version: "1.2", Vulns: []*storage.EmbeddedVulnerability{ - {Cve: "CVE-2014-0160", Link: "https://heartbleed", Cvss: 6, SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{FixedBy: "v1.2"}, - FirstImageOccurrence: protoTs}, - }}, - }, - }, - }) - - fixablePolicyGroup := &storage.PolicyGroup{ - FieldName: fieldnames.Fixable, - Values: []*storage.PolicyValue{{Value: "true"}}, - } - firstImageOccurrenceGroup := &storage.PolicyGroup{ - FieldName: fieldnames.DaysSinceImageFirstDiscovered, - Values: []*storage.PolicyValue{{Value: "2"}}, - } - - policy := policyWithGroups(storage.EventSource_NOT_APPLICABLE, fixablePolicyGroup, firstImageOccurrenceGroup) - - deployment := suite.deployments["HEARTBLEEDDEPID"] - depMatcher, err := BuildDeploymentMatcher(policy) - require.NoError(suite.T(), err) - violations, err := depMatcher.MatchDeployment(nil, enhancedDeployment(deployment, suite.getImagesForDeployment(deployment))) - require.Len(suite.T(), violations.AlertViolations, 1) - require.NoError(suite.T(), err) - -} - -func (suite *DefaultPoliciesTestSuite) TestFixableAndFixTimestampAvailableCriteria() { - heartbleedDep := &storage.Deployment{ - Id: "HEARTBLEEDDEPID", - Containers: []*storage.Container{ - { - Name: "nginx", - SecurityContext: &storage.SecurityContext{Privileged: true}, - Image: &storage.ContainerImage{Id: "HEARTBLEEDDEPSHA"}, - }, - }, - } - - ts := 
time.Now().AddDate(0, 0, -5) - protoTs, err := protocompat.ConvertTimeToTimestampOrError(ts) - require.NoError(suite.T(), err) - - suite.addDepAndImages(heartbleedDep, &storage.Image{ - Id: "HEARTBLEEDDEPSHA", - Name: &storage.ImageName{FullName: "heartbleed"}, - Scan: &storage.ImageScan{ - Components: []*storage.EmbeddedImageScanComponent{ - {Name: "heartbleed", Version: "1.2", Vulns: []*storage.EmbeddedVulnerability{ - {Cve: "CVE-2014-0160", Link: "https://heartbleed", Cvss: 6, SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{FixedBy: "v1.2"}, - FixAvailableTimestamp: protoTs}, - }}, - }, - }, - }) - - fixablePolicyGroup := &storage.PolicyGroup{ - FieldName: fieldnames.Fixable, - Values: []*storage.PolicyValue{{Value: "true"}}, - } - fixTimestampAvailableGroup := &storage.PolicyGroup{ - FieldName: fieldnames.DaysSinceFixAvailable, - Values: []*storage.PolicyValue{{Value: "2"}}, - } - - policy := policyWithGroups(storage.EventSource_NOT_APPLICABLE, fixablePolicyGroup, fixTimestampAvailableGroup) - - deployment := suite.deployments["HEARTBLEEDDEPID"] - depMatcher, err := BuildDeploymentMatcher(policy) - require.NoError(suite.T(), err) - violations, err := depMatcher.MatchDeployment(nil, enhancedDeployment(deployment, suite.getImagesForDeployment(deployment))) - require.Len(suite.T(), violations.AlertViolations, 1) - require.NoError(suite.T(), err) - -} - -func (suite *DefaultPoliciesTestSuite) TestDaysSinceCVEPublishedCriteria() { - heartbleedDep := &storage.Deployment{ - Id: "HEARTBLEEDDEPID", - Containers: []*storage.Container{ - { - Name: "nginx", - SecurityContext: &storage.SecurityContext{Privileged: true}, - Image: &storage.ContainerImage{Id: "HEARTBLEEDDEPSHA"}, - }, - }, - } - - ts := time.Now().AddDate(0, 0, -5) - protoTs, err := protocompat.ConvertTimeToTimestampOrError(ts) - require.NoError(suite.T(), err) - - suite.addDepAndImages(heartbleedDep, &storage.Image{ - Id: "HEARTBLEEDDEPSHA", - Name: &storage.ImageName{FullName: "heartbleed"}, - Scan: &storage.ImageScan{ - Components: []*storage.EmbeddedImageScanComponent{ - {Name: "heartbleed", Version: "1.2", Vulns: []*storage.EmbeddedVulnerability{ - {Cve: "CVE-2014-0160", Link: "https://heartbleed", Cvss: 6, SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{FixedBy: "v1.2"}, - PublishedOn: protoTs}, - }}, - }, - }, - }) - - fixablePolicyGroup := &storage.PolicyGroup{ - FieldName: fieldnames.Fixable, - Values: []*storage.PolicyValue{{Value: "true"}}, - } - cvePublishedGroup := &storage.PolicyGroup{ - FieldName: fieldnames.DaysSincePublished, - Values: []*storage.PolicyValue{{Value: "2"}}, - } - - policy := policyWithGroups(storage.EventSource_NOT_APPLICABLE, fixablePolicyGroup, cvePublishedGroup) - - deployment := suite.deployments["HEARTBLEEDDEPID"] - depMatcher, err := BuildDeploymentMatcher(policy) - require.NoError(suite.T(), err) - violations, err := depMatcher.MatchDeployment(nil, enhancedDeployment(deployment, suite.getImagesForDeployment(deployment))) - require.Len(suite.T(), violations.AlertViolations, 1) - require.NoError(suite.T(), err) - -} - -func (suite *DefaultPoliciesTestSuite) TestNoDuplicatePolicyIDs() { - ids := set.NewStringSet() - for _, p := range suite.defaultPolicies { - suite.True(ids.Add(p.GetId())) - } -} - -func (suite *DefaultPoliciesTestSuite) MustGetPolicy(name string) *storage.Policy { - p := suite.defaultPolicies[name] - if p != nil { - return p - } - p = suite.customPolicies[name] - if p != nil { - return p - } - suite.FailNow("Policy not found: ", name) - return nil -} - -func (suite 
*DefaultPoliciesTestSuite) addDepAndImages(deployment *storage.Deployment, images ...*storage.Image) { - suite.deployments[deployment.GetId()] = deployment - for _, i := range images { - suite.images[i.GetId()] = i - suite.deploymentsToImages[deployment.GetId()] = append(suite.deploymentsToImages[deployment.GetId()], i) - } -} - -func (suite *DefaultPoliciesTestSuite) addImage(img *storage.Image) *storage.Image { - suite.images[img.GetId()] = img - return img -} - -func imageWithComponents(components []*storage.EmbeddedImageScanComponent) *storage.Image { - return &storage.Image{ - Id: uuid.NewV4().String(), - Name: &storage.ImageName{FullName: "docker.io/ASFASF", Remote: "ASFASF"}, - Scan: &storage.ImageScan{ - Components: components, - }, - } -} - -func imageWithLayers(layers []*storage.ImageLayer) *storage.Image { - return &storage.Image{ - Id: uuid.NewV4().String(), - Name: &storage.ImageName{FullName: "docker.io/ASFASF", Remote: "ASFASF"}, - Metadata: &storage.ImageMetadata{ - V1: &storage.V1Metadata{ - Layers: layers, - }, - }, - } -} - -func imageWithOS(os string) *storage.Image { - return &storage.Image{ - Id: uuid.NewV4().String(), - Name: &storage.ImageName{FullName: "docker.io/ASFASF", Remote: "ASFASF"}, - Scan: &storage.ImageScan{ - OperatingSystem: os, - }, - } -} - -func (suite *DefaultPoliciesTestSuite) imageWithSignatureVerificationResults(name string, results []*storage.ImageSignatureVerificationResult) *storage.Image { - // Use util to populate registry, remote and tag - imageName, _, err := imgUtils.GenerateImageNameFromString(name) - if err != nil { - suite.T().Fatalf("failed to parse image name %q: %v", name, err) - } - - // Restore fullName to the passed string, to maintain original behavior - imageName.FullName = name - - img := &storage.Image{ - Id: uuid.NewV4().String(), - Name: imageName, - } - - if results != nil { - img.SignatureVerificationData = &storage.ImageSignatureVerificationData{ - Results: results, - } - } - return img -} - -func deploymentWithImageAnyID(img *storage.Image) *storage.Deployment { - return deploymentWithImage(uuid.NewV4().String(), img) -} - -func deploymentWithImage(id string, img *storage.Image) *storage.Deployment { - remoteSplit := strings.Split(img.GetName().GetFullName(), "/") - alphaOnly := regexp.MustCompile("[^A-Za-z]+") - containerName := alphaOnly.ReplaceAllString(remoteSplit[len(remoteSplit)-1], "") - return &storage.Deployment{ - Id: id, - Containers: []*storage.Container{{Id: img.GetId(), Name: containerName, Image: types.ToContainerImage(img)}}, - } -} - -func (suite *DefaultPoliciesTestSuite) addIndicator(deploymentID, name, args, path string, lineage []string, uid uint32) *storage.ProcessIndicator { - deployment := suite.deployments[deploymentID] - if len(deployment.GetContainers()) == 0 { - deployment.Containers = []*storage.Container{{Name: uuid.NewV4().String()}} - } - lineageInfo := make([]*storage.ProcessSignal_LineageInfo, len(lineage)) - for i, ancestor := range lineage { - lineageInfo[i] = &storage.ProcessSignal_LineageInfo{ - ParentExecFilePath: ancestor, - } - } - indicator := &storage.ProcessIndicator{ - Id: uuid.NewV4().String(), - DeploymentId: deploymentID, - ContainerName: deployment.GetContainers()[0].GetName(), - Signal: &storage.ProcessSignal{ - Name: name, - Args: args, - ExecFilePath: path, - Time: protocompat.TimestampNow(), - LineageInfo: lineageInfo, - Uid: uid, - }, - } - suite.deploymentsToIndicators[deploymentID] = append(suite.deploymentsToIndicators[deploymentID], indicator) - return indicator 
-} - -type testCase struct { - policyName string - expectedViolations map[string][]*storage.Alert_Violation - expectedProcessViolations map[string][]*storage.ProcessIndicator - - // If shouldNotMatch is specified (which is the case for policies that check for the absence of something), we verify that - // it matches everything except shouldNotMatch. - // If sampleViolationForMatched is provided, we verify that all the matches are the string provided in sampleViolationForMatched. - shouldNotMatch map[string]struct{} - sampleViolationForMatched string - allowUnvalidatedViolations bool -} - -func (suite *DefaultPoliciesTestSuite) getImagesForDeployment(deployment *storage.Deployment) []*storage.Image { - images := suite.deploymentsToImages[deployment.GetId()] - if len(images) == 0 { - return make([]*storage.Image, len(deployment.GetContainers())) - } - suite.Equal(len(deployment.GetContainers()), len(images)) - return images -} - -func getViolationsWithAndWithoutCaching(t *testing.T, matcher func(cache *CacheReceptacle) (Violations, error)) Violations { - violations, err := matcher(nil) - require.NoError(t, err) - - var cache CacheReceptacle - violationsWithEmptyCache, err := matcher(&cache) - require.NoError(t, err) - assertViolations(t, violations, violationsWithEmptyCache) - - violationsWithNonEmptyCache, err := matcher(&cache) - require.NoError(t, err) - assertViolations(t, violations, violationsWithNonEmptyCache) - - return violations -} - -func (suite *DefaultPoliciesTestSuite) TestDefaultPolicies() { - fixtureDep := fixtures.GetDeployment() - fixturesImages := fixtures.DeploymentImages() - - suite.addDepAndImages(fixtureDep, fixturesImages...) - - nginx110 := &storage.Image{ - Id: "SHANGINX110", - Name: &storage.ImageName{ - Registry: "docker.io", - Remote: "library/nginx", - Tag: "1.10", - FullName: "docker.io/library/nginx:1.10", - }, - } - - nginx110Dep := deploymentWithImage("nginx110", nginx110) - suite.addDepAndImages(nginx110Dep, nginx110) - - oldScannedTime := time.Now().Add(-31 * 24 * time.Hour) - - oldScannedImage := &storage.Image{ - Id: "SHAOLDSCANNED", - Name: &storage.ImageName{ - FullName: "docker.io/stackrox/old-scanned-image:0.1", - }, - Scan: &storage.ImageScan{ - ScanTime: protoconv.ConvertTimeToTimestamp(oldScannedTime), - }, - } - oldScannedDep := deploymentWithImage("oldscanned", oldScannedImage) - suite.addDepAndImages(oldScannedDep, oldScannedImage) - - addDockerFileImg := imageWithLayers([]*storage.ImageLayer{ - { - Instruction: "ADD", - Value: "deploy.sh", - }, - { - Instruction: "RUN", - Value: "deploy.sh", - }, - }) - addDockerFileDep := deploymentWithImageAnyID(addDockerFileImg) - suite.addDepAndImages(addDockerFileDep, addDockerFileImg) - - imagePort22Image := imageWithLayers([]*storage.ImageLayer{ - { - Instruction: "EXPOSE", - Value: "22/tcp", - }, - }) - imagePort22Dep := deploymentWithImageAnyID(imagePort22Image) - suite.addDepAndImages(imagePort22Dep, imagePort22Image) - - insecureCMDImage := imageWithLayers([]*storage.ImageLayer{ - { - Instruction: "CMD", - Value: "do an insecure thing", - }, - }) - - insecureCMDDep := deploymentWithImageAnyID(insecureCMDImage) - suite.addDepAndImages(insecureCMDDep, insecureCMDImage) - - runSecretsImage := imageWithLayers([]*storage.ImageLayer{ - { - Instruction: "VOLUME", - Value: "/run/secrets", - }, - }) - runSecretsArrayImage := imageWithLayers([]*storage.ImageLayer{ - { - Instruction: "VOLUME", - Value: "[/run/secrets]", - }, - }) - runSecretsListImage := imageWithLayers([]*storage.ImageLayer{ - { - 
Instruction: "VOLUME", - Value: "/var/something /run/secrets", - }, - }) - runSecretsArrayListImage := imageWithLayers([]*storage.ImageLayer{ - { - Instruction: "VOLUME", - Value: "[/var/something /run/secrets]", - }, - }) - runSecretsDep := deploymentWithImageAnyID(runSecretsImage) - runSecretsArrayDep := deploymentWithImageAnyID(runSecretsArrayImage) - runSecretsListDep := deploymentWithImageAnyID(runSecretsListImage) - runSecretsArrayListDep := deploymentWithImageAnyID(runSecretsArrayListImage) - suite.addDepAndImages(runSecretsDep, runSecretsImage) - suite.addDepAndImages(runSecretsArrayDep, runSecretsArrayImage) - suite.addDepAndImages(runSecretsListDep, runSecretsListImage) - suite.addDepAndImages(runSecretsArrayListDep, runSecretsArrayListImage) - - oldImageCreationTime := time.Now().Add(-100 * 24 * time.Hour) - oldCreatedImage := &storage.Image{ - Id: "SHA:OLDCREATEDIMAGE", - Name: &storage.ImageName{ - FullName: "docker.io/stackrox/old-image:0.1", - }, - Metadata: &storage.ImageMetadata{ - V1: &storage.V1Metadata{ - Created: protoconv.ConvertTimeToTimestamp(oldImageCreationTime), - }, - }, - } - oldImageDep := deploymentWithImage("oldimagedep", oldCreatedImage) - suite.addDepAndImages(oldImageDep, oldCreatedImage) - - apkImage := imageWithComponents([]*storage.EmbeddedImageScanComponent{ - {Name: "apk-tools", Version: "1.2"}, - {Name: "asfa", Version: "1.5"}, - }) - apkDep := deploymentWithImageAnyID(apkImage) - suite.addDepAndImages(apkDep, apkImage) - - curlImage := imageWithComponents([]*storage.EmbeddedImageScanComponent{ - {Name: "curl", Version: "1.3"}, - {Name: "curlwithextra", Version: "0.9"}, - }) - curlDep := deploymentWithImageAnyID(curlImage) - suite.addDepAndImages(curlDep, curlImage) - - componentDeps := make(map[string]*storage.Deployment) - for _, component := range []string{"apt", "dnf", "wget"} { - img := imageWithComponents([]*storage.EmbeddedImageScanComponent{ - {Name: component}, - }) - dep := deploymentWithImageAnyID(img) - suite.addDepAndImages(dep, img) - componentDeps[component] = dep - } - - heartbleedDep := &storage.Deployment{ - Id: "HEARTBLEEDDEPID", - Containers: []*storage.Container{ - { - Name: "nginx", - SecurityContext: &storage.SecurityContext{Privileged: true}, - Image: &storage.ContainerImage{Id: "HEARTBLEEDDEPSHA"}, - }, - }, - } - suite.addDepAndImages(heartbleedDep, &storage.Image{ - Id: "HEARTBLEEDDEPSHA", - Name: &storage.ImageName{FullName: "heartbleed"}, - Scan: &storage.ImageScan{ - Components: []*storage.EmbeddedImageScanComponent{ - {Name: "heartbleed", Version: "1.2", Vulns: []*storage.EmbeddedVulnerability{ - {Cve: "CVE-2014-0160", Link: "https://heartbleed", Cvss: 6, SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{FixedBy: "v1.2"}}, - }}, - }, - }, - }) - - requiredImageLabel := &storage.Deployment{ - Id: "requiredImageLabel", - Containers: []*storage.Container{ - { - Name: "REQUIREDIMAGELABEL", - Image: &storage.ContainerImage{Id: "requiredImageLabelImage"}, - }, - }, - } - suite.addDepAndImages(requiredImageLabel, &storage.Image{ - Id: "requiredImageLabelImage", - Name: &storage.ImageName{ - FullName: "docker.io/stackrox/required-image:0.1", - }, - Metadata: &storage.ImageMetadata{ - V1: &storage.V1Metadata{ - Labels: map[string]string{ - "required-label": "required-value", - }, - }, - }, - }) - - shellshockImage := imageWithComponents([]*storage.EmbeddedImageScanComponent{ - {Name: "shellshock", Version: "1.2", Vulns: []*storage.EmbeddedVulnerability{ - {Cve: "CVE-2014-6271", Link: "https://shellshock", Cvss: 6}, - {Cve: 
"CVE-ARBITRARY", Link: "https://notshellshock"}, - }}, - }) - shellshockDep := deploymentWithImageAnyID(shellshockImage) - suite.addDepAndImages(shellshockDep, shellshockImage) - - suppressedShellshockImage := imageWithComponents([]*storage.EmbeddedImageScanComponent{ - {Name: "shellshock", Version: "1.2", Vulns: []*storage.EmbeddedVulnerability{ - {Cve: "CVE-2014-6271", Link: "https://shellshock", Cvss: 6, Suppressed: true}, - {Cve: "CVE-ARBITRARY", Link: "https://notshellshock"}, - }}, - }) - suppressedShellShockDep := deploymentWithImageAnyID(suppressedShellshockImage) - suite.addDepAndImages(suppressedShellShockDep, suppressedShellshockImage) - - strutsImage := imageWithComponents([]*storage.EmbeddedImageScanComponent{ - {Name: "struts", Version: "1.2", Vulns: []*storage.EmbeddedVulnerability{ - {Cve: "CVE-2017-5638", Link: "https://struts", Cvss: 8, Severity: storage.VulnerabilitySeverity_IMPORTANT_VULNERABILITY_SEVERITY, SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{FixedBy: "v1.3"}}, - }}, - {Name: "OTHER", Version: "1.3", Vulns: []*storage.EmbeddedVulnerability{ - {Cve: "CVE-1223-451", Link: "https://cvefake"}, - }}, - }) - strutsDep := deploymentWithImageAnyID(strutsImage) - suite.addDepAndImages(strutsDep, strutsImage) - - strutsImageSuppressed := imageWithComponents([]*storage.EmbeddedImageScanComponent{ - {Name: "struts", Version: "1.2", Vulns: []*storage.EmbeddedVulnerability{ - {Cve: "CVE-2017-5638", Link: "https://struts", Suppressed: true, Cvss: 8, Severity: storage.VulnerabilitySeverity_IMPORTANT_VULNERABILITY_SEVERITY, SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{FixedBy: "v1.3"}}, - }}, - {Name: "OTHER", Version: "1.3", Vulns: []*storage.EmbeddedVulnerability{ - {Cve: "CVE-1223-451", Link: "https://cvefake"}, - }}, - }) - strutsDepSuppressed := deploymentWithImageAnyID(strutsImageSuppressed) - suite.addDepAndImages(strutsDepSuppressed, strutsImageSuppressed) - - // When image is pull out, the deferred field is set based upon the legacy suppressed field. Therefore, both are set. - // However, here we are specifically testing whether detection is taking the new vulnerability state field into - // account by not setting the suppressed field. 
- structImageWithDeferredVulns := imageWithComponents([]*storage.EmbeddedImageScanComponent{ - {Name: "deferred-struts", Version: "1.2", Vulns: []*storage.EmbeddedVulnerability{ - {Cve: "CVE-2017-5638", Link: "https://struts", State: storage.VulnerabilityState_DEFERRED, Cvss: 8, Severity: storage.VulnerabilitySeverity_IMPORTANT_VULNERABILITY_SEVERITY, SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{FixedBy: "v1.3"}}, - {Cve: "CVE-2017-FP", Link: "https://struts", State: storage.VulnerabilityState_FALSE_POSITIVE, Cvss: 8, Severity: storage.VulnerabilitySeverity_IMPORTANT_VULNERABILITY_SEVERITY, SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{FixedBy: "v1.3"}}, - {Cve: "CVE-2017-FAKE", Link: "https://struts", Cvss: 8, Severity: storage.VulnerabilitySeverity_IMPORTANT_VULNERABILITY_SEVERITY, SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{FixedBy: "v1.3"}}, - }}, - }) - structDepWithDeferredVulns := deploymentWithImageAnyID(structImageWithDeferredVulns) - suite.addDepAndImages(structDepWithDeferredVulns, structImageWithDeferredVulns) - - depWithNonSeriousVulnsImage := imageWithComponents([]*storage.EmbeddedImageScanComponent{ - {Name: "NOSERIOUS", Version: "2.3", Vulns: []*storage.EmbeddedVulnerability{ - {Cve: "CVE-1234-5678", Link: "https://abcdefgh"}, - {Cve: "CVE-5678-1234", Link: "https://lmnopqrst"}, - }}, - }) - depWithNonSeriousVulns := deploymentWithImageAnyID(depWithNonSeriousVulnsImage) - suite.addDepAndImages(depWithNonSeriousVulns, depWithNonSeriousVulnsImage) - - dockerSockDep := &storage.Deployment{ - Id: "DOCKERSOCDEP", - Containers: []*storage.Container{ - { - Name: "dockersock", - Volumes: []*storage.Volume{ - {Source: "/var/run/docker.sock", Name: "DOCKERSOCK", Type: "HostPath", ReadOnly: true}, - {Source: "NOTDOCKERSOCK"}, - }}, - }, - } - suite.addDepAndImages(dockerSockDep) - - crioSockDep := &storage.Deployment{ - Id: "CRIOSOCDEP", - Containers: []*storage.Container{ - { - Name: "criosock", - Volumes: []*storage.Volume{ - {Source: "/run/crio/crio.sock", Name: "CRIOSOCK", Type: "HostPath", ReadOnly: true}, - {Source: "NOTCRIORSOCK"}, - }}, - }, - } - suite.addDepAndImages(crioSockDep) - - containerPort22Dep := &storage.Deployment{ - Id: "CONTAINERPORT22DEP", - Ports: []*storage.PortConfig{ - {Protocol: "TCP", ContainerPort: 22}, - {Protocol: "UDP", ContainerPort: 4125}, - }, - } - suite.addDepAndImages(containerPort22Dep) - - secretEnvDep := &storage.Deployment{ - Id: "SECRETENVDEP", - Containers: []*storage.Container{ - { - Name: "secretenv", - Config: &storage.ContainerConfig{ - Env: []*storage.ContainerConfig_EnvironmentConfig{ - {Key: "THIS_IS_SECRET_VAR", Value: "stealthmode", EnvVarSource: storage.ContainerConfig_EnvironmentConfig_RAW}, - {Key: "HOME", Value: "/home/stackrox"}, - }, - }}, - }, - } - suite.addDepAndImages(secretEnvDep) - - secretEnvSrcUnsetDep := &storage.Deployment{ - Id: "SECRETENVSRCUNSETDEP", - Containers: []*storage.Container{ - { - Name: "secretenvsrcunset", - Config: &storage.ContainerConfig{ - Env: []*storage.ContainerConfig_EnvironmentConfig{ - {Key: "THIS_IS_SECRET_VAR", Value: "stealthmode"}, - }, - }}, - }, - } - suite.addDepAndImages(secretEnvSrcUnsetDep) - - secretKeyRefDep := &storage.Deployment{ - Id: "SECRETKEYREFDEP", - Containers: []*storage.Container{ - {Config: &storage.ContainerConfig{ - Env: []*storage.ContainerConfig_EnvironmentConfig{ - {Key: "THIS_IS_SECRET_VAR", EnvVarSource: storage.ContainerConfig_EnvironmentConfig_SECRET_KEY}, - {Key: "HOME", Value: "/home/stackrox"}, - }, - }}, - }, - } - 
suite.addDepAndImages(secretKeyRefDep) - - // Fake deployment that shouldn't match anything, just to make sure - // that none of our queries will accidentally match it. - suite.addDepAndImages(&storage.Deployment{Id: "FAKEID", Name: "FAKENAME"}) - - depWithGoodEmailAnnotation := &storage.Deployment{ - Id: "GOODEMAILDEPID", - Annotations: map[string]string{ - "email": "vv@stackrox.com", - }, - } - suite.addDepAndImages(depWithGoodEmailAnnotation) - - depWithOwnerAnnotation := &storage.Deployment{ - Id: "OWNERANNOTATIONDEP", - Annotations: map[string]string{ - "owner": "IOWNTHIS", - "blah": "Blah", - }, - } - suite.addDepAndImages(depWithOwnerAnnotation) - - depWithOwnerLabel := &storage.Deployment{ - Id: "OWNERLABELDEP", - Labels: map[string]string{ - "owner": "IOWNTHIS", - "blah": "Blah", - }, - } - suite.addDepAndImages(depWithOwnerLabel) - - depWitharbitraryAnnotations := &storage.Deployment{ - Id: "ARBITRARYANNOTATIONDEPID", - Annotations: map[string]string{ - "emailnot": "vv@stackrox.com", - "notemail": "vv@stackrox.com", - "ownernot": "vv", - "nowner": "vv", - }, - } - suite.addDepAndImages(depWitharbitraryAnnotations) - - depWithBadEmailAnnotation := &storage.Deployment{ - Id: "BADEMAILDEPID", - Annotations: map[string]string{ - "email": "NOTANEMAIL", - }, - } - suite.addDepAndImages(depWithBadEmailAnnotation) - - sysAdminDep := &storage.Deployment{ - Id: "SYSADMINDEPID", - Containers: []*storage.Container{ - { - Name: "cap-sys", - SecurityContext: &storage.SecurityContext{ - AddCapabilities: []string{"SYS_ADMIN"}, - }, - }, - }, - } - suite.addDepAndImages(sysAdminDep) - - depWithAllResourceLimitsRequestsSpecified := &storage.Deployment{ - Id: "ALLRESOURCESANDLIMITSDEP", - Containers: []*storage.Container{ - {Resources: &storage.Resources{ - CpuCoresRequest: 0.1, - CpuCoresLimit: 0.3, - MemoryMbLimit: 100, - MemoryMbRequest: 1251, - }}, - }, - } - suite.addDepAndImages(depWithAllResourceLimitsRequestsSpecified) - - depWithEnforcementBypassAnnotation := &storage.Deployment{ - Id: "ENFORCEMENTBYPASS", - Annotations: map[string]string{ - "admission.stackrox.io/break-glass": "ticket-1234", - "some-other": "annotation", - }, - } - suite.addDepAndImages(depWithEnforcementBypassAnnotation) - - hostMountDep := &storage.Deployment{ - Id: "HOSTMOUNT", - Containers: []*storage.Container{ - { - Name: "hostmount", - Volumes: []*storage.Volume{ - {Source: "/etc/passwd", Name: "HOSTMOUNT", Type: "HostPath"}, - {Source: "/var/lib/kubelet", Name: "KUBELET", Type: "HostPath", ReadOnly: true}, - }}, - }, - } - suite.addDepAndImages(hostMountDep) - - hostPIDDep := &storage.Deployment{ - Id: "HOSTPID", - HostPid: true, - } - suite.addDepAndImages(hostPIDDep) - - hostIPCDep := &storage.Deployment{ - Id: "HOSTIPC", - HostIpc: true, - } - suite.addDepAndImages(hostIPCDep) - - imgWithFixedByEmpty := suite.addImage(imageWithComponents([]*storage.EmbeddedImageScanComponent{ - {Name: "EXplicitlyEmptyFixedBy", Version: "2.3", Vulns: []*storage.EmbeddedVulnerability{ - {Cve: "CVE-1234-5678", Cvss: 8, Link: "https://abcdefgh", SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{}}, - }}, - })) - - imgWithFixedByEmptyOnlyForSome := suite.addImage(imageWithComponents([]*storage.EmbeddedImageScanComponent{ - {Name: "EXplicitlyEmptyFixedBy", Version: "2.3", Vulns: []*storage.EmbeddedVulnerability{ - {Cve: "CVE-1234-5678", Cvss: 8, Severity: storage.VulnerabilitySeverity_CRITICAL_VULNERABILITY_SEVERITY, Link: "https://abcdefgh", SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{}}, - }}, - {Name: "Normal", Version: 
"2.3", Vulns: []*storage.EmbeddedVulnerability{ - {Cve: "CVE-5612-1245", Cvss: 8, Severity: storage.VulnerabilitySeverity_CRITICAL_VULNERABILITY_SEVERITY, Link: "https://abcdefgh", SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{FixedBy: "actually_fixable"}}, - }}, - })) - - rootUserImage := &storage.Image{ - Id: "SHA:ROOTUSERIMAGE", - Name: &storage.ImageName{ - FullName: "docker.io/stackrox/rootuser:0.1", - }, - Metadata: &storage.ImageMetadata{ - V1: &storage.V1Metadata{ - User: "root", - }, - }, - } - depWithRootUser := deploymentWithImageAnyID(rootUserImage) - suite.addDepAndImages(depWithRootUser, rootUserImage) - - updateInstructionImage := imageWithLayers([]*storage.ImageLayer{ - { - Instruction: "RUN", - Value: "apt-get update", - }, - }) - depWithUpdate := deploymentWithImageAnyID(updateInstructionImage) - suite.addDepAndImages(depWithUpdate, updateInstructionImage) - - restrictedHostPortDep := &storage.Deployment{ - Id: "RESTRICTEDHOSTPORT", - Ports: []*storage.PortConfig{ - { - ExposureInfos: []*storage.PortConfig_ExposureInfo{ - { - NodePort: 22, - }, - }, - }, - }, - } - - suite.addDepAndImages(restrictedHostPortDep) - - mountPropagationDep := &storage.Deployment{ - Id: "MOUNTPROPAGATIONDEP", - Containers: []*storage.Container{ - { - Id: "MOUNTPROPAGATIONCONTAINER", - Volumes: []*storage.Volume{ - { - Name: "ThisMountIsOnFire", - MountPropagation: storage.Volume_BIDIRECTIONAL, - }, - }, - }, - }, - } - suite.addDepAndImages(mountPropagationDep) - - noSeccompProfileDep := &storage.Deployment{ - Id: "NOSECCOMPPROFILEDEP", - Containers: []*storage.Container{ - { - SecurityContext: &storage.SecurityContext{ - SeccompProfile: &storage.SecurityContext_SeccompProfile{ - Type: storage.SecurityContext_SeccompProfile_UNCONFINED, - }, - }, - }, - }, - } - suite.addDepAndImages(noSeccompProfileDep) - - hostNetworkDep := &storage.Deployment{ - Id: "HOSTNETWORK", - HostNetwork: true, - } - suite.addDepAndImages(hostNetworkDep) - - noAppArmorProfileDep := &storage.Deployment{ - Id: "NOAPPARMORPROFILEDEP", - Containers: []*storage.Container{ - { - Name: "No AppArmor Profile", - Config: &storage.ContainerConfig{ - AppArmorProfile: "unconfined", - }, - }, - }, - } - suite.addDepAndImages(noAppArmorProfileDep) - - // Images "made by Red Hat" - coming from Red Hat registries or Red Hat remotes in quay.io - registryAccessRedhatComUnverifiedImg := suite.imageWithSignatureVerificationResults("registry.access.redhat.com/redhat/ubi8:latest", - []*storage.ImageSignatureVerificationResult{ - { - VerifierId: signatures.DefaultRedHatSignatureIntegration.GetId(), - Status: storage.ImageSignatureVerificationResult_FAILED_VERIFICATION, - }, - }, - ) - registryRedHatIoUnverifiedImg := suite.imageWithSignatureVerificationResults("registry.redhat.io/redhat/ubi8:latest", - []*storage.ImageSignatureVerificationResult{ - { - VerifierId: signatures.DefaultRedHatSignatureIntegration.GetId(), - Status: storage.ImageSignatureVerificationResult_FAILED_VERIFICATION, - }, - }, - ) - - quayOCPReleaseUnverifiedImg := suite.imageWithSignatureVerificationResults("quay.io/openshift-release-dev/ocp-release:latest", - []*storage.ImageSignatureVerificationResult{ - { - VerifierId: signatures.DefaultRedHatSignatureIntegration.GetId(), - Status: storage.ImageSignatureVerificationResult_FAILED_VERIFICATION, - }, - }, - ) - quayOCPArtDevUnverifiedImg := suite.imageWithSignatureVerificationResults("quay.io/openshift-release-dev/ocp-v4.0-art-dev:latest", - []*storage.ImageSignatureVerificationResult{ - { - VerifierId: 
signatures.DefaultRedHatSignatureIntegration.GetId(), - Status: storage.ImageSignatureVerificationResult_FAILED_VERIFICATION, - }, - }, - ) - - suite.addImage(registryAccessRedhatComUnverifiedImg) - suite.addImage(registryRedHatIoUnverifiedImg) - suite.addImage(quayOCPReleaseUnverifiedImg) - suite.addImage(quayOCPArtDevUnverifiedImg) - - // Index processes - bashLineage := []string{"/bin/bash"} - fixtureDepAptIndicator := suite.addIndicator(fixtureDep.GetId(), "apt", "", "/usr/bin/apt", bashLineage, 1) - sysAdminDepAptIndicator := suite.addIndicator(sysAdminDep.GetId(), "apt", "install blah", "/usr/bin/apt", bashLineage, 1) - - kubeletIndicator := suite.addIndicator(containerPort22Dep.GetId(), "curl", "-v -k -SL https://12.13.14.15:10250", "/bin/curl", bashLineage, 1) - kubeletIndicator2 := suite.addIndicator(containerPort22Dep.GetId(), "wget", "https://heapster.kube-system/metrics", "/bin/wget", bashLineage, 1) - kubeletIndicator3 := suite.addIndicator(containerPort22Dep.GetId(), "curl", "https://12.13.14.15:10250 -v -k", "/bin/curl", bashLineage, 1) - - crontabIndicator := suite.addIndicator(containerPort22Dep.GetId(), "crontab", "1 2 3 4 5 6", "/bin/crontab", bashLineage, 1) - - nmapIndicatorfixtureDep1 := suite.addIndicator(fixtureDep.GetId(), "nmap", "blah", "/usr/bin/nmap", bashLineage, 1) - nmapIndicatorfixtureDep2 := suite.addIndicator(fixtureDep.GetId(), "nmap", "blah2", "/usr/bin/nmap", bashLineage, 1) - nmapIndicatorNginx110Dep := suite.addIndicator(nginx110Dep.GetId(), "nmap", "", "/usr/bin/nmap", bashLineage, 1) - - ifconfigIndicatorfixtureDep1 := suite.addIndicator(fixtureDep.GetId(), "ifconfig", "blah", "/sbin/ifconfig", bashLineage, 1) - ifconfigIndicatorfixtureDep2 := suite.addIndicator(fixtureDep.GetId(), "ifconfig", "blah2", "/usr/bin/ifconfig", bashLineage, 1) - ipIndicatorfixtureDep := suite.addIndicator(fixtureDep.GetId(), "ip", "", "/sbin/ip", bashLineage, 1) - arpIndicatorfixtureDep := suite.addIndicator(fixtureDep.GetId(), "arp", "", "/usr/sbin/arp", bashLineage, 1) - ifconfigIndicatorNginx110Dep := suite.addIndicator(nginx110Dep.GetId(), "ifconfig", "", "/sbin/ifconfig", bashLineage, 1) - ipIndicatorNginx110Dep := suite.addIndicator(nginx110Dep.GetId(), "ip", "", "/sbin/ip", bashLineage, 1) - arpIndicatorNginx110Dep := suite.addIndicator(nginx110Dep.GetId(), "arp", "", "/usr/sbin/arp", bashLineage, 1) - // These two should not match for the Network Management Execution policy. 
See ROX-6011 - suite.addIndicator(fixtureDep.GetId(), "pip", "", "/usr/bin/pip", bashLineage, 1) - suite.addIndicator(nginx110Dep.GetId(), "pip", "", "/usr/bin/pip", bashLineage, 1) - - javaLineage := []string{"/bin/bash", "/mnt/scripts/run_server.sh", "/bin/java"} - fixtureDepJavaIndicator := suite.addIndicator(fixtureDep.GetId(), "/bin/bash", "-attack", "/bin/bash", javaLineage, 0) - - deploymentTestCases := []testCase{ - { - policyName: "Latest tag", - expectedViolations: map[string][]*storage.Alert_Violation{ - fixtureDep.GetId(): { - { - Message: "Container 'supervulnerable' has image with tag 'latest'", - }, - }, - }, - }, - { - policyName: "Alpine Linux Package Manager (apk) in Image", - expectedViolations: map[string][]*storage.Alert_Violation{ - apkDep.GetId(): { - { - Message: "Container 'ASFASF' includes component 'apk-tools' (version 1.2)", - }, - }, - }, - }, - { - policyName: "Ubuntu Package Manager in Image", - expectedViolations: map[string][]*storage.Alert_Violation{ - componentDeps["apt"].GetId(): { - { - Message: "Container 'ASFASF' includes component 'apt'", - }, - }, - }, - }, - { - policyName: "Curl in Image", - expectedViolations: map[string][]*storage.Alert_Violation{ - curlDep.GetId(): { - { - Message: "Container 'ASFASF' includes component 'curl' (version 1.3)", - }, - }, - }, - }, - { - policyName: "Red Hat Package Manager in Image", - expectedViolations: map[string][]*storage.Alert_Violation{ - componentDeps["dnf"].GetId(): { - { - Message: "Container 'ASFASF' includes component 'dnf'", - }, - }, - }, - }, - { - policyName: "Wget in Image", - expectedViolations: map[string][]*storage.Alert_Violation{ - componentDeps["wget"].GetId(): { - { - Message: "Container 'ASFASF' includes component 'wget'", - }, - }, - }, - }, - { - policyName: "Mount Container Runtime Socket", - expectedViolations: map[string][]*storage.Alert_Violation{ - dockerSockDep.GetId(): { - { - Message: "Read-only volume 'DOCKERSOCK' has source '/var/run/docker.sock' and type 'HostPath'", - }, - }, - crioSockDep.GetId(): { - { - Message: "Read-only volume 'CRIOSOCK' has source '/run/crio/crio.sock' and type 'HostPath'", - }, - }, - }, - }, - { - policyName: "90-Day Image Age", - expectedViolations: map[string][]*storage.Alert_Violation{ - oldImageDep.GetId(): { - { - Message: fmt.Sprintf("Container 'oldimage' has image created at %s (UTC)", readable.Time(oldImageCreationTime)), - }, - }, - }, - }, - { - policyName: "30-Day Scan Age", - expectedViolations: map[string][]*storage.Alert_Violation{ - oldScannedDep.GetId(): { - { - Message: fmt.Sprintf("Container 'oldscannedimage' has image last scanned at %s (UTC)", readable.Time(oldScannedTime)), - }, - }, - }, - }, - { - policyName: "Secure Shell (ssh) Port Exposed in Image", - expectedViolations: map[string][]*storage.Alert_Violation{ - imagePort22Dep.GetId(): { - { - Message: "Dockerfile line 'EXPOSE 22/tcp' found in container 'ASFASF'", - }, - }, - }, - }, - { - policyName: "Secure Shell (ssh) Port Exposed", - expectedViolations: map[string][]*storage.Alert_Violation{ - containerPort22Dep.GetId(): { - { - Message: "Exposed port 22/TCP is present", - }, - }, - }, - }, - { - policyName: "Privileged Container", - expectedViolations: map[string][]*storage.Alert_Violation{ - fixtureDep.GetId(): { - { - Message: "Container 'nginx110container' is privileged", - }, - }, - heartbleedDep.GetId(): { - { - Message: "Container 'nginx' is privileged", - }, - }, - }, - }, - { - policyName: "Container using read-write root filesystem", - expectedViolations: 
map[string][]*storage.Alert_Violation{ - heartbleedDep.GetId(): { - { - Message: "Container 'nginx' uses a read-write root filesystem", - }, - }, - fixtureDep.GetId(): { - { - Message: "Container 'nginx110container' uses a read-write root filesystem", - }, - }, - sysAdminDep.GetId(): { - { - Message: "Container 'cap-sys' uses a read-write root filesystem", - }, - }, - noSeccompProfileDep.GetId(): { - { - Message: "Container uses a read-write root filesystem", - }, - }, - }, - }, - { - policyName: "Insecure specified in CMD", - expectedViolations: map[string][]*storage.Alert_Violation{ - insecureCMDDep.GetId(): { - { - Message: "Dockerfile line 'CMD do an insecure thing' found in container 'ASFASF'", - }, - }, - }, - }, - { - policyName: "Improper Usage of Orchestrator Secrets Volume", - expectedViolations: map[string][]*storage.Alert_Violation{ - runSecretsDep.GetId(): { - { - Message: "Dockerfile line 'VOLUME /run/secrets' found in container 'ASFASF'", - }, - }, - runSecretsArrayDep.GetId(): { - { - Message: "Dockerfile line 'VOLUME [/run/secrets]' found in container 'ASFASF'", - }, - }, - runSecretsListDep.GetId(): { - { - Message: "Dockerfile line 'VOLUME /var/something /run/secrets' found in container 'ASFASF'", - }, - }, - runSecretsArrayListDep.GetId(): { - { - Message: "Dockerfile line 'VOLUME [/var/something /run/secrets]' found in container 'ASFASF'", - }, - }, - }, - }, - { - policyName: "Images with no scans", - shouldNotMatch: map[string]struct{}{ - // These deployments have scans on their images. - fixtureDep.GetId(): {}, - oldScannedDep.GetId(): {}, - heartbleedDep.GetId(): {}, - apkDep.GetId(): {}, - curlDep.GetId(): {}, - componentDeps["apt"].GetId(): {}, - componentDeps["dnf"].GetId(): {}, - componentDeps["wget"].GetId(): {}, - shellshockDep.GetId(): {}, - suppressedShellShockDep.GetId(): {}, - strutsDep.GetId(): {}, - strutsDepSuppressed.GetId(): {}, - structDepWithDeferredVulns.GetId(): {}, - depWithNonSeriousVulns.GetId(): {}, - // The rest of the deployments have no images! 
- "FAKEID": {}, - containerPort22Dep.GetId(): {}, - dockerSockDep.GetId(): {}, - crioSockDep.GetId(): {}, - secretEnvDep.GetId(): {}, - secretEnvSrcUnsetDep.GetId(): {}, - secretKeyRefDep.GetId(): {}, - depWithOwnerAnnotation.GetId(): {}, - depWithOwnerLabel.GetId(): {}, - depWithGoodEmailAnnotation.GetId(): {}, - depWithBadEmailAnnotation.GetId(): {}, - depWitharbitraryAnnotations.GetId(): {}, - sysAdminDep.GetId(): {}, - depWithAllResourceLimitsRequestsSpecified.GetId(): {}, - depWithEnforcementBypassAnnotation.GetId(): {}, - hostMountDep.GetId(): {}, - restrictedHostPortDep.GetId(): {}, - hostPIDDep.GetId(): {}, - hostIPCDep.GetId(): {}, - mountPropagationDep.GetId(): {}, - noSeccompProfileDep.GetId(): {}, - hostNetworkDep.GetId(): {}, - noAppArmorProfileDep.GetId(): {}, - }, - sampleViolationForMatched: "Image in container '%s' has not been scanned", - }, - { - policyName: "Required Annotation: Email", - shouldNotMatch: map[string]struct{}{ - depWithGoodEmailAnnotation.GetId(): {}, - }, - sampleViolationForMatched: "Required annotation not found (key = 'email', value = '[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+')", - }, - { - policyName: "Required Label: Owner/Team", - shouldNotMatch: map[string]struct{}{ - depWithOwnerLabel.GetId(): {}, - fixtureDep.GetId(): {}, - }, - sampleViolationForMatched: "Required label not found (key = 'owner|team', value = '.+')", - }, - { - policyName: "Required Annotation: Owner/Team", - shouldNotMatch: map[string]struct{}{ - depWithOwnerAnnotation.GetId(): {}, - fixtureDep.GetId(): {}, - }, - sampleViolationForMatched: "Required annotation not found (key = 'owner|team', value = '.+')", - }, - { - policyName: "CAP_SYS_ADMIN capability added", - expectedViolations: map[string][]*storage.Alert_Violation{ - sysAdminDep.GetId(): { - { - Message: "Container 'cap-sys' adds capability SYS_ADMIN", - }, - }, - fixtureDep.GetId(): { - { - Message: "Container 'nginx110container' adds capability SYS_ADMIN", - }, - }, - }, - }, - { - policyName: "Apache Struts: CVE-2017-5638", - expectedViolations: map[string][]*storage.Alert_Violation{ - strutsDep.GetId(): { - { - Message: "CVE-2017-5638 (CVSS 8) (severity Important) found in component 'struts' (version 1.2) in container 'ASFASF'", - }, - }, - // CVE-2017-5638 is deferred in `deferred-struct`, hence no violation. 
- }, - }, - { - policyName: "No CPU request or memory limit specified", - expectedViolations: map[string][]*storage.Alert_Violation{ - fixtureDep.GetId(): { - {Message: "Memory limit set to 0 MB for container 'nginx110container'"}, - }, - }, - }, - { - policyName: "Environment Variable Contains Secret", - expectedViolations: map[string][]*storage.Alert_Violation{ - secretEnvDep.GetId(): { - { - Message: "Environment variable 'THIS_IS_SECRET_VAR' is present in container 'secretenv'", - }, - }, - }, - }, - { - policyName: "Secret Mounted as Environment Variable", - expectedViolations: map[string][]*storage.Alert_Violation{ - secretKeyRefDep.GetId(): { - { - Message: "Environment variable 'THIS_IS_SECRET_VAR' is present and references a Secret", - }, - }, - }, - }, - { - policyName: "Fixable CVSS >= 6 and Privileged", - expectedViolations: map[string][]*storage.Alert_Violation{ - heartbleedDep.GetId(): { - { - Message: "Container 'nginx' is privileged", - }, - { - Message: "Fixable CVE-2014-0160 (CVSS 6) (severity Unknown) found in component 'heartbleed' (version 1.2) in container 'nginx', resolved by version v1.2", - }, - }, - }, - }, - { - policyName: "Fixable CVSS >= 7", - expectedViolations: map[string][]*storage.Alert_Violation{ - strutsDep.GetId(): { - { - Message: "Fixable CVE-2017-5638 (CVSS 8) (severity Important) found in component 'struts' (version 1.2) in container 'ASFASF', resolved by version v1.3", - }, - }, - structDepWithDeferredVulns.GetId(): { - { - Message: "Fixable CVE-2017-FAKE (CVSS 8) (severity Important) found in component 'deferred-struts' (version 1.2) in container 'ASFASF', resolved by version v1.3", - }, - }, - }, - }, - { - policyName: "Fixable Severity at least Important", - expectedViolations: map[string][]*storage.Alert_Violation{ - strutsDep.GetId(): { - { - Message: "Fixable CVE-2017-5638 (CVSS 8) (severity Important) found in component 'struts' (version 1.2) in container 'ASFASF', resolved by version v1.3", - }, - }, - structDepWithDeferredVulns.GetId(): { - { - Message: "Fixable CVE-2017-FAKE (CVSS 8) (severity Important) found in component 'deferred-struts' (version 1.2) in container 'ASFASF', resolved by version v1.3", - }, - }, - }, - }, - { - policyName: "ADD Command used instead of COPY", - expectedViolations: map[string][]*storage.Alert_Violation{ - addDockerFileDep.GetId(): { - { - Message: "Dockerfile line 'ADD deploy.sh' found in container 'ASFASF'", - }, - }, - fixtureDep.GetId(): { - { - Message: "Dockerfile line 'ADD FILE:blah' found in container 'nginx110container'", - }, - { - Message: "Dockerfile line 'ADD file:4eedf861fb567fffb2694b65ebd...' 
found in container 'supervulnerable'", - }, - }, - }, - }, - { - policyName: "nmap Execution", - expectedProcessViolations: map[string][]*storage.ProcessIndicator{ - fixtureDep.GetId(): {nmapIndicatorfixtureDep1, nmapIndicatorfixtureDep2}, - nginx110Dep.GetId(): {nmapIndicatorNginx110Dep}, - }, - }, - { - policyName: "Process Targeting Cluster Kubelet Endpoint", - expectedProcessViolations: map[string][]*storage.ProcessIndicator{ - containerPort22Dep.GetId(): {kubeletIndicator, kubeletIndicator2, kubeletIndicator3}, - }, - }, - { - policyName: "crontab Execution", - expectedProcessViolations: map[string][]*storage.ProcessIndicator{ - containerPort22Dep.GetId(): {crontabIndicator}, - }, - }, - { - policyName: "Ubuntu Package Manager Execution", - expectedProcessViolations: map[string][]*storage.ProcessIndicator{ - fixtureDep.GetId(): {fixtureDepAptIndicator}, - sysAdminDep.GetId(): {sysAdminDepAptIndicator}, - }, - }, - { - policyName: "Process with UID 0", - expectedProcessViolations: map[string][]*storage.ProcessIndicator{ - fixtureDep.GetId(): {fixtureDepJavaIndicator}, - }, - }, - { - policyName: "Shell Spawned by Java Application", - expectedProcessViolations: map[string][]*storage.ProcessIndicator{ - fixtureDep.GetId(): {fixtureDepJavaIndicator}, - }, - }, - { - policyName: "Network Management Execution", - expectedProcessViolations: map[string][]*storage.ProcessIndicator{ - fixtureDep.GetId(): {ifconfigIndicatorfixtureDep1, ifconfigIndicatorfixtureDep2, ipIndicatorfixtureDep, arpIndicatorfixtureDep}, - nginx110Dep.GetId(): {ifconfigIndicatorNginx110Dep, ipIndicatorNginx110Dep, arpIndicatorNginx110Dep}, - }, - }, - { - policyName: "Emergency Deployment Annotation", - expectedViolations: map[string][]*storage.Alert_Violation{ - depWithEnforcementBypassAnnotation.GetId(): { - {Message: "Disallowed annotations found: admission.stackrox.io/break-glass=ticket-1234"}, - }, - }, - }, - { - policyName: "Mounting Sensitive Host Directories", - expectedViolations: map[string][]*storage.Alert_Violation{ - hostMountDep.GetId(): { - {Message: "Read-only volume 'KUBELET' has source '/var/lib/kubelet' and type 'HostPath'"}, - {Message: "Writable volume 'HOSTMOUNT' has source '/etc/passwd' and type 'HostPath'"}, - }, - dockerSockDep.GetId(): { - {Message: "Read-only volume 'DOCKERSOCK' has source '/var/run/docker.sock' and type 'HostPath'"}, - }, - }, - }, - { - policyName: writableHostMountPolicyName, - expectedViolations: map[string][]*storage.Alert_Violation{ - hostMountDep.GetId(): { - {Message: "Writable volume 'HOSTMOUNT' has source '/etc/passwd' and type 'HostPath'"}, - }, - }, - }, - { - policyName: "Docker CIS 4.1: Ensure That a User for the Container Has Been Created", - expectedViolations: map[string][]*storage.Alert_Violation{ - depWithRootUser.GetId(): { - { - Message: "Container 'rootuser' has image with user 'root'", - }, - }, - }, - }, - { - policyName: "Docker CIS 4.7: Alert on Update Instruction", - expectedViolations: map[string][]*storage.Alert_Violation{ - depWithUpdate.GetId(): { - { - Message: "Dockerfile line 'RUN apt-get update' found in container 'ASFASF'", - }, - }, - }, - }, - { - policyName: "Docker CIS 5.7: Ensure privileged ports are not mapped within containers", - expectedViolations: map[string][]*storage.Alert_Violation{ - restrictedHostPortDep.GetId(): { - { - Message: "Exposed node port 22 is present", - }, - }, - }, - }, - { - policyName: "Docker CIS 5.15: Ensure that the host's process namespace is not shared", - expectedViolations: 
map[string][]*storage.Alert_Violation{ - hostPIDDep.GetId(): { - {Message: "Deployment uses the host's process ID namespace"}, - }, - }, - }, - { - policyName: "Docker CIS 5.16: Ensure that the host's IPC namespace is not shared", - expectedViolations: map[string][]*storage.Alert_Violation{ - hostIPCDep.GetId(): { - {Message: "Deployment uses the host's IPC namespace"}, - }, - }, - }, - { - policyName: "Docker CIS 5.19: Ensure mount propagation mode is not enabled", - expectedViolations: map[string][]*storage.Alert_Violation{ - mountPropagationDep.GetId(): { - {Message: "Writable volume 'ThisMountIsOnFire' has mount propagation 'bidirectional'"}, - }, - }, - }, - { - policyName: "Docker CIS 5.21: Ensure the default seccomp profile is not disabled", - expectedViolations: map[string][]*storage.Alert_Violation{ - noSeccompProfileDep.GetId(): { - {Message: "Container has Seccomp profile type 'unconfined'"}, - }, - }, - }, - { - policyName: "Docker CIS 5.9 and 5.20: Ensure that the host's network namespace is not shared", - expectedViolations: map[string][]*storage.Alert_Violation{ - hostNetworkDep.GetId(): { - {Message: "Deployment uses the host's network namespace"}, - }, - }, - }, - { - policyName: "Docker CIS 5.1 Ensure that, if applicable, an AppArmor Profile is enabled", - expectedViolations: map[string][]*storage.Alert_Violation{ - noAppArmorProfileDep.GetId(): { - {Message: "Container 'No AppArmor Profile' has AppArmor profile type 'unconfined'"}, - }, - }, - }, - { - policyName: "Docker CIS 4.4: Ensure images are scanned and rebuilt to include security patches", - allowUnvalidatedViolations: true, - expectedViolations: map[string][]*storage.Alert_Violation{ - strutsDep.GetId(): { - { - Message: "Fixable CVE-2017-5638 (CVSS 8) (severity Important) found in component 'struts' (version 1.2) in container 'ASFASF', resolved by version v1.3", - }, - }, - heartbleedDep.GetId(): { - { - Message: "Fixable CVE-2014-0160 (CVSS 6) (severity Unknown) found in component 'heartbleed' (version 1.2) in container 'nginx', resolved by version v1.2", - }, - }, - fixtureDep.GetId(): { - { - Message: "Fixable CVE-2014-6200 (CVSS 5) (severity Moderate) found in component 'name' (version 1.2.3.4) in container 'supervulnerable', resolved by version abcdefg", - }, - }, - fixtures.LightweightDeployment().GetId(): { - { - Message: "Fixable CVE-2014-6200 (CVSS 5) (severity Moderate) found in component 'name' (version 1.2.3.4) in container 'supervulnerable', resolved by version abcdefg", - }, - }, - structDepWithDeferredVulns.GetId(): { - { - Message: "Fixable CVE-2017-FAKE (CVSS 8) (severity Important) found in component 'deferred-struts' (version 1.2) in container 'ASFASF', resolved by version v1.3", - }, - }, - }, - }, - { - policyName: anyHostPathPolicyName, - expectedViolations: map[string][]*storage.Alert_Violation{ - dockerSockDep.GetId(): { - {Message: "Read-only volume 'DOCKERSOCK' has source '/var/run/docker.sock' and type 'HostPath'"}, - }, - crioSockDep.GetId(): { - {Message: "Read-only volume 'CRIOSOCK' has source '/run/crio/crio.sock' and type 'HostPath'"}, - }, - hostMountDep.GetId(): { - {Message: "Read-only volume 'KUBELET' has source '/var/lib/kubelet' and type 'HostPath'"}, - {Message: "Writable volume 'HOSTMOUNT' has source '/etc/passwd' and type 'HostPath'"}, - }, - }, - }, - } - - for _, c := range deploymentTestCases { - p := suite.MustGetPolicy(c.policyName) - suite.T().Run(fmt.Sprintf("%s (on deployments)", c.policyName), func(t *testing.T) { - if len(c.shouldNotMatch) == 0 { - 
assert.True(t, (c.expectedViolations != nil) != (c.expectedProcessViolations != nil), "Every test case must "+ - "contain exactly one of expectedViolations and expectedProcessViolations") - } else { - assert.Nil(t, c.expectedViolations, "Cannot specify shouldNotMatch AND expectedViolations") - assert.Nil(t, c.expectedProcessViolations, "Cannot specify shouldNotMatch AND expectedProcessViolations") - } - - m, err := BuildDeploymentMatcher(p) - require.NoError(t, err) - - if c.expectedProcessViolations != nil { - processMatcher, err := BuildDeploymentWithProcessMatcher(p) - require.NoError(t, err) - for deploymentID, processes := range c.expectedProcessViolations { - expectedProcesses := set.NewStringSet(sliceutils.Map(processes, func(p *storage.ProcessIndicator) string { - return p.GetId() - })...) - deployment := suite.deployments[deploymentID] - - for _, process := range suite.deploymentsToIndicators[deploymentID] { - match := getViolationsWithAndWithoutCaching(t, func(cache *CacheReceptacle) (Violations, error) { - return processMatcher.MatchDeploymentWithProcess(nil, enhancedDeployment(deployment, suite.getImagesForDeployment(deployment)), process, false) - }) - require.NoError(t, err) - if expectedProcesses.Contains(process.GetId()) { - assert.NotNil(t, match.ProcessViolation, "process %+v should match", process) - } else { - assert.Nil(t, match.ProcessViolation, "process %+v should not match", process) - } - } - } - return - } - - actualViolations := make(map[string][]*storage.Alert_Violation) - for id, deployment := range suite.deployments { - violationsForDep := getViolationsWithAndWithoutCaching(t, func(cache *CacheReceptacle) (Violations, error) { - return m.MatchDeployment(cache, enhancedDeployment(deployment, suite.getImagesForDeployment(deployment))) - }) - assert.Nil(t, violationsForDep.ProcessViolation) - if alertViolations := violationsForDep.AlertViolations; len(alertViolations) > 0 { - actualViolations[id] = alertViolations - } - } - if len(c.shouldNotMatch) > 0 { - for shouldNotMatchID := range c.shouldNotMatch { - assert.Contains(t, suite.deployments, shouldNotMatchID) - assert.NotContains(t, actualViolations, shouldNotMatchID) - } - for id := range suite.deployments { - if _, shouldNotMatch := c.shouldNotMatch[id]; !shouldNotMatch { - assert.Contains(t, actualViolations, id) - - // TODO(rc) update for BPL and check all sampleViolationForMatched - if c.policyName == "Images with no scans" { - if len(suite.deployments[id].GetContainers()) == 1 { - msg := fmt.Sprintf(c.sampleViolationForMatched, suite.deployments[id].GetContainers()[0].GetName()) - protoassert.SlicesEqual(t, actualViolations[id], []*storage.Alert_Violation{{Message: msg}}) - } - } - } - } - return - } - - for id := range suite.deployments { - violations, expected := c.expectedViolations[id] - if expected { - assert.Contains(t, actualViolations, id) - - if c.allowUnvalidatedViolations { - assert.NotEmpty(t, violations) - for _, violation := range violations { - protoassert.SliceContains(t, actualViolations[id], violation) - } - } else { - protoassert.SlicesEqual(t, violations, actualViolations[id]) - } - } else { - assert.NotContains(t, actualViolations, id) - } - } - - }) - } - - imageTestCases := []testCase{ - { - policyName: "Latest tag", - expectedViolations: map[string][]*storage.Alert_Violation{ - fixtureDep.GetContainers()[1].GetImage().GetId(): { - {Message: "Image has tag 'latest'"}, - }, - }, - }, - { - policyName: "Alpine Linux Package Manager (apk) in Image", - expectedViolations: 
map[string][]*storage.Alert_Violation{ - suite.imageIDFromDep(apkDep): { - { - Message: "Image includes component 'apk-tools' (version 1.2)", - }, - }, - }, - }, - { - policyName: "Ubuntu Package Manager in Image", - expectedViolations: map[string][]*storage.Alert_Violation{ - suite.imageIDFromDep(componentDeps["apt"]): { - { - Message: "Image includes component 'apt'", - }, - }, - }, - }, - { - policyName: "Curl in Image", - expectedViolations: map[string][]*storage.Alert_Violation{ - suite.imageIDFromDep(curlDep): { - { - Message: "Image includes component 'curl' (version 1.3)", - }, - }, - }, - }, - { - policyName: "Red Hat Package Manager in Image", - expectedViolations: map[string][]*storage.Alert_Violation{ - suite.imageIDFromDep(componentDeps["dnf"]): { - { - Message: "Image includes component 'dnf'", - }, - }, - }, - }, - { - policyName: "Wget in Image", - expectedViolations: map[string][]*storage.Alert_Violation{ - suite.imageIDFromDep(componentDeps["wget"]): { - { - Message: "Image includes component 'wget'", - }, - }, - }, - }, - { - policyName: "90-Day Image Age", - expectedViolations: map[string][]*storage.Alert_Violation{ - suite.imageIDFromDep(oldImageDep): { - { - Message: fmt.Sprintf("Image was created at %s (UTC)", readable.Time(oldImageCreationTime)), - }, - }, - }, - }, - { - policyName: "30-Day Scan Age", - expectedViolations: map[string][]*storage.Alert_Violation{ - suite.imageIDFromDep(oldScannedDep): { - { - Message: fmt.Sprintf("Image was last scanned at %s (UTC)", readable.Time(oldScannedTime)), - }, - }, - }, - }, - { - policyName: "Secure Shell (ssh) Port Exposed in Image", - expectedViolations: map[string][]*storage.Alert_Violation{ - suite.imageIDFromDep(imagePort22Dep): { - { - Message: "Dockerfile line 'EXPOSE 22/tcp' found", - }, - }, - }, - }, - { - policyName: "Insecure specified in CMD", - expectedViolations: map[string][]*storage.Alert_Violation{ - suite.imageIDFromDep(insecureCMDDep): { - { - Message: "Dockerfile line 'CMD do an insecure thing' found", - }, - }, - }, - }, - { - policyName: "Improper Usage of Orchestrator Secrets Volume", - expectedViolations: map[string][]*storage.Alert_Violation{ - suite.imageIDFromDep(runSecretsDep): { - { - Message: "Dockerfile line 'VOLUME /run/secrets' found", - }, - }, - suite.imageIDFromDep(runSecretsArrayDep): { - { - Message: "Dockerfile line 'VOLUME [/run/secrets]' found", - }, - }, - suite.imageIDFromDep(runSecretsListDep): { - { - Message: "Dockerfile line 'VOLUME /var/something /run/secrets' found", - }, - }, - suite.imageIDFromDep(runSecretsArrayListDep): { - { - Message: "Dockerfile line 'VOLUME [/var/something /run/secrets]' found", - }, - }, - }, - }, - { - policyName: "Images with no scans", - shouldNotMatch: map[string]struct{}{ - oldScannedImage.GetId(): {}, - suite.imageIDFromDep(heartbleedDep): {}, - apkImage.GetId(): {}, - curlImage.GetId(): {}, - suite.imageIDFromDep(componentDeps["apt"]): {}, - suite.imageIDFromDep(componentDeps["dnf"]): {}, - suite.imageIDFromDep(componentDeps["wget"]): {}, - shellshockImage.GetId(): {}, - suppressedShellshockImage.GetId(): {}, - strutsImage.GetId(): {}, - strutsImageSuppressed.GetId(): {}, - structImageWithDeferredVulns.GetId(): {}, - depWithNonSeriousVulnsImage.GetId(): {}, - fixtureDep.GetContainers()[0].GetImage().GetId(): {}, - fixtureDep.GetContainers()[1].GetImage().GetId(): {}, - suite.imageIDFromDep(oldScannedDep): {}, - imgWithFixedByEmpty.GetId(): {}, - imgWithFixedByEmptyOnlyForSome.GetId(): {}, - }, - sampleViolationForMatched: "Image has not been 
scanned", - expectedViolations: map[string][]*storage.Alert_Violation{}, - }, - { - policyName: "Apache Struts: CVE-2017-5638", - expectedViolations: map[string][]*storage.Alert_Violation{ - suite.imageIDFromDep(strutsDep): { - { - Message: "CVE-2017-5638 (CVSS 8) (severity Important) found in component 'struts' (version 1.2)", - }, - }, - }, - }, - { - policyName: "Fixable CVSS >= 7", - expectedViolations: map[string][]*storage.Alert_Violation{ - suite.imageIDFromDep(strutsDep): { - { - Message: "Fixable CVE-2017-5638 (CVSS 8) (severity Important) found in component 'struts' (version 1.2), resolved by version v1.3", - }, - }, - imgWithFixedByEmptyOnlyForSome.GetId(): { - { - Message: "Fixable CVE-5612-1245 (CVSS 8) (severity Critical) found in component 'Normal' (version 2.3), resolved by version actually_fixable", - }, - }, - }, - }, - { - policyName: "Fixable Severity at least Important", - expectedViolations: map[string][]*storage.Alert_Violation{ - suite.imageIDFromDep(strutsDep): { - { - Message: "Fixable CVE-2017-5638 (CVSS 8) (severity Important) found in component 'struts' (version 1.2), resolved by version v1.3", - }, - }, - imgWithFixedByEmptyOnlyForSome.GetId(): { - { - Message: "Fixable CVE-5612-1245 (CVSS 8) (severity Critical) found in component 'Normal' (version 2.3), resolved by version actually_fixable", - }, - }, - }, - }, - { - policyName: "ADD Command used instead of COPY", - expectedViolations: map[string][]*storage.Alert_Violation{ - suite.imageIDFromDep(addDockerFileDep): { - { - Message: "Dockerfile line 'ADD deploy.sh' found", - }, - }, - fixtureDep.GetContainers()[0].GetImage().GetId(): { - { - Message: "Dockerfile line 'ADD FILE:blah' found", - }, - }, - fixtureDep.GetContainers()[1].GetImage().GetId(): { - { - Message: "Dockerfile line 'ADD file:4eedf861fb567fffb2694b65ebd...' found", - }, - }, - }, - }, - { - policyName: "Required Image Label", - shouldNotMatch: map[string]struct{}{ - "requiredImageLabelImage": {}, - }, - sampleViolationForMatched: "Required label not found (found labels: )", - }, - - { - // We can only test that the policy triggers for unverified images. The "shouldNotMatch" field cannot be - // used to verify that signed images don't trigger violations, because then the logic expects that all - // other images (not listed in shouldNotMatch) trigger a violation; and in this case only unsigned images - // in Red Hat registries trigger violations - any other unsigned images are fine and should not trigger. 
- policyName: "Red Hat images must be signed by a Red Hat release key", - expectedViolations: map[string][]*storage.Alert_Violation{ - registryRedHatIoUnverifiedImg.GetId(): { - { - Message: "Image has registry 'registry.redhat.io'", - }, - { - Message: "Image signature is not verified by the specified signature integration(s).", - }, - }, - registryAccessRedhatComUnverifiedImg.GetId(): { - { - Message: "Image has registry 'registry.access.redhat.com'", - }, - { - Message: "Image signature is not verified by the specified signature integration(s).", - }, - }, - quayOCPReleaseUnverifiedImg.GetId(): { - { - Message: "Image has registry 'quay.io' and remote 'openshift-release-dev/ocp-release'", - }, - { - Message: "Image signature is not verified by the specified signature integration(s).", - }, - }, - quayOCPArtDevUnverifiedImg.GetId(): { - { - Message: "Image has registry 'quay.io' and remote 'openshift-release-dev/ocp-v4.0-art-dev'", - }, - { - Message: "Image signature is not verified by the specified signature integration(s).", - }, - }, - }, - }, - } - - for _, c := range imageTestCases { - p := suite.MustGetPolicy(c.policyName) - suite.T().Run(fmt.Sprintf("%s (on images)", c.policyName), func(t *testing.T) { - assert.Nil(t, c.expectedProcessViolations) - - m, err := BuildImageMatcher(p) - require.NoError(t, err) - - actualViolations := make(map[string][]*storage.Alert_Violation) - for id, image := range suite.images { - violationsForImg := getViolationsWithAndWithoutCaching(t, func(cache *CacheReceptacle) (Violations, error) { - return m.MatchImage(cache, image) - }) - suite.Nil(violationsForImg.ProcessViolation) - if alertViolations := violationsForImg.AlertViolations; len(alertViolations) > 0 { - actualViolations[id] = alertViolations - } - } - - for id, violations := range c.expectedViolations { - assert.Contains(t, actualViolations, id) - protoassert.SlicesEqual(t, violations, actualViolations[id]) - } - if len(c.shouldNotMatch) > 0 { - if c.policyName == "Required Image Label" { - for id, image := range suite.images { - if image.GetMetadata() == nil { - c.shouldNotMatch[id] = struct{}{} - } - } - } - - for shouldNotMatchID := range c.shouldNotMatch { - assert.Contains(t, suite.images, shouldNotMatchID, "%s is not a known image id in the suite", shouldNotMatchID) - assert.NotContains(t, actualViolations, shouldNotMatchID) - } - - for id := range suite.images { - if _, shouldNotMatch := c.shouldNotMatch[id]; !shouldNotMatch { - assert.Contains(t, actualViolations, id) - protoassert.SlicesEqual(t, actualViolations[id], []*storage.Alert_Violation{{Message: c.sampleViolationForMatched}}) - } - } - } - }) - } -} - -func (suite *DefaultPoliciesTestSuite) TestMapPolicyMatchOne() { - noAnnotation := &storage.Deployment{ - Id: "noAnnotation", - } - suite.addDepAndImages(noAnnotation) - - noValidAnnotation := &storage.Deployment{ - Id: "noValidAnnotation", - Annotations: map[string]string{ - "email": "notavalidemail", - "someotherannotation": "vv@stackrox.com", - }, - } - suite.addDepAndImages(noValidAnnotation) - - validAnnotation := &storage.Deployment{ - Id: "validAnnotation", - Annotations: map[string]string{ - "email": "joseph@rules.gov", - }, - } - suite.addDepAndImages(validAnnotation) - - policy := suite.defaultPolicies["Required Annotation: Email"] - - m, err := BuildDeploymentMatcher(policy) - suite.NoError(err) - - for _, testCase := range []struct { - dep *storage.Deployment - expectedViolations []string - }{ - { - noAnnotation, - []string{"Required annotation not found (found 
annotations: )"}, - }, - { - noValidAnnotation, - []string{"Required annotation not found (found annotations: email=notavalidemail, someotherannotation=vv@stackrox.com)"}, - }, - { - validAnnotation, - nil, - }, - } { - c := testCase - suite.Run(c.dep.GetId(), func() { - matched, err := m.MatchDeployment(nil, enhancedDeployment(c.dep, nil)) - suite.NoError(err) - var expectedMessages []*storage.Alert_Violation - for _, v := range c.expectedViolations { - expectedMessages = append(expectedMessages, &storage.Alert_Violation{Message: v}) - } - protoassert.SlicesEqual(suite.T(), matched.AlertViolations, expectedMessages) - }) - } -} - -func (suite *DefaultPoliciesTestSuite) TestRuntimePolicyFieldsCompile() { - for _, p := range suite.defaultPolicies { - if policyUtils.AppliesAtRunTime(p) { - checkRegexCompiles(p.GetPolicySections(), fieldnames.ProcessName) - checkRegexCompiles(p.GetPolicySections(), fieldnames.ProcessArguments) - checkRegexCompiles(p.GetPolicySections(), fieldnames.ProcessAncestor) - } - } -} - -func checkRegexCompiles(sections []*storage.PolicySection, fieldname string) { - for _, s := range sections { - for _, g := range s.GetPolicyGroups() { - if g.GetFieldName() == fieldname { - if policyVals := g.GetValues(); len(policyVals) > 0 { - for _, policyVal := range policyVals { - if v := policyVal.GetValue(); v != "" { - regexp.MustCompile(v) - } - } - } - } - } - } -} - -func policyWithGroups(eventSrc storage.EventSource, groups ...*storage.PolicyGroup) *storage.Policy { - return &storage.Policy{ - PolicyVersion: policyversion.CurrentVersion().String(), - Name: uuid.NewV4().String(), - EventSource: eventSrc, - PolicySections: []*storage.PolicySection{{PolicyGroups: groups}}, - } -} - -func policyGroupWithSingleKeyValue(fieldName, value string, negate bool) *storage.PolicyGroup { - return &storage.PolicyGroup{FieldName: fieldName, Values: []*storage.PolicyValue{{Value: value}}, Negate: negate} -} - -func policyWithSingleKeyValue(fieldName, value string, negate bool) *storage.Policy { - return policyWithGroups(storage.EventSource_NOT_APPLICABLE, policyGroupWithSingleKeyValue(fieldName, value, negate)) -} - -func policyWithSingleFieldAndValues(fieldName string, values []string, negate bool, op storage.BooleanOperator) *storage.Policy { - return policyWithGroups(storage.EventSource_NOT_APPLICABLE, &storage.PolicyGroup{FieldName: fieldName, Values: sliceutils.Map(values, func(val string) *storage.PolicyValue { - return &storage.PolicyValue{Value: val} - }), Negate: negate, BooleanOperator: op}) -} - -func processBaselineMessage(dep *storage.Deployment, baseline bool, privileged bool, processNames ...string) []*storage.Alert_Violation { - violations := make([]*storage.Alert_Violation, 0, len(processNames)) - containerName := dep.GetContainers()[0].GetName() - for _, p := range processNames { - if baseline { - msg := fmt.Sprintf("Unexpected process '%s' in container '%s'", p, containerName) - violations = append(violations, &storage.Alert_Violation{Message: msg}) - } - if privileged { - violations = append(violations, privilegedMessage(dep)...) 
- } - } - return violations -} - -func networkBaselineMessage( - suite *DefaultPoliciesTestSuite, - flow *augmentedobjs.NetworkFlowDetails, -) *storage.Alert_Violation { - violation, err := printer.GenerateNetworkFlowViolation(flow) - suite.Nil(err) - return violation -} - -func assertNetworkBaselineMessagesEqual( - suite *DefaultPoliciesTestSuite, - this []*storage.Alert_Violation, - that []*storage.Alert_Violation, -) { - thisWithoutTime := make([]*storage.Alert_Violation, 0, len(this)) - thatWithoutTime := make([]*storage.Alert_Violation, 0, len(that)) - for _, violation := range this { - cp := violation.CloneVT() - cp.Time = nil - thisWithoutTime = append(thisWithoutTime, cp) - } - for _, violation := range that { - cp := violation.CloneVT() - cp.Time = nil - thatWithoutTime = append(thatWithoutTime, cp) - } - protoassert.ElementsMatch(suite.T(), thisWithoutTime, thatWithoutTime) -} - -func privilegedMessage(dep *storage.Deployment) []*storage.Alert_Violation { - containerName := dep.GetContainers()[0].GetName() - return []*storage.Alert_Violation{{Message: fmt.Sprintf("Container '%s' is privileged", containerName)}} -} - -func rbacPermissionMessage(level string) []*storage.Alert_Violation { - permissionToDescMap := map[string]string{ - "NONE": "no specified access", - "DEFAULT": "default access", - "ELEVATED_IN_NAMESPACE": "elevated access in namespace", - "ELEVATED_CLUSTER_WIDE": "elevated access cluster wide", - "CLUSTER_ADMIN": "cluster admin access"} - return []*storage.Alert_Violation{{Message: fmt.Sprintf("Service account permission level with %s", permissionToDescMap[level])}} -} - -func (suite *DefaultPoliciesTestSuite) TestK8sRBACField() { - deployments := make(map[string]*storage.Deployment) - for permissionLevelStr, permissionLevel := range storage.PermissionLevel_value { - dep := fixtures.GetDeployment().CloneVT() - dep.ServiceAccountPermissionLevel = storage.PermissionLevel(permissionLevel) - deployments[permissionLevelStr] = dep - } - - for _, testCase := range []struct { - value string - negate bool - expectedMatches []string - // Deployment ids to violations - expectedViolations map[string][]*storage.Alert_Violation - }{ - { - "DEFAULT", - false, - []string{"DEFAULT", "ELEVATED_IN_NAMESPACE", "ELEVATED_CLUSTER_WIDE", "CLUSTER_ADMIN"}, - map[string][]*storage.Alert_Violation{ - "DEFAULT": rbacPermissionMessage("DEFAULT"), - "ELEVATED_CLUSTER_WIDE": rbacPermissionMessage("ELEVATED_CLUSTER_WIDE"), - "ELEVATED_IN_NAMESPACE": rbacPermissionMessage("ELEVATED_IN_NAMESPACE"), - "CLUSTER_ADMIN": rbacPermissionMessage("CLUSTER_ADMIN"), - }, - }, - { - "ELEVATED_CLUSTER_WIDE", - false, - []string{"ELEVATED_CLUSTER_WIDE", "CLUSTER_ADMIN"}, - map[string][]*storage.Alert_Violation{ - "ELEVATED_CLUSTER_WIDE": rbacPermissionMessage("ELEVATED_CLUSTER_WIDE"), - "CLUSTER_ADMIN": rbacPermissionMessage("CLUSTER_ADMIN"), - }, - }, - { - "cluster_admin", - false, - []string{"CLUSTER_ADMIN"}, - map[string][]*storage.Alert_Violation{ - "CLUSTER_ADMIN": rbacPermissionMessage("CLUSTER_ADMIN"), - }, - }, - { - "ELEVATED_CLUSTER_WIDE", - true, - []string{"NONE", "DEFAULT", "ELEVATED_IN_NAMESPACE"}, - map[string][]*storage.Alert_Violation{ - "ELEVATED_IN_NAMESPACE": rbacPermissionMessage("ELEVATED_IN_NAMESPACE"), - "NONE": rbacPermissionMessage("NONE"), - "DEFAULT": rbacPermissionMessage("DEFAULT"), - }, - }, - } { - c := testCase - suite.T().Run(fmt.Sprintf("%+v", c.expectedMatches), func(t *testing.T) { - matcher, err := 
BuildDeploymentMatcher(policyWithSingleKeyValue(fieldnames.MinimumRBACPermissions, c.value, c.negate)) - require.NoError(t, err) - matched := set.NewStringSet() - for depRef, dep := range deployments { - violations, err := matcher.MatchDeployment(nil, enhancedDeployment(dep, suite.getImagesForDeployment(dep))) - require.NoError(t, err) - if len(violations.AlertViolations) > 0 { - matched.Add(depRef) - protoassert.SlicesEqual(t, violations.AlertViolations, c.expectedViolations[depRef]) - } else { - assert.Empty(t, c.expectedViolations[depRef]) - } - } - assert.ElementsMatch(t, matched.AsSlice(), c.expectedMatches, "Got %v, expected: %v", matched.AsSlice(), c.expectedMatches) - }) - } -} - -func (suite *DefaultPoliciesTestSuite) TestPortExposure() { - deployments := make(map[string]*storage.Deployment) - for exposureLevelStr, exposureLevel := range storage.PortConfig_ExposureLevel_value { - dep := fixtures.GetDeployment().CloneVT() - dep.Ports = []*storage.PortConfig{{ExposureInfos: []*storage.PortConfig_ExposureInfo{{Level: storage.PortConfig_ExposureLevel(exposureLevel)}}}} - deployments[exposureLevelStr] = dep - } - - assertMessageMatches := func(t *testing.T, depRef string, violations []*storage.Alert_Violation) { - depRefToExpectedMsg := map[string]string{ - "EXTERNAL": "exposed with load balancer", - "NODE": "exposed on node port", - "INTERNAL": "using internal cluster IP", - "HOST": "exposed on host port", - "ROUTE": "exposed with a route", - } - require.Len(t, violations, 1) - assert.Equal(t, fmt.Sprintf("Deployment port(s) %s", depRefToExpectedMsg[depRef]), violations[0].GetMessage()) - } - - for _, testCase := range []struct { - values []string - negate bool - expectedMatches []string - }{ - { - []string{"external"}, - false, - []string{"EXTERNAL"}, - }, - { - []string{"external", "NODE"}, - false, - []string{"EXTERNAL", "NODE"}, - }, - { - []string{"external", "NODE"}, - true, - []string{"INTERNAL", "HOST", "ROUTE"}, - }, - } { - c := testCase - suite.T().Run(fmt.Sprintf("%+v", c), func(t *testing.T) { - matcher, err := BuildDeploymentMatcher(policyWithSingleFieldAndValues(fieldnames.PortExposure, c.values, c.negate, storage.BooleanOperator_OR)) - require.NoError(t, err) - matched := set.NewStringSet() - for depRef, dep := range deployments { - violations, err := matcher.MatchDeployment(nil, enhancedDeployment(dep, suite.getImagesForDeployment(dep))) - require.NoError(t, err) - if len(violations.AlertViolations) > 0 { - assertMessageMatches(t, depRef, violations.AlertViolations) - matched.Add(depRef) - } - } - assert.ElementsMatch(t, matched.AsSlice(), c.expectedMatches, "Got %v, expected: %v", matched.AsSlice(), c.expectedMatches) - }) - } -} - -func (suite *DefaultPoliciesTestSuite) TestImageOS() { - depToImg := make(map[*storage.Deployment]*storage.Image) - for _, imgName := range []string{ - "unknown", - "alpine:v3.4", - "alpine:v3.11", - "ubuntu:20.04", - "debian:8", - "debian:10", - } { - img := imageWithOS(imgName) - dep := fixtures.GetDeployment().CloneVT() - dep.Containers = []*storage.Container{ - { - Name: imgName, - Image: types.ToContainerImage(img), - }, - } - depToImg[dep] = img - } - - for _, testCase := range []struct { - value string - expectedMatches []string - }{ - { - value: "unknown", - expectedMatches: []string{"unknown"}, - }, - { - value: "alpine", - expectedMatches: []string{}, - }, - { - value: "alpine.*", - expectedMatches: []string{"alpine:v3.4", "alpine:v3.11"}, - }, - { - value: "debian:8", - expectedMatches: []string{"debian:8"}, - }, - { - value: 
"centos", - expectedMatches: nil, - }, - } { - c := testCase - - suite.T().Run(fmt.Sprintf("DeploymentMatcher %+v", c), func(t *testing.T) { - depMatcher, err := BuildDeploymentMatcher(policyWithSingleKeyValue(fieldnames.ImageOS, c.value, false)) - require.NoError(t, err) - depMatched := set.NewStringSet() - for dep, img := range depToImg { - violations, err := depMatcher.MatchDeployment(nil, enhancedDeployment(dep, []*storage.Image{img})) - require.NoError(t, err) - if len(violations.AlertViolations) > 0 { - depMatched.Add(img.GetScan().GetOperatingSystem()) - require.Len(t, violations.AlertViolations, 1) - assert.Equal(t, fmt.Sprintf("Container '%s' has image with base OS '%s'", dep.GetContainers()[0].GetName(), img.GetScan().GetOperatingSystem()), violations.AlertViolations[0].GetMessage()) - } - } - assert.ElementsMatch(t, depMatched.AsSlice(), c.expectedMatches, "Got %v for policy %v; expected: %v", depMatched.AsSlice(), c.value, c.expectedMatches) - }) - - suite.T().Run(fmt.Sprintf("ImageMatcher %+v", c), func(t *testing.T) { - imgMatcher, err := BuildImageMatcher(policyWithSingleKeyValue(fieldnames.ImageOS, c.value, false)) - require.NoError(t, err) - imgMatched := set.NewStringSet() - for _, img := range depToImg { - violations, err := imgMatcher.MatchImage(nil, img) - require.NoError(t, err) - if len(violations.AlertViolations) > 0 { - imgMatched.Add(img.GetScan().GetOperatingSystem()) - require.Len(t, violations.AlertViolations, 1) - assert.Equal(t, fmt.Sprintf("Image has base OS '%s'", img.GetScan().GetOperatingSystem()), violations.AlertViolations[0].GetMessage()) - } - } - assert.ElementsMatch(t, imgMatched.AsSlice(), c.expectedMatches, "Got %v for policy %v; expected: %v", imgMatched.AsSlice(), c.value, c.expectedMatches) - }) - } -} - -func (suite *DefaultPoliciesTestSuite) TestImageVerified() { - const ( - verifier0 = "io.stackrox.signatureintegration.00000000-0000-0000-0000-000000000001" - verifier1 = "io.stackrox.signatureintegration.00000000-0000-0000-0000-000000000002" - verifier2 = "io.stackrox.signatureintegration.00000000-0000-0000-0000-000000000003" - verifier3 = "io.stackrox.signatureintegration.00000000-0000-0000-0000-000000000004" - unverifier = "io.stackrox.signatureintegration.00000000-0000-0000-0000-00000000000F" - ) - - var images = []*storage.Image{ - suite.imageWithSignatureVerificationResults("image_no_results", []*storage.ImageSignatureVerificationResult{{}}), - suite.imageWithSignatureVerificationResults("image_empty_results", []*storage.ImageSignatureVerificationResult{{ - VerifierId: "", - Status: storage.ImageSignatureVerificationResult_UNSET, - }}), - suite.imageWithSignatureVerificationResults("image_nil_results", nil), - suite.imageWithSignatureVerificationResults("verified_by_0", []*storage.ImageSignatureVerificationResult{{ - VerifierId: verifier0, - Status: storage.ImageSignatureVerificationResult_VERIFIED, - VerifiedImageReferences: []string{"verified_by_0"}, - }}), - suite.imageWithSignatureVerificationResults("unverified_image", []*storage.ImageSignatureVerificationResult{{ - VerifierId: unverifier, - Status: storage.ImageSignatureVerificationResult_UNSET, - }}), - suite.imageWithSignatureVerificationResults("verified_by_3", []*storage.ImageSignatureVerificationResult{{ - VerifierId: verifier2, - Status: storage.ImageSignatureVerificationResult_FAILED_VERIFICATION, - }, { - VerifierId: verifier3, - Status: storage.ImageSignatureVerificationResult_VERIFIED, - VerifiedImageReferences: []string{"verified_by_3"}, - }}), - 
suite.imageWithSignatureVerificationResults("verified_by_2_and_3", []*storage.ImageSignatureVerificationResult{{ - VerifierId: verifier2, - Status: storage.ImageSignatureVerificationResult_VERIFIED, - VerifiedImageReferences: []string{"verified_by_2_and_3"}, - }, { - VerifierId: verifier3, - Status: storage.ImageSignatureVerificationResult_VERIFIED, - VerifiedImageReferences: []string{"verified_by_2_and_3"}, - }}), - } - - var allImages set.FrozenStringSet - { - ai := set.NewStringSet() - for _, img := range images { - ai.Add(img.GetName().GetFullName()) - } - allImages = ai.Freeze() - } - - getViolationMessage := func(img *storage.Image) string { - message := strings.Builder{} - message.WriteString("Image signature is not verified by the specified signature integration(s)") - successfulVerifierIDs := []string{} - for _, r := range img.GetSignatureVerificationData().GetResults() { - if r.GetVerifierId() != "" && r.GetStatus() == storage.ImageSignatureVerificationResult_VERIFIED { - successfulVerifierIDs = append(successfulVerifierIDs, r.GetVerifierId()) - } - } - if len(successfulVerifierIDs) > 0 { - message.WriteString(fmt.Sprintf(" (it is verified by other integration(s): %s)", printer.StringSliceToSortedSentence(successfulVerifierIDs))) - } - message.WriteString(".") - return message.String() - } - - suite.Run("Test disallowed AND operator", func() { - _, err := BuildImageMatcher(policyWithSingleFieldAndValues(fieldnames.ImageSignatureVerifiedBy, - []string{verifier0}, false, storage.BooleanOperator_AND)) - suite.EqualError(err, - "policy validation error: operator AND is not allowed for field \"Image Signature Verified By\"") - }) - - for i, testCase := range []struct { - values []string - expectedMatches set.FrozenStringSet - }{ - { - values: []string{unverifier}, - expectedMatches: allImages, - }, - { - values: []string{verifier0}, - expectedMatches: allImages.Difference(set.NewFrozenStringSet("verified_by_0")), - }, - { - values: []string{verifier1}, - expectedMatches: allImages, - }, - { - values: []string{verifier2}, - expectedMatches: allImages.Difference(set.NewFrozenStringSet("verified_by_2_and_3")), - }, - { - values: []string{verifier3}, - expectedMatches: allImages.Difference(set.NewFrozenStringSet("verified_by_3", "verified_by_2_and_3")), - }, - { - values: []string{verifier0, verifier2}, - expectedMatches: allImages.Difference(set.NewFrozenStringSet("verified_by_0", "verified_by_2_and_3")), - }, - { - values: []string{verifier2, verifier3}, - expectedMatches: allImages.Difference(set.NewFrozenStringSet("verified_by_3", "verified_by_2_and_3")), - }, - } { - c := testCase - - suite.Run(fmt.Sprintf("ImageMatcher %d: %+v", i, c), func() { - imgMatcher, err := BuildImageMatcher(policyWithSingleFieldAndValues(fieldnames.ImageSignatureVerifiedBy, - c.values, false, storage.BooleanOperator_OR)) - suite.NoError(err) - matchedImages := set.NewStringSet() - for _, img := range images { - violations, err := imgMatcher.MatchImage(nil, img) - suite.NoError(err) - if len(violations.AlertViolations) == 0 { - continue - } - matchedImages.Add(img.GetName().GetFullName()) - suite.Truef(c.expectedMatches.Contains(img.GetName().GetFullName()), "Image %q should not match", - img.GetName().GetFullName()) - - for _, violation := range violations.AlertViolations { - suite.Equal(getViolationMessage(img), violation.GetMessage()) - } - } - suite.True(c.expectedMatches.Difference(matchedImages.Freeze()).IsEmpty(), matchedImages) - }) - } -} - -func (suite *DefaultPoliciesTestSuite) 
TestImageVerified_WithDeployment() { - const ( - verifier1 = "io.stackrox.signatureintegration.00000000-0000-0000-0000-000000000002" - verifier2 = "io.stackrox.signatureintegration.00000000-0000-0000-0000-000000000003" - verifier3 = "io.stackrox.signatureintegration.00000000-0000-0000-0000-000000000004" - ) - - imgVerifiedAndMatchingReference := suite.imageWithSignatureVerificationResults("image_verified_by_1", - []*storage.ImageSignatureVerificationResult{ - { - VerifierId: verifier1, - Status: storage.ImageSignatureVerificationResult_VERIFIED, - VerifiedImageReferences: []string{"image_verified_by_1"}, - }, - }) - - imgVerifiedAndMatchingMultipleReferences := suite.imageWithSignatureVerificationResults("image_verified_by_2", - []*storage.ImageSignatureVerificationResult{ - { - VerifierId: verifier3, - Status: storage.ImageSignatureVerificationResult_VERIFIED, - VerifiedImageReferences: []string{"image_with_alternative_verified_reference", "image_verified_by_2"}, - }, - }) - - imgVerifiedButNotMatchingReference := suite.imageWithSignatureVerificationResults("image_with_alternative_verified_reference", - []*storage.ImageSignatureVerificationResult{ - { - VerifierId: verifier2, - Status: storage.ImageSignatureVerificationResult_VERIFIED, - VerifiedImageReferences: []string{"image_verified_by_2"}, - }, - }) - - cases := map[string]struct { - deployment *storage.Deployment - image *storage.Image - matchingVerifier string - expectViolation bool - }{ - "deployment with matching verified image reference shouldn't lead in alert message": { - deployment: deploymentWithImage("deployment_with_image_verified_by_1", imgVerifiedAndMatchingReference), - image: imgVerifiedAndMatchingReference, - matchingVerifier: verifier1, - }, - "deployment with verified result but no matching verified image reference should lead to alert message": { - deployment: deploymentWithImage("deployment_with_image_alternative_verified_reference", imgVerifiedButNotMatchingReference), - image: imgVerifiedButNotMatchingReference, - matchingVerifier: verifier2, - expectViolation: true, - }, - "deployment with verified result and multiple matching verified image references shouldn't lead to alert message": { - deployment: deploymentWithImage("deployment_with_image_verified_by_2", imgVerifiedAndMatchingMultipleReferences), - image: imgVerifiedAndMatchingMultipleReferences, - matchingVerifier: verifier3, - }, - } - - for name, c := range cases { - suite.Run(name, func() { - deploymentMatcher, err := BuildDeploymentMatcher(policyWithSingleFieldAndValues(fieldnames.ImageSignatureVerifiedBy, - []string{c.matchingVerifier}, false, storage.BooleanOperator_OR)) - suite.Require().NoError(err) - - violations, err := deploymentMatcher.MatchDeployment(nil, EnhancedDeployment{ - Deployment: c.deployment, - Images: []*storage.Image{c.image}, - }) - suite.Require().NoError(err) - - if c.expectViolation { - suite.NotEmpty(violations.AlertViolations) - } else { - suite.Empty(violations.AlertViolations) - } - }) - } -} - -func (suite *DefaultPoliciesTestSuite) TestContainerName() { - var deps []*storage.Deployment - for _, containerName := range []string{ - "container_staging", - "container_prod0", - "container_prod1", - "container_internal", - "external_container", - } { - dep := fixtures.GetDeployment().CloneVT() - dep.Containers = []*storage.Container{ - { - Name: containerName, - }, - } - deps = append(deps, dep) - } - - for _, testCase := range []struct { - value string - expectedMatches []string - negate bool - }{ - { - value: 
"container_[a-z0-9]*", - expectedMatches: []string{"container_staging", "container_prod0", "container_prod1", "container_internal"}, - negate: false, - }, - { - value: "container_prod[a-z0-9]*", - expectedMatches: []string{"container_prod0", "container_prod1"}, - negate: false, - }, - { - value: ".*external.*", - expectedMatches: []string{"external_container"}, - negate: false, - }, - { - value: "doesnotexist", - expectedMatches: nil, - negate: false, - }, - { - value: ".*internal.*", - expectedMatches: []string{"container_staging", "container_prod0", "container_prod1", "external_container"}, - negate: true, - }, - } { - c := testCase - - suite.T().Run(fmt.Sprintf("DeploymentMatcher %+v", c), func(t *testing.T) { - depMatcher, err := BuildDeploymentMatcher(policyWithSingleKeyValue(fieldnames.ContainerName, c.value, c.negate)) - require.NoError(t, err) - containerNameMatched := set.NewStringSet() - for _, dep := range deps { - violations, err := depMatcher.MatchDeployment(nil, enhancedDeployment(dep, suite.getImagesForDeployment(dep))) - require.NoError(t, err) - // No match in case we are testing for doesnotexist - if len(violations.AlertViolations) > 0 { - containerNameMatched.Add(dep.GetContainers()[0].GetName()) - require.Len(t, violations.AlertViolations, 1) - assert.Equal(t, fmt.Sprintf("Container has name '%s'", dep.GetContainers()[0].GetName()), violations.AlertViolations[0].GetMessage()) - } - } - assert.ElementsMatch(t, containerNameMatched.AsSlice(), c.expectedMatches, "Got %v for policy %v; expected: %v", containerNameMatched.AsSlice(), c.value, c.expectedMatches) - }) - } -} - -func (suite *DefaultPoliciesTestSuite) TestAllowPrivilegeEscalationPolicyCriteria() { - const containerAllowPrivEsc = "Container with Privilege Escalation allowed" - const containerNotAllowPrivEsc = "Container with Privilege Escalation not allowed" - - var deps []*storage.Deployment - for _, d := range []struct { - ContainerName string - AllowPrivilegeEscalation bool - }{ - { - ContainerName: containerAllowPrivEsc, - AllowPrivilegeEscalation: true, - }, - { - ContainerName: containerNotAllowPrivEsc, - AllowPrivilegeEscalation: false, - }, - } { - dep := fixtures.GetDeployment().CloneVT() - dep.Containers[0].Name = d.ContainerName - if d.AllowPrivilegeEscalation { - dep.Containers[0].SecurityContext.AllowPrivilegeEscalation = d.AllowPrivilegeEscalation - } - deps = append(deps, dep) - } - - for _, testCase := range []struct { - CaseName string - value string - expectedMatches []string - }{ - { - CaseName: "Policy for containers with privilege escalation allowed", - value: "true", - expectedMatches: []string{containerAllowPrivEsc}, - }, - { - CaseName: "Policy for containers with privilege escalation not allowed", - value: "false", - expectedMatches: []string{containerNotAllowPrivEsc}, - }, - } { - c := testCase - - suite.T().Run(c.CaseName, func(t *testing.T) { - depMatcher, err := BuildDeploymentMatcher(policyWithSingleKeyValue(fieldnames.AllowPrivilegeEscalation, c.value, false)) - require.NoError(t, err) - containerNameMatched := set.NewStringSet() - for _, dep := range deps { - violations, err := depMatcher.MatchDeployment(nil, enhancedDeployment(dep, suite.getImagesForDeployment(dep))) - require.NoError(t, err) - if len(violations.AlertViolations) > 0 { - containerNameMatched.Add(dep.GetContainers()[0].GetName()) - require.Len(t, violations.AlertViolations, 1) - if c.value == "true" { - assert.Equal(t, fmt.Sprintf("Container '%s' allows privilege escalation", dep.GetContainers()[0].GetName()), 
violations.AlertViolations[0].GetMessage()) - } else { - assert.Equal(t, fmt.Sprintf("Container '%s' does not allow privilege escalation", dep.GetContainers()[0].GetName()), violations.AlertViolations[0].GetMessage()) - } - } - } - assert.ElementsMatch(t, containerNameMatched.AsSlice(), c.expectedMatches, "Matched containers %v for policy %v; expected: %v", containerNameMatched.AsSlice(), c.value, c.expectedMatches) - }) - } -} - -func (suite *DefaultPoliciesTestSuite) TestAutomountServiceAccountToken() { - deployments := make(map[string]*storage.Deployment) - for _, d := range []struct { - DeploymentName string - ServiceAccountName string - AutomountServiceAccountTokens bool - }{ - { - DeploymentName: "DefaultSAAutomountedTokens", - ServiceAccountName: "default", - AutomountServiceAccountTokens: true, - }, - { - DeploymentName: "DefaultSANotAutomountedTokens", - ServiceAccountName: "default", - }, - { - DeploymentName: "CustomSAAutomountedTokens", - ServiceAccountName: "custom", - AutomountServiceAccountTokens: true, - }, - { - DeploymentName: "CustomSANotAutomountedTokens", - ServiceAccountName: "custom", - }, - } { - dep := fixtures.GetDeployment().CloneVT() - dep.Name = d.DeploymentName - dep.ServiceAccount = d.ServiceAccountName - dep.AutomountServiceAccountToken = d.AutomountServiceAccountTokens - deployments[dep.GetName()] = dep - } - - automountServiceAccountTokenPolicyGroup := &storage.PolicyGroup{ - FieldName: fieldnames.AutomountServiceAccountToken, - Values: []*storage.PolicyValue{{Value: "true"}}, - } - defaultServiceAccountPolicyGroup := &storage.PolicyGroup{ - FieldName: fieldnames.ServiceAccount, - Values: []*storage.PolicyValue{{Value: "default"}}, - } - - allAutomountServiceAccountTokenPolicy := policyWithGroups(storage.EventSource_NOT_APPLICABLE, automountServiceAccountTokenPolicyGroup) - defaultAutomountServiceAccountTokenPolicy := policyWithGroups(storage.EventSource_NOT_APPLICABLE, automountServiceAccountTokenPolicyGroup, defaultServiceAccountPolicyGroup) - - automountAlert := &storage.Alert_Violation{Message: "Deployment mounts the service account tokens."} - defaultServiceAccountAlert := &storage.Alert_Violation{Message: "Service Account is set to 'default'"} - - for _, c := range []struct { - CaseName string - Policy *storage.Policy - DeploymentName string - ExpectedAlerts []*storage.Alert_Violation - }{ - { - CaseName: "Automounted default service account tokens should alert on bare automount policy", - Policy: allAutomountServiceAccountTokenPolicy, - DeploymentName: "DefaultSAAutomountedTokens", - ExpectedAlerts: []*storage.Alert_Violation{automountAlert}, - }, - { - CaseName: "Automounted default service account tokens should alert on default only automount policy", - Policy: defaultAutomountServiceAccountTokenPolicy, - DeploymentName: "DefaultSAAutomountedTokens", - ExpectedAlerts: []*storage.Alert_Violation{automountAlert, defaultServiceAccountAlert}, - }, - { - CaseName: "Automounted custom service account tokens should alert on bare automount policy", - Policy: allAutomountServiceAccountTokenPolicy, - DeploymentName: "CustomSAAutomountedTokens", - ExpectedAlerts: []*storage.Alert_Violation{automountAlert}, - }, - { - CaseName: "Not automounted default service account should not alert on bare automount policy", - Policy: allAutomountServiceAccountTokenPolicy, - DeploymentName: "DefaultSANotAutomountedTokens", - }, - { - CaseName: "Not automounted custom service account should not alert on bare automount policy", - Policy: 
allAutomountServiceAccountTokenPolicy, - DeploymentName: "CustomSANotAutomountedTokens", - }, - } { - suite.T().Run(c.CaseName, func(t *testing.T) { - dep := deployments[c.DeploymentName] - matcher, err := BuildDeploymentMatcher(c.Policy) - suite.NoError(err, "deployment matcher creation must succeed") - violations, err := matcher.MatchDeployment(nil, enhancedDeployment(dep, suite.getImagesForDeployment(dep))) - suite.NoError(err, "deployment matcher run must succeed") - suite.Empty(violations.ProcessViolation) - protoassert.SlicesEqual(suite.T(), c.ExpectedAlerts, violations.AlertViolations) - }) - } -} - -func (suite *DefaultPoliciesTestSuite) TestRuntimeClass() { - var deps []*storage.Deployment - for _, runtimeClass := range []string{ - "", - "blah", - } { - dep := fixtures.GetDeployment().CloneVT() - dep.RuntimeClass = runtimeClass - deps = append(deps, dep) - } - - for _, testCase := range []struct { - value string - negate bool - expectedMatches []string - }{ - { - value: ".*", - negate: false, - expectedMatches: []string{"", "blah"}, - }, - { - value: ".+", - negate: false, - expectedMatches: []string{"blah"}, - }, - { - value: ".+", - negate: true, - expectedMatches: []string{""}, - }, - { - value: "blah", - negate: true, - expectedMatches: []string{""}, - }, - } { - c := testCase - - suite.T().Run(fmt.Sprintf("%+v", c), func(t *testing.T) { - depMatcher, err := BuildDeploymentMatcher(policyWithSingleKeyValue(fieldnames.RuntimeClass, c.value, c.negate)) - require.NoError(t, err) - matchedRuntimeClasses := set.NewStringSet() - for _, dep := range deps { - violations, err := depMatcher.MatchDeployment(nil, enhancedDeployment(dep, suite.getImagesForDeployment(dep))) - require.NoError(t, err) - if len(violations.AlertViolations) > 0 { - matchedRuntimeClasses.Add(dep.GetRuntimeClass()) - require.Len(t, violations.AlertViolations, 1) - assert.Equal(t, fmt.Sprintf("Runtime Class is set to '%s'", dep.GetRuntimeClass()), violations.AlertViolations[0].GetMessage()) - } - } - assert.ElementsMatch(t, matchedRuntimeClasses.AsSlice(), c.expectedMatches, "Got %v for policy %v; expected: %v", matchedRuntimeClasses.AsSlice(), c.value, c.expectedMatches) - }) - } -} - -func (suite *DefaultPoliciesTestSuite) TestNamespace() { - var deps []*storage.Deployment - for _, namespace := range []string{ - "dep_staging", - "dep_prod0", - "dep_prod1", - "dep_internal", - "external_dep", - } { - dep := fixtures.GetDeployment().CloneVT() - dep.Namespace = namespace - deps = append(deps, dep) - } - - for _, testCase := range []struct { - value string - expectedMatches []string - negate bool - }{ - { - value: "dep_[a-z0-9]*", - expectedMatches: []string{"dep_staging", "dep_prod0", "dep_prod1", "dep_internal"}, - negate: false, - }, - { - value: "dep_prod[a-z0-9]*", - expectedMatches: []string{"dep_prod0", "dep_prod1"}, - negate: false, - }, - { - value: ".*external.*", - expectedMatches: []string{"external_dep"}, - negate: false, - }, - { - value: "doesnotexist", - expectedMatches: nil, - negate: false, - }, - { - value: ".*internal.*", - expectedMatches: []string{"dep_staging", "dep_prod0", "dep_prod1", "external_dep"}, - negate: true, - }, - } { - c := testCase - - suite.T().Run(fmt.Sprintf("DeploymentMatcher %+v", c), func(t *testing.T) { - depMatcher, err := BuildDeploymentMatcher(policyWithSingleKeyValue(fieldnames.Namespace, c.value, c.negate)) - require.NoError(t, err) - namespacesMatched := set.NewStringSet() - for _, dep := range deps { - violations, err := depMatcher.MatchDeployment(nil, 
enhancedDeployment(dep, suite.getImagesForDeployment(dep))) - require.NoError(t, err) - // No match in case we are testing for doesnotexist - if len(violations.AlertViolations) > 0 { - namespacesMatched.Add(dep.GetNamespace()) - require.Len(t, violations.AlertViolations, 1) - assert.Equal(t, fmt.Sprintf("Namespace has name '%s'", dep.GetNamespace()), violations.AlertViolations[0].GetMessage()) - } - } - assert.ElementsMatch(t, namespacesMatched.AsSlice(), c.expectedMatches, "Got %v for policy %v; expected: %v", namespacesMatched.AsSlice(), c.value, c.expectedMatches) - }) - } -} - -func (suite *DefaultPoliciesTestSuite) TestDropCaps() { - testCaps := []string{"SYS_MODULE", "SYS_NICE", "SYS_PTRACE", "ALL"} - - deployments := make(map[string]*storage.Deployment) - for _, idxs := range [][]int{{}, {0}, {1}, {2}, {0, 1}, {1, 2}, {0, 1, 2}, {3}} { - dep := fixtures.GetDeployment().CloneVT() - dep.Containers[0].SecurityContext.DropCapabilities = make([]string, 0, len(idxs)) - for _, idx := range idxs { - dep.Containers[0].SecurityContext.DropCapabilities = append(dep.Containers[0].SecurityContext.DropCapabilities, testCaps[idx]) - } - deployments[strings.ReplaceAll(strings.Join(dep.GetContainers()[0].GetSecurityContext().GetDropCapabilities(), ","), "SYS_", "")] = dep - } - - assertMessageMatches := func(t *testing.T, depRef string, violations []*storage.Alert_Violation) { - depRefToExpectedMsg := map[string]string{ - "": "no capabilities", - "ALL": "all capabilities", - "MODULE": "SYS_MODULE", - "NICE": "SYS_NICE", - "PTRACE": "SYS_PTRACE", - "MODULE,NICE": "SYS_MODULE and SYS_NICE", - "NICE,PTRACE": "SYS_NICE and SYS_PTRACE", - "MODULE,NICE,PTRACE": "SYS_MODULE, SYS_NICE, and SYS_PTRACE", - } - require.Len(t, violations, 1) - assert.Equal(t, fmt.Sprintf("Container 'nginx110container' does not drop expected capabilities (drops %s)", depRefToExpectedMsg[depRef]), violations[0].GetMessage()) - } - - for _, testCase := range []struct { - values []string - op storage.BooleanOperator - expectedMatches []string - }{ - { - // Nothing drops this capability - []string{"SYSLOG"}, - storage.BooleanOperator_OR, - []string{"", "MODULE", "NICE", "PTRACE", "MODULE,NICE", "NICE,PTRACE", "MODULE,NICE,PTRACE"}, - }, - { - []string{"SYS_NICE"}, - storage.BooleanOperator_OR, - []string{"", "MODULE", "PTRACE"}, - }, - { - []string{"SYS_NICE", "SYS_PTRACE"}, - storage.BooleanOperator_OR, - []string{"", "MODULE"}, - }, - { - []string{"SYS_NICE", "SYS_PTRACE"}, - storage.BooleanOperator_AND, - []string{"", "MODULE", "PTRACE", "NICE", "MODULE,NICE"}, - }, - { - []string{"ALL"}, - storage.BooleanOperator_AND, - []string{"", "MODULE", "NICE", "PTRACE", "MODULE,NICE", "NICE,PTRACE", "MODULE,NICE,PTRACE"}, - }, - } { - c := testCase - suite.T().Run(fmt.Sprintf("%+v", c), func(t *testing.T) { - matcher, err := BuildDeploymentMatcher(policyWithSingleFieldAndValues(fieldnames.DropCaps, c.values, false, c.op)) - require.NoError(t, err) - matched := set.NewStringSet() - for depRef, dep := range deployments { - violations, err := matcher.MatchDeployment(nil, enhancedDeployment(dep, suite.getImagesForDeployment(dep))) - require.NoError(t, err) - if len(violations.AlertViolations) > 0 { - matched.Add(depRef) - assertMessageMatches(t, depRef, violations.AlertViolations) - } - } - assert.ElementsMatch(t, matched.AsSlice(), c.expectedMatches, "Got %v, expected: %v", matched.AsSlice(), c.expectedMatches) - }) - } -} - -func (suite *DefaultPoliciesTestSuite) TestAddCaps() { - testCaps := []string{"SYS_MODULE", "SYS_NICE", "SYS_PTRACE"} 
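// The OR/AND cases above pin down the drop-capabilities semantics: with OR,
// dropping any one of the listed capabilities satisfies the policy, while AND
// requires every listed capability to be dropped. A minimal sketch of one
// such check, assuming this package's helpers and the fixtures import used
// throughout this file:
//
//	dep := fixtures.GetDeployment().CloneVT()
//	dep.Containers[0].SecurityContext.DropCapabilities = []string{"SYS_NICE"}
//	policy := policyWithSingleFieldAndValues(fieldnames.DropCaps,
//		[]string{"SYS_NICE", "SYS_PTRACE"}, false, storage.BooleanOperator_AND)
//	m, err := BuildDeploymentMatcher(policy) // err expected to be nil
//	violations, err := m.MatchDeployment(nil, enhancedDeployment(dep, nil))
//	// With AND, SYS_PTRACE is still missing, so violations.AlertViolations is
//	// non-empty; with OR, dropping SYS_NICE alone would already satisfy the policy.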
- - deployments := make(map[string]*storage.Deployment) - for _, idxs := range [][]int{{}, {0}, {1}, {2}, {0, 1}, {1, 2}, {0, 1, 2}} { - dep := fixtures.GetDeployment().CloneVT() - dep.Containers[0].SecurityContext.AddCapabilities = make([]string, 0, len(idxs)) - for _, idx := range idxs { - dep.Containers[0].SecurityContext.AddCapabilities = append(dep.Containers[0].SecurityContext.AddCapabilities, testCaps[idx]) - } - deployments[strings.ReplaceAll(strings.Join(dep.GetContainers()[0].GetSecurityContext().GetAddCapabilities(), ","), "SYS_", "")] = dep - } - - for _, testCase := range []struct { - values []string - op storage.BooleanOperator - expectedMatches []string - }{ - { - // Nothing adds this capability - []string{"SYSLOG"}, - storage.BooleanOperator_OR, - []string{}, - }, - { - []string{"SYS_NICE"}, - storage.BooleanOperator_OR, - []string{"NICE", "MODULE,NICE", "NICE,PTRACE", "MODULE,NICE,PTRACE"}, - }, - { - []string{"SYS_NICE", "SYS_PTRACE"}, - storage.BooleanOperator_OR, - []string{"NICE", "PTRACE", "MODULE,NICE", "NICE,PTRACE", "MODULE,NICE,PTRACE"}, - }, - { - []string{"SYS_NICE", "SYS_PTRACE"}, - storage.BooleanOperator_AND, - []string{"NICE,PTRACE", "MODULE,NICE,PTRACE"}, - }, - } { - c := testCase - suite.T().Run(fmt.Sprintf("%+v", c), func(t *testing.T) { - matcher, err := BuildDeploymentMatcher(policyWithSingleFieldAndValues(fieldnames.AddCaps, c.values, false, c.op)) - require.NoError(t, err) - matched := set.NewStringSet() - for depRef, dep := range deployments { - violations, err := matcher.MatchDeployment(nil, enhancedDeployment(dep, suite.getImagesForDeployment(dep))) - require.NoError(t, err) - if len(violations.AlertViolations) > 0 { - matched.Add(depRef) - require.Len(t, violations.AlertViolations, 1) - } - } - assert.ElementsMatch(t, matched.AsSlice(), c.expectedMatches, "Got %v, expected: %v", matched.AsSlice(), c.expectedMatches) - }) - } -} - -func (suite *DefaultPoliciesTestSuite) TestProcessBaseline() { - privilegedDep := fixtures.GetDeployment().CloneVT() - privilegedDep.Id = "PRIVILEGED" - suite.addDepAndImages(privilegedDep) - - nonPrivilegedDep := fixtures.GetDeployment().CloneVT() - nonPrivilegedDep.Id = "NOTPRIVILEGED" - nonPrivilegedDep.Containers[0].SecurityContext.Privileged = false - suite.addDepAndImages(nonPrivilegedDep) - - const aptGetKey = "apt-get" - const aptGet2Key = "apt-get2" - const curlKey = "curl" - const bashKey = "bash" - - indicators := make(map[string]map[string]*storage.ProcessIndicator) - for _, dep := range []*storage.Deployment{privilegedDep, nonPrivilegedDep} { - indicators[dep.GetId()] = map[string]*storage.ProcessIndicator{ - aptGetKey: suite.addIndicator(dep.GetId(), "apt-get", "install nginx", "/bin/apt-get", nil, 0), - aptGet2Key: suite.addIndicator(dep.GetId(), "apt-get", "update", "/bin/apt-get", nil, 0), - curlKey: suite.addIndicator(dep.GetId(), "curl", "https://stackrox.io", "/bin/curl", nil, 0), - bashKey: suite.addIndicator(dep.GetId(), "bash", "attach.sh", "/bin/bash", nil, 0), - } - } - processesNotInBaseline := map[string]set.StringSet{ - privilegedDep.GetId(): set.NewStringSet(aptGetKey, aptGet2Key, bashKey), - nonPrivilegedDep.GetId(): set.NewStringSet(aptGetKey, curlKey, bashKey), - } - - // Plain groups - aptGetGroup := policyGroupWithSingleKeyValue(fieldnames.ProcessName, "apt-get", false) - privilegedGroup := policyGroupWithSingleKeyValue(fieldnames.PrivilegedContainer, "true", false) - baselineGroup := policyGroupWithSingleKeyValue(fieldnames.UnexpectedProcessExecuted, "true", false) - - for _, testCase 
:= range []struct { - groups []*storage.PolicyGroup - - // Deployment ids to indicator keys - expectedMatches map[string][]string - expectedProcessMatches map[string][]string - // Deployment ids to violations - expectedViolations map[string][]*storage.Alert_Violation - }{ - { - groups: []*storage.PolicyGroup{aptGetGroup}, - // only process violation, no alert violation - expectedMatches: map[string][]string{}, - expectedProcessMatches: map[string][]string{ - privilegedDep.GetId(): {aptGetKey, aptGet2Key}, - nonPrivilegedDep.GetId(): {aptGetKey, aptGet2Key}, - }, - }, - { - groups: []*storage.PolicyGroup{baselineGroup}, - expectedMatches: map[string][]string{}, - expectedProcessMatches: map[string][]string{ - privilegedDep.GetId(): {aptGetKey, aptGet2Key, bashKey}, - nonPrivilegedDep.GetId(): {aptGetKey, curlKey, bashKey}, - }, - }, - - { - groups: []*storage.PolicyGroup{privilegedGroup}, - expectedMatches: map[string][]string{ - privilegedDep.GetId(): {aptGetKey, aptGet2Key, curlKey, bashKey}, - }, - expectedProcessMatches: map[string][]string{}, - expectedViolations: map[string][]*storage.Alert_Violation{ - privilegedDep.GetId(): processBaselineMessage(privilegedDep, false, true, "apt-get", "apt-get", "curl", "bash"), - }, - }, - { - groups: []*storage.PolicyGroup{aptGetGroup, baselineGroup}, - expectedMatches: map[string][]string{}, - expectedProcessMatches: map[string][]string{ - privilegedDep.GetId(): {aptGetKey, aptGet2Key}, - nonPrivilegedDep.GetId(): {aptGetKey}, - }, - }, - { - groups: []*storage.PolicyGroup{aptGetGroup, privilegedGroup}, - expectedMatches: map[string][]string{ - privilegedDep.GetId(): {aptGetKey, aptGet2Key}, - }, - expectedViolations: map[string][]*storage.Alert_Violation{ - privilegedDep.GetId(): processBaselineMessage(privilegedDep, false, true, "apt-get", "apt-get"), - }, - expectedProcessMatches: map[string][]string{ - privilegedDep.GetId(): {aptGetKey, aptGet2Key}, - }, - }, - { - groups: []*storage.PolicyGroup{privilegedGroup, baselineGroup}, - expectedMatches: map[string][]string{ - privilegedDep.GetId(): {aptGetKey, aptGet2Key, bashKey}, - }, - expectedProcessMatches: map[string][]string{ - privilegedDep.GetId(): {aptGetKey, aptGet2Key, bashKey}, - }, - }, - { - groups: []*storage.PolicyGroup{aptGetGroup, privilegedGroup, baselineGroup}, - expectedMatches: map[string][]string{ - privilegedDep.GetId(): {aptGetKey, aptGet2Key}, - }, - expectedProcessMatches: map[string][]string{ - privilegedDep.GetId(): {aptGetKey, aptGet2Key}, - }, - }, - } { - c := testCase - suite.T().Run(fmt.Sprintf("%+v", c.groups), func(t *testing.T) { - policy := policyWithGroups(storage.EventSource_DEPLOYMENT_EVENT, c.groups...) - - m, err := BuildDeploymentWithProcessMatcher(policy) - require.NoError(t, err) - - actualMatches := make(map[string][]string) - actualProcessMatches := make(map[string][]string) - actualViolations := make(map[string][]*storage.Alert_Violation) - for _, dep := range []*storage.Deployment{privilegedDep, nonPrivilegedDep} { - for _, key := range []string{aptGetKey, aptGet2Key, curlKey, bashKey} { - violations, err := m.MatchDeploymentWithProcess(nil, enhancedDeployment(dep, suite.getImagesForDeployment(dep)), indicators[dep.GetId()][key], processesNotInBaseline[dep.GetId()].Contains(key)) - suite.Require().NoError(err) - if len(violations.AlertViolations) > 0 { - actualMatches[dep.GetId()] = append(actualMatches[dep.GetId()], key) - actualViolations[dep.GetId()] = append(actualViolations[dep.GetId()], violations.AlertViolations...) 
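// Each deployment/process pair here reduces to a single matcher call; in
// isolation the pattern looks like this sketch (dep and indicator stand for a
// deployment and process indicator built with the helpers in this file). The
// trailing boolean marks the process as outside the deployment's baseline,
// which is what lets the UnexpectedProcessExecuted group fire:
//
//	policy := policyWithGroups(storage.EventSource_DEPLOYMENT_EVENT,
//		policyGroupWithSingleKeyValue(fieldnames.UnexpectedProcessExecuted, "true", false))
//	m, err := BuildDeploymentWithProcessMatcher(policy) // err expected to be nil
//	violations, err := m.MatchDeploymentWithProcess(nil,
//		enhancedDeployment(dep, nil), indicator, true /* not in baseline */)
//	// violations.ProcessViolation is set, while violations.AlertViolations stays
//	// empty unless another group (for example PrivilegedContainer) also matches.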
- } - if violations.ProcessViolation != nil { - actualProcessMatches[dep.GetId()] = append(actualProcessMatches[dep.GetId()], key) - } - - } - } - assert.Equal(t, c.expectedMatches, actualMatches) - assert.Equal(t, c.expectedProcessMatches, actualProcessMatches) - - for id, violations := range c.expectedViolations { - assert.Contains(t, actualViolations, id) - protoassert.ElementsMatch(t, violations, actualViolations[id]) - } - }) - } -} - -func (suite *DefaultPoliciesTestSuite) TestKubeEventConstraints() { - podExecGroup := policyGroupWithSingleKeyValue(fieldnames.KubeResource, "PODS_EXEC", false) - podAttachGroup := policyGroupWithSingleKeyValue(fieldnames.KubeResource, "PODS_ATTACH", false) - - aptGetGroup := policyGroupWithSingleKeyValue(fieldnames.ProcessName, "apt-get", false) - - for _, c := range []struct { - event *storage.KubernetesEvent - groups []*storage.PolicyGroup - expectedViolations []*storage.Alert_Violation - builderErr bool - withProcessSection bool - }{ - // PODS_EXEC test cases - { - event: podExecEvent("p1", "c1", "cmd"), - groups: []*storage.PolicyGroup{podExecGroup}, - expectedViolations: []*storage.Alert_Violation{podExecViolationMsg("p1", "c1", "cmd")}, - }, - { - event: podExecEvent("p1", "c1", ""), - groups: []*storage.PolicyGroup{podExecGroup}, - expectedViolations: []*storage.Alert_Violation{podExecViolationMsg("p1", "c1", "")}, - }, - - { - groups: []*storage.PolicyGroup{podExecGroup}, - }, - { - event: podPortForwardEvent("p1", 8000), - groups: []*storage.PolicyGroup{podExecGroup}, - }, - { - event: podPortForwardEvent("p1", 8000), - groups: []*storage.PolicyGroup{podExecGroup, aptGetGroup}, - builderErr: true, - }, - { - event: podExecEvent("p1", "c1", ""), - groups: []*storage.PolicyGroup{podExecGroup}, - expectedViolations: []*storage.Alert_Violation{podExecViolationMsg("p1", "c1", "")}, - withProcessSection: true, - }, - // PODS_ATTACH test cases - { - event: podAttachEvent("p1", "c1"), - groups: []*storage.PolicyGroup{podAttachGroup}, - expectedViolations: []*storage.Alert_Violation{podAttachViolationMsg("p1", "c1")}, - }, - { - event: podAttachEvent("p1", ""), - groups: []*storage.PolicyGroup{podAttachGroup}, - expectedViolations: []*storage.Alert_Violation{podAttachViolationMsg("p1", "")}, - }, - { - // No event provided, should not match - groups: []*storage.PolicyGroup{podAttachGroup}, - }, - { - // Port forward event should not match attach policy - event: podPortForwardEvent("p1", 8000), - groups: []*storage.PolicyGroup{podAttachGroup}, - }, - { - // Exec event should not match attach policy - event: podExecEvent("p1", "c1", "cmd"), - groups: []*storage.PolicyGroup{podAttachGroup}, - }, - { - // Attach event should not match exec policy - event: podAttachEvent("p1", "c1"), - groups: []*storage.PolicyGroup{podExecGroup}, - }, - { - // Attach policy with process group should fail builder - event: podAttachEvent("p1", "c1"), - groups: []*storage.PolicyGroup{podAttachGroup, aptGetGroup}, - builderErr: true, - }, - } { - suite.T().Run(fmt.Sprintf("%+v", c.groups), func(t *testing.T) { - policy := policyWithGroups(storage.EventSource_DEPLOYMENT_EVENT, c.groups...) 
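// The kube-event cases follow the same build-then-match shape; a condensed
// sketch of the exec case, assuming this file's podExecEvent and
// podExecViolationMsg helpers:
//
//	policy := policyWithGroups(storage.EventSource_DEPLOYMENT_EVENT,
//		policyGroupWithSingleKeyValue(fieldnames.KubeResource, "PODS_EXEC", false))
//	m, err := BuildKubeEventMatcher(policy) // err expected to be nil
//	violations, err := m.MatchKubeEvent(nil, podExecEvent("p1", "c1", "cmd"), &storage.Deployment{})
//	// violations.AlertViolations contains podExecViolationMsg("p1", "c1", "cmd");
//	// putting a process group such as ProcessName into the same section makes
//	// BuildKubeEventMatcher return an error instead, as the table verifies.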
- if c.withProcessSection { - policy.PolicySections = append(policy.PolicySections, - &storage.PolicySection{PolicyGroups: []*storage.PolicyGroup{aptGetGroup}}) - } - - m, err := BuildKubeEventMatcher(policy) - if c.builderErr { - require.Error(t, err) - return - } - require.NoError(t, err) - - actualViolations, err := m.MatchKubeEvent(nil, c.event, &storage.Deployment{}) - suite.Require().NoError(err) - - assert.Nil(t, actualViolations.ProcessViolation) - if len(c.expectedViolations) == 0 { - assert.Nil(t, actualViolations.AlertViolations) - } else { - protoassert.ElementsMatch(t, c.expectedViolations, actualViolations.AlertViolations) - } - }) - } -} -func (suite *DefaultPoliciesTestSuite) TestKubeEventDefaultPolicies() { - for _, c := range []struct { - policyName string - event *storage.KubernetesEvent - expectedViolations []*storage.Alert_Violation - }{ - { - policyName: "Kubernetes Actions: Exec into Pod", - event: podExecEvent("p1", "c1", "apt-get"), - expectedViolations: []*storage.Alert_Violation{podExecViolationMsg("p1", "c1", "apt-get")}, - }, - { - policyName: "Kubernetes Actions: Exec into Pod", - event: podPortForwardEvent("p1", 8000), - }, - // Event without CREATE. - { - policyName: "Kubernetes Actions: Exec into Pod", - event: &storage.KubernetesEvent{ - Object: &storage.KubernetesEvent_Object{ - Name: "p1", - Resource: storage.KubernetesEvent_Object_PODS_EXEC, - }, - ObjectArgs: &storage.KubernetesEvent_PodExecArgs_{ - PodExecArgs: &storage.KubernetesEvent_PodExecArgs{ - Container: "c1", - }, - }, - }, - expectedViolations: []*storage.Alert_Violation{podExecViolationMsg("p1", "c1", "")}, - }, - { - policyName: "Kubernetes Actions: Port Forward to Pod", - }, - { - policyName: "Kubernetes Actions: Port Forward to Pod", - event: podPortForwardEvent("p1", 8000), - expectedViolations: []*storage.Alert_Violation{podPortForwardViolationMsg("p1", 8000)}, - }, - { - policyName: "Kubernetes Actions: Port Forward to Pod", - event: &storage.KubernetesEvent{ - Object: &storage.KubernetesEvent_Object{ - Name: "p1", - Resource: storage.KubernetesEvent_Object_PODS_PORTFORWARD, - }, - ObjectArgs: &storage.KubernetesEvent_PodPortForwardArgs_{ - PodPortForwardArgs: &storage.KubernetesEvent_PodPortForwardArgs{ - Ports: []int32{8000}, - }, - }, - }, - expectedViolations: []*storage.Alert_Violation{podPortForwardViolationMsg("p1", 8000)}, - }, - } { - suite.T().Run(fmt.Sprintf("%s:%s", c.policyName, kubernetes.EventAsString(c.event)), func(t *testing.T) { - policy := suite.MustGetPolicy(c.policyName) - m, err := BuildKubeEventMatcher(policy) - require.NoError(t, err) - - actualViolations, err := m.MatchKubeEvent(nil, c.event, &storage.Deployment{}) - suite.Require().NoError(err) - - assert.Nil(t, actualViolations.ProcessViolation) - if len(c.expectedViolations) == 0 { - for _, a := range actualViolations.AlertViolations { - fmt.Printf("%v", protoutils.NewWrapper(a)) - } - - assert.Nil(t, actualViolations.AlertViolations) - } else { - protoassert.ElementsMatch(t, c.expectedViolations, actualViolations.AlertViolations) - } - }) - } -} - -func (suite *DefaultPoliciesTestSuite) TestNetworkBaselinePolicy() { - deployment := fixtures.GetDeployment().CloneVT() - suite.addDepAndImages(deployment) - - // Create a policy for triggering flows that are not in baseline - whitelistGroup := policyGroupWithSingleKeyValue(fieldnames.UnexpectedNetworkFlowDetected, "true", false) - - policy := policyWithGroups(storage.EventSource_DEPLOYMENT_EVENT, whitelistGroup) - m, err := 
BuildDeploymentWithNetworkFlowMatcher(policy) - suite.NoError(err) - - srcName, dstName, port, protocol := "deployment-name", "ext-source-name", 1, storage.L4Protocol_L4_PROTOCOL_TCP - flow := &augmentedobjs.NetworkFlowDetails{ - SrcEntityName: srcName, - SrcEntityType: storage.NetworkEntityInfo_DEPLOYMENT, - DstEntityName: dstName, - DstEntityType: storage.NetworkEntityInfo_DEPLOYMENT, - DstPort: uint32(port), - L4Protocol: protocol, - NotInNetworkBaseline: true, - LastSeenTimestamp: time.Now(), - } - - violations, err := m.MatchDeploymentWithNetworkFlowInfo(nil, enhancedDeployment(deployment, suite.getImagesForDeployment(deployment)), flow) - suite.NoError(err) - assertNetworkBaselineMessagesEqual( - suite, - violations.AlertViolations, - []*storage.Alert_Violation{networkBaselineMessage(suite, flow)}) - - // And if the flow is in the baseline, no violations should exist - flow.NotInNetworkBaseline = false - violations, err = m.MatchDeploymentWithNetworkFlowInfo(nil, enhancedDeployment(deployment, suite.getImagesForDeployment(deployment)), flow) - suite.NoError(err) - suite.Empty(violations) -} - -func (suite *DefaultPoliciesTestSuite) TestReplicasPolicyCriteria() { - for _, testCase := range []struct { - caseName string - replicas int64 - policyValue string - negate bool - alerts []*storage.Alert_Violation - }{ - { - caseName: "Should raise when replicas==5.", - replicas: 5, - policyValue: "5", - negate: false, - alerts: []*storage.Alert_Violation{{Message: "Replicas is set to '5'"}}, - }, - { - caseName: "Should not raise unless replicas==3.", - replicas: 5, - policyValue: "3", - negate: false, - alerts: nil, - }, - { - caseName: "Should raise unless replicas==3.", - replicas: 5, - policyValue: "3", - negate: true, - alerts: []*storage.Alert_Violation{{Message: "Replicas is set to '5'"}}, - }, - { - caseName: "Should raise when replicas>=5.", - replicas: 5, - policyValue: ">=5", - negate: false, - alerts: []*storage.Alert_Violation{{Message: "Replicas is set to '5'"}}, - }, - { - caseName: "Should raise when replicas<=5.", - replicas: 5, - policyValue: "<=5", - negate: false, - alerts: []*storage.Alert_Violation{{Message: "Replicas is set to '5'"}}, - }, - { - caseName: "Should raise when replicas<5.", - replicas: 1, - policyValue: "<5", - negate: false, - alerts: []*storage.Alert_Violation{{Message: "Replicas is set to '1'"}}, - }, - { - caseName: "Should raise when replicas>5.", - replicas: 10, - policyValue: ">5", - negate: false, - alerts: []*storage.Alert_Violation{{Message: "Replicas is set to '10'"}}, - }, - } { - suite.Run(testCase.caseName, func() { - deployment := fixtures.GetDeployment().CloneVT() - deployment.Replicas = testCase.replicas - policy := policyWithSingleKeyValue(fieldnames.Replicas, testCase.policyValue, testCase.negate) - - matcher, err := BuildDeploymentMatcher(policy) - suite.NoError(err, "deployment matcher creation must succeed") - violations, err := matcher.MatchDeployment(nil, enhancedDeployment(deployment, suite.getImagesForDeployment(deployment))) - suite.NoError(err, "deployment matcher run must succeed") - - suite.Empty(violations.ProcessViolation) - protoassert.SlicesEqual(suite.T(), violations.AlertViolations, testCase.alerts) - }) - } -} - -func (suite *DefaultPoliciesTestSuite) TestLivenessProbePolicyCriteria() { - for _, testCase := range []struct { - caseName string - containers []*storage.Container - policyValue string - alerts []*storage.Alert_Violation - }{ - { - caseName: "Should raise alert since liveness probe is defined.", - containers: 
[]*storage.Container{ - {Name: "container", LivenessProbe: &storage.LivenessProbe{Defined: true}}, - }, - policyValue: "true", - alerts: []*storage.Alert_Violation{ - {Message: "Liveness probe is defined for container 'container'"}, - }, - }, - { - caseName: "Should not raise alert since liveness probe is defined.", - containers: []*storage.Container{ - {Name: "container", LivenessProbe: &storage.LivenessProbe{Defined: true}}, - }, - policyValue: "false", - alerts: nil, - }, - { - caseName: "Should not raise alert since liveness probe is not defined.", - containers: []*storage.Container{ - {Name: "container", LivenessProbe: &storage.LivenessProbe{Defined: false}}, - }, - policyValue: "true", - alerts: nil, - }, - { - caseName: "Should raise alert since liveness probe is not defined.", - containers: []*storage.Container{ - {Name: "container", LivenessProbe: &storage.LivenessProbe{Defined: false}}, - }, - policyValue: "false", - alerts: []*storage.Alert_Violation{ - {Message: "Liveness probe is not defined for container 'container'"}, - }, - }, - { - caseName: "Should raise alert for both containers.", - containers: []*storage.Container{ - {Name: "container-1", LivenessProbe: &storage.LivenessProbe{Defined: false}}, - {Name: "container-2", LivenessProbe: &storage.LivenessProbe{Defined: false}}, - }, - policyValue: "false", - alerts: []*storage.Alert_Violation{ - {Message: "Liveness probe is not defined for container 'container-1'"}, - {Message: "Liveness probe is not defined for container 'container-2'"}, - }, - }, - { - caseName: "Should raise alert only for container-2.", - containers: []*storage.Container{ - {Name: "container-1", LivenessProbe: &storage.LivenessProbe{Defined: true}}, - {Name: "container-2", LivenessProbe: &storage.LivenessProbe{Defined: false}}, - }, - policyValue: "false", - alerts: []*storage.Alert_Violation{ - {Message: "Liveness probe is not defined for container 'container-2'"}, - }, - }, - } { - suite.Run(testCase.caseName, func() { - deployment := fixtures.GetDeployment().CloneVT() - deployment.Containers = testCase.containers - policy := policyWithSingleKeyValue(fieldnames.LivenessProbeDefined, testCase.policyValue, false) - - matcher, err := BuildDeploymentMatcher(policy) - suite.NoError(err, "deployment matcher creation must succeed") - violations, err := matcher.MatchDeployment(nil, enhancedDeployment(deployment, suite.getImagesForDeployment(deployment))) - suite.NoError(err, "deployment matcher run must succeed") - - suite.Empty(violations.ProcessViolation) - protoassert.SlicesEqual(suite.T(), violations.AlertViolations, testCase.alerts) - }) - } -} - -func (suite *DefaultPoliciesTestSuite) getViolations(policy *storage.Policy, dep EnhancedDeployment) Violations { - matcher, err := BuildDeploymentMatcher(policy) - suite.NoError(err, "deployment matcher creation must succeed") - violations, err := matcher.MatchDeployment(nil, dep) - suite.NoError(err, "deployment matcher run must succeed") - suite.Empty(violations.ProcessViolation) - return violations -} - -func (suite *DefaultPoliciesTestSuite) TestNetworkPolicyFields() { - testCases := map[string]struct { - netpolsApplied *augmentedobjs.NetworkPoliciesApplied - alerts []*storage.Alert_Violation - }{ - "Missing Ingress Network Policy": { - netpolsApplied: &augmentedobjs.NetworkPoliciesApplied{ - HasIngressNetworkPolicy: false, - HasEgressNetworkPolicy: true, - }, - alerts: []*storage.Alert_Violation{ - {Message: "The deployment is missing Ingress Network Policy.", Type: storage.Alert_Violation_NETWORK_POLICY}, 
- }, - }, - "Missing Egress Network Policy": { - netpolsApplied: &augmentedobjs.NetworkPoliciesApplied{ - HasIngressNetworkPolicy: true, - HasEgressNetworkPolicy: false, - }, - alerts: []*storage.Alert_Violation{ - {Message: "The deployment is missing Egress Network Policy.", Type: storage.Alert_Violation_NETWORK_POLICY}, - }, - }, - "Both policies missing": { - netpolsApplied: &augmentedobjs.NetworkPoliciesApplied{ - HasIngressNetworkPolicy: false, - HasEgressNetworkPolicy: false, - }, - alerts: []*storage.Alert_Violation{ - {Message: "The deployment is missing Ingress Network Policy.", Type: storage.Alert_Violation_NETWORK_POLICY}, - {Message: "The deployment is missing Egress Network Policy.", Type: storage.Alert_Violation_NETWORK_POLICY}, - }, - }, - "No alerts": { - netpolsApplied: &augmentedobjs.NetworkPoliciesApplied{ - HasIngressNetworkPolicy: true, - HasEgressNetworkPolicy: true, - }, - alerts: []*storage.Alert_Violation(nil), - }, - "No violations on nil augmentedobj": { - netpolsApplied: nil, - alerts: []*storage.Alert_Violation(nil), - }, - "Policies attached to augmentedobj": { - netpolsApplied: &augmentedobjs.NetworkPoliciesApplied{ - HasIngressNetworkPolicy: false, - HasEgressNetworkPolicy: true, - Policies: map[string]*storage.NetworkPolicy{ - "ID1": {Id: "ID1", Name: "policy1"}, - }, - }, - alerts: []*storage.Alert_Violation{ - { - Message: "The deployment is missing Ingress Network Policy.", - Type: storage.Alert_Violation_NETWORK_POLICY, - MessageAttributes: &storage.Alert_Violation_KeyValueAttrs_{ - KeyValueAttrs: &storage.Alert_Violation_KeyValueAttrs{ - Attrs: []*storage.Alert_Violation_KeyValueAttrs_KeyValueAttr{ - {Key: printer.PolicyID, Value: "ID1"}, - {Key: printer.PolicyName, Value: "policy1"}, - }, - }, - }, - }, - }, - }, - } - - for name, testCase := range testCases { - suite.Run(name, func() { - deployment := fixtures.GetDeployment().CloneVT() - missingIngressPolicy := policyWithSingleKeyValue(fieldnames.HasIngressNetworkPolicy, "false", false) - missingEgressPolicy := policyWithSingleKeyValue(fieldnames.HasEgressNetworkPolicy, "false", false) - - enhanced := enhancedDeploymentWithNetworkPolicies( - deployment, - suite.getImagesForDeployment(deployment), - testCase.netpolsApplied, - ) - - v1 := suite.getViolations(missingIngressPolicy, enhanced) - v2 := suite.getViolations(missingEgressPolicy, enhanced) - - allAlerts := append(v1.AlertViolations, v2.AlertViolations...) 
- for i, expected := range testCase.alerts { - suite.Equal(expected.GetType(), allAlerts[i].GetType()) - suite.Equal(expected.GetMessage(), allAlerts[i].GetMessage()) - protoassert.Equal(suite.T(), expected.GetKeyValueAttrs(), allAlerts[i].GetKeyValueAttrs()) - // We do not want to compare time, as the violation timestamp uses now() - suite.NotNil(allAlerts[i].GetTime()) - } - }) - } -} - -func (suite *DefaultPoliciesTestSuite) TestReadinessProbePolicyCriteria() { - for _, testCase := range []struct { - caseName string - containers []*storage.Container - policyValue string - alerts []*storage.Alert_Violation - }{ - { - caseName: "Should raise alert since readiness probe is defined.", - containers: []*storage.Container{ - {Name: "container", ReadinessProbe: &storage.ReadinessProbe{Defined: true}}, - }, - policyValue: "true", - alerts: []*storage.Alert_Violation{ - {Message: "Readiness probe is defined for container 'container'"}, - }, - }, - { - caseName: "Should not raise alert since readiness probe is defined.", - containers: []*storage.Container{ - {Name: "container", ReadinessProbe: &storage.ReadinessProbe{Defined: true}}, - }, - policyValue: "false", - alerts: nil, - }, - { - caseName: "Should not raise alert since readiness probe is not defined.", - containers: []*storage.Container{ - {Name: "container", ReadinessProbe: &storage.ReadinessProbe{Defined: false}}, - }, - policyValue: "true", - alerts: nil, - }, - { - caseName: "Should raise alert since readiness probe is not defined.", - containers: []*storage.Container{ - {Name: "container", ReadinessProbe: &storage.ReadinessProbe{Defined: false}}, - }, - policyValue: "false", - alerts: []*storage.Alert_Violation{ - {Message: "Readiness probe is not defined for container 'container'"}, - }, - }, - { - caseName: "Should raise alert for both containers.", - containers: []*storage.Container{ - {Name: "container-1", ReadinessProbe: &storage.ReadinessProbe{Defined: false}}, - {Name: "container-2", ReadinessProbe: &storage.ReadinessProbe{Defined: false}}, - }, - policyValue: "false", - alerts: []*storage.Alert_Violation{ - {Message: "Readiness probe is not defined for container 'container-1'"}, - {Message: "Readiness probe is not defined for container 'container-2'"}, - }, - }, - { - caseName: "Should raise alert only for container-2.", - containers: []*storage.Container{ - {Name: "container-1", ReadinessProbe: &storage.ReadinessProbe{Defined: true}}, - {Name: "container-2", ReadinessProbe: &storage.ReadinessProbe{Defined: false}}, - }, - policyValue: "false", - alerts: []*storage.Alert_Violation{ - {Message: "Readiness probe is not defined for container 'container-2'"}, - }, - }, - } { - suite.Run(testCase.caseName, func() { - deployment := fixtures.GetDeployment().CloneVT() - deployment.Containers = testCase.containers - policy := policyWithSingleKeyValue(fieldnames.ReadinessProbeDefined, testCase.policyValue, false) - - matcher, err := BuildDeploymentMatcher(policy) - suite.NoError(err, "deployment matcher creation must succeed") - violations, err := matcher.MatchDeployment(nil, enhancedDeployment(deployment, suite.getImagesForDeployment(deployment))) - suite.NoError(err, "deployment matcher run must succeed") - - suite.Empty(violations.ProcessViolation) - protoassert.SlicesEqual(suite.T(), violations.AlertViolations, testCase.alerts) - }) - } -} - -func newIndicator(deployment *storage.Deployment, name, args, execFilePath string) *storage.ProcessIndicator { - return &storage.ProcessIndicator{ - Id: uuid.NewV4().String(), - ContainerName: 
deployment.GetContainers()[0].GetName(), - Signal: &storage.ProcessSignal{ - Name: name, - Args: args, - ExecFilePath: execFilePath, - }, - } -} - -func BenchmarkProcessPolicies(b *testing.B) { - privilegedDep := fixtures.GetDeployment().CloneVT() - privilegedDep.Id = "PRIVILEGED" - images := []*storage.Image{fixtures.GetImage(), fixtures.GetImage()} - - nonPrivilegedDep := fixtures.GetDeployment().CloneVT() - nonPrivilegedDep.Id = "NOTPRIVILEGED" - nonPrivilegedDep.Containers[0].SecurityContext.Privileged = false - - const aptGetKey = "apt-get" - const aptGet2Key = "apt-get2" - const curlKey = "curl" - const bashKey = "bash" - - indicators := make(map[string]map[string]*storage.ProcessIndicator) - for _, dep := range []*storage.Deployment{privilegedDep, nonPrivilegedDep} { - indicators[dep.GetId()] = map[string]*storage.ProcessIndicator{ - aptGetKey: newIndicator(dep, "apt-get", "install nginx", "/bin/apt-get"), - aptGet2Key: newIndicator(dep, "apt-get", "update", "/bin/apt-get"), - curlKey: newIndicator(dep, "curl", "https://stackrox.io", "/bin/curl"), - bashKey: newIndicator(dep, "bash", "attach.sh", "/bin/bash"), - } - } - processesNotInBaseline := map[string]set.StringSet{ - privilegedDep.GetId(): set.NewStringSet(aptGetKey, aptGet2Key, bashKey), - nonPrivilegedDep.GetId(): set.NewStringSet(aptGetKey, curlKey, bashKey), - } - - // Plain groups - aptGetGroup := policyGroupWithSingleKeyValue(fieldnames.ProcessName, "apt-get", false) - privilegedGroup := policyGroupWithSingleKeyValue(fieldnames.PrivilegedContainer, "true", false) - baselineGroup := policyGroupWithSingleKeyValue(fieldnames.UnexpectedProcessExecuted, "true", false) - - for _, testCase := range []struct { - groups []*storage.PolicyGroup - - // Deployment ids to indicator keys - expectedMatches map[string][]string - expectedProcessMatches map[string][]string - // Deployment ids to violations - expectedViolations map[string][]*storage.Alert_Violation - }{ - { - groups: []*storage.PolicyGroup{aptGetGroup}, - // only process violation, no alert violation - expectedMatches: map[string][]string{}, - expectedProcessMatches: map[string][]string{ - privilegedDep.GetId(): {aptGetKey, aptGet2Key}, - nonPrivilegedDep.GetId(): {aptGetKey, aptGet2Key}, - }, - }, - { - groups: []*storage.PolicyGroup{baselineGroup}, - expectedMatches: map[string][]string{ - privilegedDep.GetId(): {aptGetKey, aptGet2Key, bashKey}, - nonPrivilegedDep.GetId(): {aptGetKey, curlKey, bashKey}, - }, - expectedProcessMatches: map[string][]string{ - privilegedDep.GetId(): {aptGetKey, aptGet2Key, bashKey}, - nonPrivilegedDep.GetId(): {aptGetKey, curlKey, bashKey}, - }, - expectedViolations: map[string][]*storage.Alert_Violation{ - privilegedDep.GetId(): processBaselineMessage(privilegedDep, true, false, "apt-get", "apt-get", "bash"), - nonPrivilegedDep.GetId(): processBaselineMessage(nonPrivilegedDep, true, false, "apt-get", "bash", "curl"), - }, - }, - - { - groups: []*storage.PolicyGroup{privilegedGroup}, - expectedMatches: map[string][]string{ - privilegedDep.GetId(): {aptGetKey, aptGet2Key, curlKey, bashKey}, - }, - expectedProcessMatches: map[string][]string{}, - expectedViolations: map[string][]*storage.Alert_Violation{ - privilegedDep.GetId(): processBaselineMessage(privilegedDep, false, true, "apt-get", "apt-get", "curl", "bash"), - }, - }, - { - groups: []*storage.PolicyGroup{aptGetGroup, baselineGroup}, - expectedMatches: map[string][]string{ - privilegedDep.GetId(): {aptGetKey, aptGet2Key}, - nonPrivilegedDep.GetId(): {aptGetKey}, - }, - 
expectedViolations: map[string][]*storage.Alert_Violation{ - privilegedDep.GetId(): processBaselineMessage(privilegedDep, true, false, "apt-get", "apt-get"), - nonPrivilegedDep.GetId(): processBaselineMessage(nonPrivilegedDep, true, false, "apt-get"), - }, - expectedProcessMatches: map[string][]string{ - privilegedDep.GetId(): {aptGetKey, aptGet2Key}, - nonPrivilegedDep.GetId(): {aptGetKey}, - }, - }, - { - groups: []*storage.PolicyGroup{aptGetGroup, privilegedGroup}, - expectedMatches: map[string][]string{ - privilegedDep.GetId(): {aptGetKey, aptGet2Key}, - }, - expectedViolations: map[string][]*storage.Alert_Violation{ - privilegedDep.GetId(): processBaselineMessage(privilegedDep, false, true, "apt-get", "apt-get"), - }, - expectedProcessMatches: map[string][]string{ - privilegedDep.GetId(): {aptGetKey, aptGet2Key}, - }, - }, - { - groups: []*storage.PolicyGroup{privilegedGroup, baselineGroup}, - expectedMatches: map[string][]string{ - privilegedDep.GetId(): {aptGetKey, aptGet2Key, bashKey}, - }, - expectedViolations: map[string][]*storage.Alert_Violation{ - privilegedDep.GetId(): processBaselineMessage(privilegedDep, true, true, "apt-get", "apt-get", "bash"), - }, - expectedProcessMatches: map[string][]string{ - privilegedDep.GetId(): {aptGetKey, aptGet2Key, bashKey}, - }, - }, - { - groups: []*storage.PolicyGroup{aptGetGroup, privilegedGroup, baselineGroup}, - expectedMatches: map[string][]string{ - privilegedDep.GetId(): {aptGetKey, aptGet2Key}, - }, - expectedViolations: map[string][]*storage.Alert_Violation{ - privilegedDep.GetId(): processBaselineMessage(privilegedDep, true, true, "apt-get", "apt-get"), - }, - expectedProcessMatches: map[string][]string{ - privilegedDep.GetId(): {aptGetKey, aptGet2Key}, - }, - }, - } { - c := testCase - b.Run(fmt.Sprintf("%+v", c.groups), func(b *testing.B) { - policy := policyWithGroups(storage.EventSource_DEPLOYMENT_EVENT, c.groups...) 
- m, err := BuildDeploymentWithProcessMatcher(policy) - require.NoError(b, err) - - b.ResetTimer() - for b.Loop() { - for _, dep := range []*storage.Deployment{privilegedDep, nonPrivilegedDep} { - for _, key := range []string{aptGetKey, aptGet2Key, curlKey, bashKey} { - _, err := m.MatchDeploymentWithProcess(nil, enhancedDeployment(dep, images), indicators[dep.GetId()][key], processesNotInBaseline[dep.GetId()].Contains(key)) - require.NoError(b, err) - } - } - } - }) - } - - policy := policyWithGroups(storage.EventSource_DEPLOYMENT_EVENT, aptGetGroup, privilegedGroup, baselineGroup) - m, err := BuildDeploymentWithProcessMatcher(policy) - require.NoError(b, err) - for _, dep := range []*storage.Deployment{privilegedDep, nonPrivilegedDep} { - for _, key := range []string{aptGetKey, aptGet2Key, curlKey, bashKey} { - indicator := indicators[dep.GetId()][key] - notInBaseline := processesNotInBaseline[dep.GetId()].Contains(key) - b.Run(fmt.Sprintf("benchmark caching: %s/%s", dep.GetId(), key), func(b *testing.B) { - var resNoCaching Violations - b.Run("no caching", func(b *testing.B) { - for b.Loop() { - var err error - resNoCaching, err = m.MatchDeploymentWithProcess(nil, enhancedDeployment(privilegedDep, images), indicator, notInBaseline) - require.NoError(b, err) - } - }) - - var resWithCaching Violations - b.Run("with caching", func(b *testing.B) { - var cache CacheReceptacle - for b.Loop() { - var err error - resWithCaching, err = m.MatchDeploymentWithProcess(&cache, enhancedDeployment(privilegedDep, images), indicator, notInBaseline) - require.NoError(b, err) - } - }) - assertViolations(b, resNoCaching, resWithCaching) - }) - } - } - -} - -func podExecViolationMsg(pod, container, command string) *storage.Alert_Violation { - if command == "" { - return &storage.Alert_Violation{ - Message: fmt.Sprintf("Kubernetes API received exec request into pod '%s' container '%s'", pod, container), - Type: storage.Alert_Violation_K8S_EVENT, - MessageAttributes: &storage.Alert_Violation_KeyValueAttrs_{ - KeyValueAttrs: &storage.Alert_Violation_KeyValueAttrs{ - Attrs: []*storage.Alert_Violation_KeyValueAttrs_KeyValueAttr{ - {Key: "pod", Value: pod}, - {Key: "container", Value: container}, - }, - }, - }, - } - } - - return &storage.Alert_Violation{ - Message: fmt.Sprintf("Kubernetes API received exec '%s' request into pod '%s' container '%s'", - command, pod, container), - Type: storage.Alert_Violation_K8S_EVENT, - MessageAttributes: &storage.Alert_Violation_KeyValueAttrs_{ - KeyValueAttrs: &storage.Alert_Violation_KeyValueAttrs{ - Attrs: []*storage.Alert_Violation_KeyValueAttrs_KeyValueAttr{ - {Key: "pod", Value: pod}, - {Key: "container", Value: container}, - {Key: "commands", Value: command}, - }, - }, - }, - } -} - -func podPortForwardViolationMsg(pod string, port int) *storage.Alert_Violation { - return &storage.Alert_Violation{ - Message: fmt.Sprintf("Kubernetes API received port forward request to pod '%s' ports '%s'", pod, strconv.Itoa(port)), - Type: storage.Alert_Violation_K8S_EVENT, - MessageAttributes: &storage.Alert_Violation_KeyValueAttrs_{ - KeyValueAttrs: &storage.Alert_Violation_KeyValueAttrs{ - Attrs: []*storage.Alert_Violation_KeyValueAttrs_KeyValueAttr{ - {Key: "pod", Value: pod}, - {Key: "ports", Value: strconv.Itoa(port)}, - }, - }, - }, - } -} - -func podExecEvent(pod, container, command string) *storage.KubernetesEvent { - return &storage.KubernetesEvent{ - Object: &storage.KubernetesEvent_Object{ - Name: pod, - Resource: storage.KubernetesEvent_Object_PODS_EXEC, - }, - ObjectArgs: 
&storage.KubernetesEvent_PodExecArgs_{ - PodExecArgs: &storage.KubernetesEvent_PodExecArgs{ - Container: container, - Commands: []string{command}, - }, - }, - } -} - -func podPortForwardEvent(pod string, port int32) *storage.KubernetesEvent { - return &storage.KubernetesEvent{ - Object: &storage.KubernetesEvent_Object{ - Name: pod, - Resource: storage.KubernetesEvent_Object_PODS_PORTFORWARD, - }, - ObjectArgs: &storage.KubernetesEvent_PodPortForwardArgs_{ - PodPortForwardArgs: &storage.KubernetesEvent_PodPortForwardArgs{ - Ports: []int32{port}, - }, - }, - } -} - -func podAttachEvent(pod, container string) *storage.KubernetesEvent { - return &storage.KubernetesEvent{ - Object: &storage.KubernetesEvent_Object{ - Name: pod, - Resource: storage.KubernetesEvent_Object_PODS_ATTACH, - }, - ObjectArgs: &storage.KubernetesEvent_PodAttachArgs_{ - PodAttachArgs: &storage.KubernetesEvent_PodAttachArgs{ - Container: container, - }, - }, - } -} - -func podAttachViolationMsg(pod, container string) *storage.Alert_Violation { - attrs := []*storage.Alert_Violation_KeyValueAttrs_KeyValueAttr{ - {Key: "pod", Value: pod}, - } - if container != "" { - attrs = append(attrs, &storage.Alert_Violation_KeyValueAttrs_KeyValueAttr{Key: "container", Value: container}) - } - - message := "Kubernetes API received attach request" - if pod != "" { - message = fmt.Sprintf("Kubernetes API received attach request to pod '%s'", pod) - if container != "" { - message = fmt.Sprintf("Kubernetes API received attach request to pod '%s' container '%s'", pod, container) - } - } - - return &storage.Alert_Violation{ - Message: message, - Type: storage.Alert_Violation_K8S_EVENT, - MessageAttributes: &storage.Alert_Violation_KeyValueAttrs_{ - KeyValueAttrs: &storage.Alert_Violation_KeyValueAttrs{ - Attrs: attrs, - }, - }, - } -} - -func assertViolations(t testing.TB, expected, actual Violations) { - t.Helper() - protoassert.Equal(t, expected.ProcessViolation, actual.ProcessViolation) - protoassert.SlicesEqual(t, expected.AlertViolations, actual.AlertViolations) -} diff --git a/pkg/booleanpolicy/deployment_policies_test.go b/pkg/booleanpolicy/deployment_policies_test.go deleted file mode 100644 index 3868b2484700f..0000000000000 --- a/pkg/booleanpolicy/deployment_policies_test.go +++ /dev/null @@ -1,1161 +0,0 @@ -package booleanpolicy - -import ( - "testing" - - "github.com/stackrox/rox/generated/storage" - "github.com/stackrox/rox/pkg/booleanpolicy/fieldnames" - "github.com/stackrox/rox/pkg/features" - "github.com/stackrox/rox/pkg/testutils" - "github.com/stackrox/rox/pkg/uuid" - "github.com/stretchr/testify/suite" -) - -type DeploymentDetectionTestSuite struct { - suite.Suite -} - -func TestDeploymentDetection(t *testing.T) { - suite.Run(t, new(DeploymentDetectionTestSuite)) -} - -func (s *DeploymentDetectionTestSuite) TestDeploymentFileAccess() { - deployment := &storage.Deployment{ - Name: "test-deployment", - Id: "test-deployment-id", - } - - type eventWrapper struct { - access *storage.FileAccess - expectAlert bool - } - - for _, tc := range []struct { - description string - policy *storage.Policy - events []eventWrapper - }{ - { - description: "Deployment file open policy with matching event", - policy: s.getDeploymentFileAccessPolicyWithOperations( - []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, - "/etc/passwd", - ), - events: []eventWrapper{ - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), - expectAlert: true, - }, - }, - }, - { - description: "Deployment file open 
policy with mismatching event (UNLINK)", - policy: s.getDeploymentFileAccessPolicyWithOperations( - []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, - "/etc/passwd", - ), - events: []eventWrapper{ - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_UNLINK), - expectAlert: false, - }, - }, - }, - { - description: "Deployment file open policy with mismatching event (/tmp/foo)", - policy: s.getDeploymentFileAccessPolicyWithOperations( - []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, - "/etc/passwd", - ), - events: []eventWrapper{ - { - access: s.getDeploymentActualFileAccessEvent("/tmp/foo", storage.FileAccess_OPEN), - expectAlert: false, - }, - }, - }, - { - description: "Deployment file policy with negated file operation", - policy: s.getDeploymentFileAccessPolicyWithOperations( - []storage.FileAccess_Operation{storage.FileAccess_OPEN}, true, - "/etc/passwd", - ), - events: []eventWrapper{ - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), - expectAlert: false, // open is the only event we should ignore - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_UNLINK), - expectAlert: true, - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), - expectAlert: true, - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_OWNERSHIP_CHANGE), - expectAlert: true, - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_PERMISSION_CHANGE), - expectAlert: true, - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_RENAME), - expectAlert: true, - }, - }, - }, - { - description: "Deployment file policy with multiple operations", - policy: s.getDeploymentFileAccessPolicyWithOperations( - []storage.FileAccess_Operation{storage.FileAccess_OPEN, storage.FileAccess_CREATE}, false, - "/etc/passwd", - ), - events: []eventWrapper{ - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), - expectAlert: true, - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), - expectAlert: true, - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_RENAME), - expectAlert: false, - }, - }, - }, - { - description: "Deployment file policy with multiple negated operations", - policy: s.getDeploymentFileAccessPolicyWithOperations( - []storage.FileAccess_Operation{storage.FileAccess_OPEN, storage.FileAccess_CREATE}, true, - "/etc/passwd", - ), - events: []eventWrapper{ - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), - expectAlert: false, - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), - expectAlert: false, - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_OWNERSHIP_CHANGE), - expectAlert: true, - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_PERMISSION_CHANGE), - expectAlert: true, - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_UNLINK), - expectAlert: true, - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_RENAME), - expectAlert: true, - }, - }, - }, - { - description: "Deployment file policy with multiple files and single operation", - policy: 
s.getDeploymentFileAccessPolicyWithOperations( - []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, - "/etc/passwd", "/etc/shadow", - ), - events: []eventWrapper{ - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), - expectAlert: true, - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/shadow", storage.FileAccess_OPEN), - expectAlert: true, - }, - }, - }, - { - description: "Deployment file policy with multiple files and multiple operations", - policy: s.getDeploymentFileAccessPolicyWithOperations( - []storage.FileAccess_Operation{storage.FileAccess_OPEN, storage.FileAccess_CREATE}, false, - "/etc/passwd", "/etc/shadow", - ), - events: []eventWrapper{ - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), - expectAlert: true, - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), - expectAlert: true, - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/shadow", storage.FileAccess_OPEN), - expectAlert: true, - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/shadow", storage.FileAccess_CREATE), - expectAlert: true, - }, - { - access: s.getDeploymentActualFileAccessEvent("/tmp/foo", storage.FileAccess_CREATE), - expectAlert: false, - }, - { - access: s.getDeploymentActualFileAccessEvent("/tmp/foo", storage.FileAccess_OPEN), - expectAlert: false, - }, - }, - }, - { - description: "Deployment file policy with no operations", - policy: s.getDeploymentFileAccessPolicy("/etc/passwd"), - events: []eventWrapper{ - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), - expectAlert: true, - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), - expectAlert: true, - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_OWNERSHIP_CHANGE), - expectAlert: true, - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_PERMISSION_CHANGE), - expectAlert: true, - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_UNLINK), - expectAlert: true, - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_RENAME), - expectAlert: true, - }, - }, - }, - { - description: "Deployment file policy with all allowed files", - policy: s.getDeploymentFileAccessPolicy("/etc/passwd", "/etc/ssh/sshd_config", "/etc/shadow", "/etc/sudoers"), - events: []eventWrapper{ - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), - expectAlert: true, - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/shadow", storage.FileAccess_OPEN), - expectAlert: true, - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/ssh/sshd_config", storage.FileAccess_OPEN), - expectAlert: true, - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/sudoers", storage.FileAccess_OPEN), - expectAlert: true, - }, - }, - }, - { - description: "Deployment file policy with suffix", - policy: s.getDeploymentFileAccessPolicy("/etc/passwd", "/etc/ssh/sshd_config", "/etc/shadow", "/etc/sudoers"), - events: []eventWrapper{ - { - access: s.getDeploymentActualFileAccessEvent("/etc/passwd-suffix", storage.FileAccess_OPEN), - expectAlert: false, - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/shadow-suffix", storage.FileAccess_OPEN), - expectAlert: false, - }, - { - access: 
s.getDeploymentActualFileAccessEvent("/etc/ssh/sshd_config-suffix", storage.FileAccess_OPEN), - expectAlert: false, - }, - { - access: s.getDeploymentActualFileAccessEvent("/etc/sudoers-suffix", storage.FileAccess_OPEN), - expectAlert: false, - }, - }, - }, - } { - testutils.MustUpdateFeature(s.T(), features.SensitiveFileActivity, true) - defer testutils.MustUpdateFeature(s.T(), features.SensitiveFileActivity, false) - ResetFieldMetadataSingleton(s.T()) - defer ResetFieldMetadataSingleton(s.T()) - - s.Run(tc.description, func() { - matcher, err := BuildDeploymentWithFileAccessMatcher(tc.policy) - s.Require().NoError(err) - - for _, event := range tc.events { - var cache CacheReceptacle - enhancedDeployment := EnhancedDeployment{ - Deployment: deployment, - Images: nil, - NetworkPoliciesApplied: nil, - } - violations, err := matcher.MatchDeploymentWithFileAccess(&cache, enhancedDeployment, event.access) - s.Require().NoError(err) - - if event.expectAlert { - s.Require().Len(violations.AlertViolations, 1, "expected one file access violation in alert") - s.Require().Equal(storage.Alert_Violation_FILE_ACCESS, violations.AlertViolations[0].GetType(), "expected FILE_ACCESS type") - - fileAccess := violations.AlertViolations[0].GetFileAccess() - s.Require().NotNil(fileAccess, "expected file access info") - - // Verify the file access details match - s.Require().Equal(event.access.GetFile().GetEffectivePath(), fileAccess.GetFile().GetEffectivePath()) - s.Require().Equal(event.access.GetFile().GetActualPath(), fileAccess.GetFile().GetActualPath()) - s.Require().Equal(event.access.GetOperation(), fileAccess.GetOperation()) - } else { - s.Require().Empty(violations.AlertViolations, "expected no alerts") - } - } - }) - } -} - -func (s *DeploymentDetectionTestSuite) TestDeploymentEffectiveFileAccess() { - deployment := &storage.Deployment{ - Name: "test-deployment", - Id: "test-deployment-id", - } - - type eventWrapper struct { - access *storage.FileAccess - expectAlert bool - } - - for _, tc := range []struct { - description string - policy *storage.Policy - events []eventWrapper - }{ - { - description: "Deployment effective file open policy with matching event", - policy: s.getEffectiveFileAccessPolicyWithOperations( - []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, - "/etc/passwd", - ), - events: []eventWrapper{ - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), - expectAlert: true, - }, - }, - }, - { - description: "Deployment effective file open policy with mismatching event (UNLINK)", - policy: s.getEffectiveFileAccessPolicyWithOperations( - []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, - "/etc/passwd", - ), - events: []eventWrapper{ - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_UNLINK), - expectAlert: false, - }, - }, - }, - { - description: "Deployment effective file open policy with mismatching event (/etc/sudoers)", - policy: s.getEffectiveFileAccessPolicyWithOperations( - []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, - "/etc/passwd", - ), - events: []eventWrapper{ - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/sudoers", storage.FileAccess_OPEN), - expectAlert: false, - }, - }, - }, - { - description: "Deployment effective file policy with negated file operation", - policy: s.getEffectiveFileAccessPolicyWithOperations( - []storage.FileAccess_Operation{storage.FileAccess_OPEN}, true, - "/etc/passwd", - ), - events: []eventWrapper{ - { - access: 
s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), - expectAlert: false, // open is the only event we should ignore - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_UNLINK), - expectAlert: true, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), - expectAlert: true, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_OWNERSHIP_CHANGE), - expectAlert: true, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_PERMISSION_CHANGE), - expectAlert: true, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_RENAME), - expectAlert: true, - }, - }, - }, - { - description: "Deployment effective file policy with multiple operations", - policy: s.getEffectiveFileAccessPolicyWithOperations( - []storage.FileAccess_Operation{storage.FileAccess_OPEN, storage.FileAccess_CREATE}, false, - "/etc/passwd", - ), - events: []eventWrapper{ - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), - expectAlert: true, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), - expectAlert: true, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_RENAME), - expectAlert: false, - }, - }, - }, - { - description: "Deployment effective file policy with multiple negated operations", - policy: s.getEffectiveFileAccessPolicyWithOperations( - []storage.FileAccess_Operation{storage.FileAccess_OPEN, storage.FileAccess_CREATE}, true, - "/etc/passwd", - ), - events: []eventWrapper{ - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), - expectAlert: false, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), - expectAlert: false, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_OWNERSHIP_CHANGE), - expectAlert: true, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_PERMISSION_CHANGE), - expectAlert: true, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_UNLINK), - expectAlert: true, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_RENAME), - expectAlert: true, - }, - }, - }, - { - description: "Deployment effective file policy with multiple files and single operation", - policy: s.getEffectiveFileAccessPolicyWithOperations( - []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, - "/etc/passwd", "/etc/shadow", - ), - events: []eventWrapper{ - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), - expectAlert: true, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/shadow", storage.FileAccess_OPEN), - expectAlert: true, - }, - }, - }, - { - description: "Deployment effective file policy with multiple files and multiple operations", - policy: s.getEffectiveFileAccessPolicyWithOperations( - []storage.FileAccess_Operation{storage.FileAccess_OPEN, storage.FileAccess_CREATE}, false, - "/etc/passwd", "/etc/shadow", - ), - events: []eventWrapper{ - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), - expectAlert: true, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", 
storage.FileAccess_CREATE), - expectAlert: true, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/shadow", storage.FileAccess_OPEN), - expectAlert: true, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/shadow", storage.FileAccess_CREATE), - expectAlert: true, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/sudoers", storage.FileAccess_CREATE), - expectAlert: false, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/sudoers", storage.FileAccess_OPEN), - expectAlert: false, - }, - }, - }, - { - description: "Deployment effective file policy with no operations", - policy: s.getEffectiveFileAccessPolicy("/etc/passwd"), - events: []eventWrapper{ - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), - expectAlert: true, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), - expectAlert: true, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_OWNERSHIP_CHANGE), - expectAlert: true, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_PERMISSION_CHANGE), - expectAlert: true, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_UNLINK), - expectAlert: true, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_RENAME), - expectAlert: true, - }, - }, - }, - { - description: "Deployment effective file policy with all allowed files", - policy: s.getEffectiveFileAccessPolicy("/etc/passwd", "/etc/ssh/sshd_config", "/etc/shadow", "/etc/sudoers"), - events: []eventWrapper{ - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), - expectAlert: true, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/shadow", storage.FileAccess_OPEN), - expectAlert: true, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/ssh/sshd_config", storage.FileAccess_OPEN), - expectAlert: true, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/sudoers", storage.FileAccess_OPEN), - expectAlert: true, - }, - }, - }, - { - description: "Deployment file policy with suffix", - policy: s.getDeploymentFileAccessPolicy("/etc/passwd", "/etc/ssh/sshd_config", "/etc/shadow", "/etc/sudoers"), - events: []eventWrapper{ - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/passwd-suffix", storage.FileAccess_OPEN), - expectAlert: false, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/shadow-suffix", storage.FileAccess_OPEN), - expectAlert: false, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/ssh/sshd_config-suffix", storage.FileAccess_OPEN), - expectAlert: false, - }, - { - access: s.getDeploymentEffectiveFileAccessEvent("/etc/sudoers-suffix", storage.FileAccess_OPEN), - expectAlert: false, - }, - }, - }, - } { - testutils.MustUpdateFeature(s.T(), features.SensitiveFileActivity, true) - defer testutils.MustUpdateFeature(s.T(), features.SensitiveFileActivity, false) - ResetFieldMetadataSingleton(s.T()) - defer ResetFieldMetadataSingleton(s.T()) - - s.Run(tc.description, func() { - matcher, err := BuildDeploymentWithFileAccessMatcher(tc.policy) - s.Require().NoError(err) - - for _, event := range tc.events { - var cache CacheReceptacle - enhancedDeployment := EnhancedDeployment{ - Deployment: deployment, - Images: nil, - NetworkPoliciesApplied: nil, - } - violations, err := 
matcher.MatchDeploymentWithFileAccess(&cache, enhancedDeployment, event.access) - s.Require().NoError(err) - - if event.expectAlert { - s.Require().Len(violations.AlertViolations, 1, "expected one file access violation in alert") - s.Require().Equal(storage.Alert_Violation_FILE_ACCESS, violations.AlertViolations[0].GetType(), "expected FILE_ACCESS type") - - fileAccess := violations.AlertViolations[0].GetFileAccess() - s.Require().NotNil(fileAccess, "expected file access info") - - // Verify the file access details match - s.Require().Equal(event.access.GetFile().GetEffectivePath(), fileAccess.GetFile().GetEffectivePath()) - s.Require().Equal(event.access.GetFile().GetActualPath(), fileAccess.GetFile().GetActualPath()) - s.Require().Equal(event.access.GetOperation(), fileAccess.GetOperation()) - } else { - s.Require().Empty(violations.AlertViolations, "expected no alerts") - } - } - }) - } -} - -func (s *DeploymentDetectionTestSuite) TestDeploymentDualPathMatching() { - deployment := &storage.Deployment{ - Name: "test-deployment", - Id: "test-deployment-id", - } - - type eventWrapper struct { - access *storage.FileAccess - expectAlert bool - } - - for _, tc := range []struct { - description string - policy *storage.Policy - events []eventWrapper - }{ - // Valid test cases - expected behavior - { - description: "Event with both paths - policy matches actual path only", - policy: s.getDeploymentFileAccessPolicyWithOperations( - []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, - "/etc/passwd", - ), - events: []eventWrapper{ - { - access: s.getDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), - expectAlert: true, - }, - }, - }, - { - description: "Event with both paths - policy matches effective only", - policy: s.getEffectiveFileAccessPolicyWithOperations( - []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, - "/etc/shadow", - ), - events: []eventWrapper{ - { - access: s.getDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), - expectAlert: true, - }, - }, - }, - { - description: "Event with both paths - policy requires BOTH paths (AND within section)", - policy: s.getDualPathPolicy("/etc/passwd", "/etc/shadow", []storage.FileAccess_Operation{storage.FileAccess_OPEN}), - events: []eventWrapper{ - { - access: s.getDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), - expectAlert: true, - }, - }, - }, - { - description: "Multi-section policy - first section matches (OR behavior)", - policy: s.getMultiSectionPolicy([]*storage.PolicySection{ - { - SectionName: "section 1", - PolicyGroups: []*storage.PolicyGroup{ - { - FieldName: fieldnames.FilePath, - Values: []*storage.PolicyValue{{Value: "/etc/passwd"}}, - }, - { - FieldName: fieldnames.FileOperation, - Values: []*storage.PolicyValue{{Value: "OPEN"}}, - }, - }, - }, - { - SectionName: "section 2", - PolicyGroups: []*storage.PolicyGroup{ - { - FieldName: fieldnames.FilePath, - Values: []*storage.PolicyValue{{Value: "/etc/shadow"}}, - }, - { - FieldName: fieldnames.FileOperation, - Values: []*storage.PolicyValue{{Value: "OPEN"}}, - }, - }, - }, - }), - events: []eventWrapper{ - { - access: s.getDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), - expectAlert: true, - }, - }, - }, - { - description: "Multi-section policy - second section matches (OR behavior)", - policy: s.getMultiSectionPolicy([]*storage.PolicySection{ - { - SectionName: "section 1", - PolicyGroups: []*storage.PolicyGroup{ - { - FieldName: 
fieldnames.FilePath, - Values: []*storage.PolicyValue{{Value: "/etc/shadow"}}, - }, - { - FieldName: fieldnames.FileOperation, - Values: []*storage.PolicyValue{{Value: "OPEN"}}, - }, - }, - }, - { - SectionName: "section 2", - PolicyGroups: []*storage.PolicyGroup{ - { - FieldName: fieldnames.FilePath, - Values: []*storage.PolicyValue{{Value: "/etc/passwd"}}, - }, - { - FieldName: fieldnames.FileOperation, - Values: []*storage.PolicyValue{{Value: "OPEN"}}, - }, - }, - }, - }), - events: []eventWrapper{ - { - access: s.getDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), - expectAlert: true, - }, - }, - }, - { - description: "Multi-section with mixed path types - actual path section matches", - policy: s.getMultiSectionPolicy([]*storage.PolicySection{ - { - SectionName: "section 1", - PolicyGroups: []*storage.PolicyGroup{ - { - FieldName: fieldnames.FilePath, - Values: []*storage.PolicyValue{{Value: "/etc/passwd"}}, - }, - }, - }, - { - SectionName: "section 2", - PolicyGroups: []*storage.PolicyGroup{ - { - FieldName: fieldnames.FilePath, - Values: []*storage.PolicyValue{{Value: "/etc/sudoers"}}, - }, - }, - }, - }), - events: []eventWrapper{ - { - access: s.getDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), - expectAlert: true, - }, - }, - }, - { - description: "Multi-section with mixed path types - effective path section matches", - policy: s.getMultiSectionPolicy([]*storage.PolicySection{ - { - SectionName: "section 1", - PolicyGroups: []*storage.PolicyGroup{ - { - FieldName: fieldnames.FilePath, - Values: []*storage.PolicyValue{{Value: "/etc/sudoers"}}, - }, - }, - }, - { - SectionName: "section 2", - PolicyGroups: []*storage.PolicyGroup{ - { - FieldName: fieldnames.FilePath, - Values: []*storage.PolicyValue{{Value: "/etc/shadow"}}, - }, - }, - }, - }), - events: []eventWrapper{ - { - access: s.getDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), - expectAlert: true, - }, - }, - }, - { - description: "Multi-section with dual paths in one section - complex AND/OR", - policy: s.getMultiSectionPolicy([]*storage.PolicySection{ - { - SectionName: "section 1", - PolicyGroups: []*storage.PolicyGroup{ - { - FieldName: fieldnames.FilePath, - Values: []*storage.PolicyValue{ - {Value: "/etc/passwd"}, - {Value: "/etc/shadow"}, - }, - }, - }, - }, - { - SectionName: "section 2", - PolicyGroups: []*storage.PolicyGroup{ - { - FieldName: fieldnames.FilePath, - Values: []*storage.PolicyValue{{Value: "/etc/ssh/sshd_config"}}, - }, - }, - }, - }), - events: []eventWrapper{ - { - // Matches section 1 (both actual and effective paths match) - access: s.getDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), - expectAlert: true, - }, - }, - }, - - // Invalid/edge cases - unexpected behaviors - { - description: "Event with both paths - policy matches neither", - policy: s.getDeploymentFileAccessPolicyWithOperations( - []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, - "/etc/sudoers", - ), - events: []eventWrapper{ - { - access: s.getDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), - expectAlert: false, - }, - }, - }, - { - description: "Event with both paths - policy requires EITHER and only actual path matches", - policy: s.getDualPathPolicy("/etc/passwd", "/etc/sudoers", []storage.FileAccess_Operation{storage.FileAccess_OPEN}), - events: []eventWrapper{ - { - access: s.getDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", 
storage.FileAccess_OPEN), - expectAlert: true, - }, - }, - }, - { - description: "Event with both paths - policy requires EITHER and only effective matches", - policy: s.getDualPathPolicy("/etc/sudoers", "/etc/shadow", []storage.FileAccess_Operation{storage.FileAccess_OPEN}), - events: []eventWrapper{ - { - access: s.getDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), - expectAlert: true, - }, - }, - }, - { - description: "Event with both paths - policy requires EITHER and only BOTH match", - policy: s.getDualPathPolicy("/etc/sudoers", "/etc/shadow", []storage.FileAccess_Operation{storage.FileAccess_OPEN}), - events: []eventWrapper{ - { - access: s.getDualPathFileAccessEvent("/etc/sudoers", "/etc/shadow", storage.FileAccess_OPEN), - expectAlert: true, - }, - }, - }, - { - description: "Event with both paths - policy requires BOTH but operation doesn't match", - policy: s.getDualPathPolicy("/etc/passwd", "/etc/shadow", []storage.FileAccess_Operation{storage.FileAccess_CREATE}), - events: []eventWrapper{ - { - access: s.getDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), - expectAlert: false, - }, - }, - }, - { - description: "Multi-section policy - no sections match", - policy: s.getMultiSectionPolicy([]*storage.PolicySection{ - { - SectionName: "section 1", - PolicyGroups: []*storage.PolicyGroup{ - { - FieldName: fieldnames.FilePath, - Values: []*storage.PolicyValue{{Value: "/etc/ssh/sshd_config"}}, - }, - }, - }, - { - SectionName: "section 2", - PolicyGroups: []*storage.PolicyGroup{ - { - FieldName: fieldnames.FilePath, - Values: []*storage.PolicyValue{{Value: "/etc/sudoers"}}, - }, - }, - }, - }), - events: []eventWrapper{ - { - access: s.getDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), - expectAlert: false, - }, - }, - }, - { - description: "Multi-section with dual paths - neither section matches completely", - policy: s.getMultiSectionPolicy([]*storage.PolicySection{ - { - SectionName: "section 1", - PolicyGroups: []*storage.PolicyGroup{ - { - FieldName: fieldnames.FilePath, - Values: []*storage.PolicyValue{{Value: "/etc/passwd"}, {Value: "/etc/shadow"}}, - }, - { - FieldName: fieldnames.FileOperation, - Values: []*storage.PolicyValue{{Value: "UNLINK"}}, - }, - }, - }, - { - SectionName: "section 2", - PolicyGroups: []*storage.PolicyGroup{ - { - FieldName: fieldnames.FilePath, - Values: []*storage.PolicyValue{{Value: "/etc/shadow"}, {Value: "/etc/ssh/sshd_config"}}, - }, - { - FieldName: fieldnames.FileOperation, - Values: []*storage.PolicyValue{{Value: "UNLINK"}}, - }, - }, - }, - }), - events: []eventWrapper{ - { - // Section 1: actual matches, effective doesn't (AND fails) - // Section 2: actual doesn't match, effective does (AND fails) - // Overall: no section fully matches (OR fails) - access: s.getDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), - expectAlert: false, - }, - }, - }, - } { - testutils.MustUpdateFeature(s.T(), features.SensitiveFileActivity, true) - defer testutils.MustUpdateFeature(s.T(), features.SensitiveFileActivity, false) - ResetFieldMetadataSingleton(s.T()) - defer ResetFieldMetadataSingleton(s.T()) - - s.Run(tc.description, func() { - matcher, err := BuildDeploymentWithFileAccessMatcher(tc.policy) - s.Require().NoError(err) - - for _, event := range tc.events { - var cache CacheReceptacle - enhancedDeployment := EnhancedDeployment{ - Deployment: deployment, - Images: nil, - NetworkPoliciesApplied: nil, - } - violations, err := 
matcher.MatchDeploymentWithFileAccess(&cache, enhancedDeployment, event.access) - s.Require().NoError(err) - - if event.expectAlert { - s.Require().Len(violations.AlertViolations, 1, "expected one file access violation in alert") - s.Require().Equal(storage.Alert_Violation_FILE_ACCESS, violations.AlertViolations[0].GetType(), "expected FILE_ACCESS type") - - fileAccess := violations.AlertViolations[0].GetFileAccess() - s.Require().NotNil(fileAccess, "expected file access info") - - // Verify the file access details match - s.Require().Equal(event.access.GetFile().GetEffectivePath(), fileAccess.GetFile().GetEffectivePath()) - s.Require().Equal(event.access.GetFile().GetActualPath(), fileAccess.GetFile().GetActualPath()) - s.Require().Equal(event.access.GetOperation(), fileAccess.GetOperation()) - } else { - s.Require().Empty(violations.AlertViolations, "expected no alerts") - } - } - }) - } -} - -// getFileAccessEvent is a generic helper for creating file access events. -func (s *DeploymentDetectionTestSuite) getFileAccessEvent(path string, operation storage.FileAccess_Operation, isActualPath bool) *storage.FileAccess { - file := &storage.FileAccess_File{} - if isActualPath { - file.ActualPath = path - } else { - file.EffectivePath = path - } - return &storage.FileAccess{ - File: file, - Operation: operation, - } -} - -func (s *DeploymentDetectionTestSuite) getDeploymentActualFileAccessEvent(path string, operation storage.FileAccess_Operation) *storage.FileAccess { - return s.getFileAccessEvent(path, operation, true) -} - -func (s *DeploymentDetectionTestSuite) getDeploymentEffectiveFileAccessEvent(path string, operation storage.FileAccess_Operation) *storage.FileAccess { - return s.getFileAccessEvent(path, operation, false) -} - -// getFileAccessPolicy is a generic helper for creating file access policies. -func (s *DeploymentDetectionTestSuite) getFileAccessPolicy(operations []storage.FileAccess_Operation, negate bool, paths ...string) *storage.Policy { - var pathValues []*storage.PolicyValue - for _, path := range paths { - pathValues = append(pathValues, &storage.PolicyValue{ - Value: path, - }) - } - - policyGroups := []*storage.PolicyGroup{ - { - FieldName: fieldnames.FilePath, - Values: pathValues, - }, - } - - var operationValues []*storage.PolicyValue - for _, op := range operations { - operationValues = append(operationValues, &storage.PolicyValue{ - Value: op.String(), - }) - } - - if len(operationValues) != 0 { - policyGroups = append(policyGroups, &storage.PolicyGroup{ - FieldName: fieldnames.FileOperation, - Values: operationValues, - Negate: negate, - }) - } - - return &storage.Policy{ - Id: uuid.NewV4().String(), - PolicyVersion: "1.1", - Name: "Sensitive File Access in Deployment", - Severity: storage.Severity_HIGH_SEVERITY, - Categories: []string{"File System"}, - PolicySections: []*storage.PolicySection{ - { - SectionName: "section 1", - PolicyGroups: policyGroups, - }, - }, - LifecycleStages: []storage.LifecycleStage{storage.LifecycleStage_RUNTIME}, - EventSource: storage.EventSource_DEPLOYMENT_EVENT, - } -} - -func (s *DeploymentDetectionTestSuite) getDeploymentFileAccessPolicyWithOperations(operations []storage.FileAccess_Operation, negate bool, paths ...string) *storage.Policy { - return s.getFileAccessPolicy(operations, negate, paths...) -} - -func (s *DeploymentDetectionTestSuite) getDeploymentFileAccessPolicy(paths ...string) *storage.Policy { - return s.getFileAccessPolicy(nil, false, paths...) 
-} - -func (s *DeploymentDetectionTestSuite) getEffectiveFileAccessPolicyWithOperations(operations []storage.FileAccess_Operation, negate bool, paths ...string) *storage.Policy { - return s.getFileAccessPolicy(operations, negate, paths...) -} - -func (s *DeploymentDetectionTestSuite) getEffectiveFileAccessPolicy(paths ...string) *storage.Policy { - return s.getFileAccessPolicy(nil, false, paths...) -} - -// Helper to create file access events with BOTH actual path and effective path populated -func (s *DeploymentDetectionTestSuite) getDualPathFileAccessEvent(actualPath, effectivePath string, operation storage.FileAccess_Operation) *storage.FileAccess { - return &storage.FileAccess{ - File: &storage.FileAccess_File{ - ActualPath: actualPath, - EffectivePath: effectivePath, - }, - Operation: operation, - } -} - -// Helper to create a policy with both ActualPath AND EffectivePath in the same section (AND behavior) -func (s *DeploymentDetectionTestSuite) getDualPathPolicy(actualPath, effectivePath string, operations []storage.FileAccess_Operation) *storage.Policy { - policyGroups := []*storage.PolicyGroup{ - { - FieldName: fieldnames.FilePath, - Values: []*storage.PolicyValue{{Value: actualPath}, {Value: effectivePath}}, - }, - } - - if len(operations) > 0 { - var operationValues []*storage.PolicyValue - for _, op := range operations { - operationValues = append(operationValues, &storage.PolicyValue{ - Value: op.String(), - }) - } - policyGroups = append(policyGroups, &storage.PolicyGroup{ - FieldName: fieldnames.FileOperation, - Values: operationValues, - }) - } - - return &storage.Policy{ - Id: uuid.NewV4().String(), - PolicyVersion: "1.1", - Name: "Dual Path Policy", - Severity: storage.Severity_HIGH_SEVERITY, - Categories: []string{"File System"}, - PolicySections: []*storage.PolicySection{ - { - SectionName: "section 1", - PolicyGroups: policyGroups, - }, - }, - LifecycleStages: []storage.LifecycleStage{storage.LifecycleStage_RUNTIME}, - EventSource: storage.EventSource_DEPLOYMENT_EVENT, - } -} - -// Helper to create a multi-section policy (OR behavior across sections) -func (s *DeploymentDetectionTestSuite) getMultiSectionPolicy(sections []*storage.PolicySection) *storage.Policy { - return &storage.Policy{ - Id: uuid.NewV4().String(), - PolicyVersion: "1.1", - Name: "Multi-Section Policy", - Severity: storage.Severity_HIGH_SEVERITY, - Categories: []string{"File System"}, - PolicySections: sections, - LifecycleStages: []storage.LifecycleStage{storage.LifecycleStage_RUNTIME}, - EventSource: storage.EventSource_DEPLOYMENT_EVENT, - } -} diff --git a/pkg/booleanpolicy/image_criteria_test.go b/pkg/booleanpolicy/image_criteria_test.go new file mode 100644 index 0000000000000..1bc683547af64 --- /dev/null +++ b/pkg/booleanpolicy/image_criteria_test.go @@ -0,0 +1,522 @@ +package booleanpolicy + +import ( + "fmt" + "strings" + "testing" + "time" + + "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/booleanpolicy/fieldnames" + "github.com/stackrox/rox/pkg/booleanpolicy/violationmessages/printer" + "github.com/stackrox/rox/pkg/features" + "github.com/stackrox/rox/pkg/fixtures" + "github.com/stackrox/rox/pkg/images/types" + "github.com/stackrox/rox/pkg/protocompat" + "github.com/stackrox/rox/pkg/set" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +func TestImageCriteria(t *testing.T) { + t.Setenv(features.CVEFixTimestampCriteria.EnvVar(), "true") + suite.Run(t, new(ImageCriteriaTestSuite)) +} + +type 
ImageCriteriaTestSuite struct { + basePoliciesTestSuite +} + +func (suite *ImageCriteriaTestSuite) TestNVDCVSSCriteria() { + heartbleedDep := &storage.Deployment{ + Id: "HEARTBLEEDDEPID", + Containers: []*storage.Container{ + { + Name: "nginx", + SecurityContext: &storage.SecurityContext{Privileged: true}, + Image: &storage.ContainerImage{Id: "HEARTBLEEDDEPSHA"}, + }, + }, + } + + ts := time.Now().AddDate(0, 0, -5) + protoTs, err := protocompat.ConvertTimeToTimestampOrError(ts) + require.NoError(suite.T(), err) + + suite.addDepAndImages(heartbleedDep, &storage.Image{ + Id: "HEARTBLEEDDEPSHA", + Name: &storage.ImageName{FullName: "heartbleed"}, + Scan: &storage.ImageScan{ + Components: []*storage.EmbeddedImageScanComponent{ + {Name: "heartbleed", Version: "1.2", Vulns: []*storage.EmbeddedVulnerability{ + {Cve: "CVE-2014-0160", Link: "https://heartbleed", Cvss: 6, NvdCvss: 8, SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{FixedBy: "v1.2"}, + FirstImageOccurrence: protoTs}, + }}, + }, + }, + }) + + nvdCvssPolicyGroup := &storage.PolicyGroup{ + FieldName: fieldnames.NvdCvss, + Values: []*storage.PolicyValue{ + { + Value: "> 6", + }, + }, + } + + policy := policyWithGroups(storage.EventSource_NOT_APPLICABLE, nvdCvssPolicyGroup) + + deployment := suite.deployments["HEARTBLEEDDEPID"] + depMatcher, err := BuildDeploymentMatcher(policy) + require.NoError(suite.T(), err) + violations, err := depMatcher.MatchDeployment(nil, enhancedDeployment(deployment, suite.getImagesForDeployment(deployment))) + require.Len(suite.T(), violations.AlertViolations, 1) + require.NoError(suite.T(), err) + require.Contains(suite.T(), violations.AlertViolations[0].GetMessage(), "NVD CVSS") + +} + +func (suite *ImageCriteriaTestSuite) TestFixableAndImageFirstOccurenceCriteria() { + heartbleedDep := &storage.Deployment{ + Id: "HEARTBLEEDDEPID", + Containers: []*storage.Container{ + { + Name: "nginx", + SecurityContext: &storage.SecurityContext{Privileged: true}, + Image: &storage.ContainerImage{Id: "HEARTBLEEDDEPSHA"}, + }, + }, + } + + ts := time.Now().AddDate(0, 0, -5) + protoTs, err := protocompat.ConvertTimeToTimestampOrError(ts) + require.NoError(suite.T(), err) + + suite.addDepAndImages(heartbleedDep, &storage.Image{ + Id: "HEARTBLEEDDEPSHA", + Name: &storage.ImageName{FullName: "heartbleed"}, + Scan: &storage.ImageScan{ + Components: []*storage.EmbeddedImageScanComponent{ + {Name: "heartbleed", Version: "1.2", Vulns: []*storage.EmbeddedVulnerability{ + {Cve: "CVE-2014-0160", Link: "https://heartbleed", Cvss: 6, SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{FixedBy: "v1.2"}, + FirstImageOccurrence: protoTs}, + }}, + }, + }, + }) + + fixablePolicyGroup := &storage.PolicyGroup{ + FieldName: fieldnames.Fixable, + Values: []*storage.PolicyValue{{Value: "true"}}, + } + firstImageOccurrenceGroup := &storage.PolicyGroup{ + FieldName: fieldnames.DaysSinceImageFirstDiscovered, + Values: []*storage.PolicyValue{{Value: "2"}}, + } + + policy := policyWithGroups(storage.EventSource_NOT_APPLICABLE, fixablePolicyGroup, firstImageOccurrenceGroup) + + deployment := suite.deployments["HEARTBLEEDDEPID"] + depMatcher, err := BuildDeploymentMatcher(policy) + require.NoError(suite.T(), err) + violations, err := depMatcher.MatchDeployment(nil, enhancedDeployment(deployment, suite.getImagesForDeployment(deployment))) + require.Len(suite.T(), violations.AlertViolations, 1) + require.NoError(suite.T(), err) + +} + +func (suite *ImageCriteriaTestSuite) TestFixableAndFixTimestampAvailableCriteria() { + heartbleedDep := 
&storage.Deployment{ + Id: "HEARTBLEEDDEPID", + Containers: []*storage.Container{ + { + Name: "nginx", + SecurityContext: &storage.SecurityContext{Privileged: true}, + Image: &storage.ContainerImage{Id: "HEARTBLEEDDEPSHA"}, + }, + }, + } + + ts := time.Now().AddDate(0, 0, -5) + protoTs, err := protocompat.ConvertTimeToTimestampOrError(ts) + require.NoError(suite.T(), err) + + suite.addDepAndImages(heartbleedDep, &storage.Image{ + Id: "HEARTBLEEDDEPSHA", + Name: &storage.ImageName{FullName: "heartbleed"}, + Scan: &storage.ImageScan{ + Components: []*storage.EmbeddedImageScanComponent{ + {Name: "heartbleed", Version: "1.2", Vulns: []*storage.EmbeddedVulnerability{ + {Cve: "CVE-2014-0160", Link: "https://heartbleed", Cvss: 6, SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{FixedBy: "v1.2"}, + FixAvailableTimestamp: protoTs}, + }}, + }, + }, + }) + + fixablePolicyGroup := &storage.PolicyGroup{ + FieldName: fieldnames.Fixable, + Values: []*storage.PolicyValue{{Value: "true"}}, + } + fixTimestampAvailableGroup := &storage.PolicyGroup{ + FieldName: fieldnames.DaysSinceFixAvailable, + Values: []*storage.PolicyValue{{Value: "2"}}, + } + + policy := policyWithGroups(storage.EventSource_NOT_APPLICABLE, fixablePolicyGroup, fixTimestampAvailableGroup) + + deployment := suite.deployments["HEARTBLEEDDEPID"] + depMatcher, err := BuildDeploymentMatcher(policy) + require.NoError(suite.T(), err) + violations, err := depMatcher.MatchDeployment(nil, enhancedDeployment(deployment, suite.getImagesForDeployment(deployment))) + require.Len(suite.T(), violations.AlertViolations, 1) + require.NoError(suite.T(), err) + +} + +func (suite *ImageCriteriaTestSuite) TestDaysSinceCVEPublishedCriteria() { + heartbleedDep := &storage.Deployment{ + Id: "HEARTBLEEDDEPID", + Containers: []*storage.Container{ + { + Name: "nginx", + SecurityContext: &storage.SecurityContext{Privileged: true}, + Image: &storage.ContainerImage{Id: "HEARTBLEEDDEPSHA"}, + }, + }, + } + + ts := time.Now().AddDate(0, 0, -5) + protoTs, err := protocompat.ConvertTimeToTimestampOrError(ts) + require.NoError(suite.T(), err) + + suite.addDepAndImages(heartbleedDep, &storage.Image{ + Id: "HEARTBLEEDDEPSHA", + Name: &storage.ImageName{FullName: "heartbleed"}, + Scan: &storage.ImageScan{ + Components: []*storage.EmbeddedImageScanComponent{ + {Name: "heartbleed", Version: "1.2", Vulns: []*storage.EmbeddedVulnerability{ + {Cve: "CVE-2014-0160", Link: "https://heartbleed", Cvss: 6, SetFixedBy: &storage.EmbeddedVulnerability_FixedBy{FixedBy: "v1.2"}, + PublishedOn: protoTs}, + }}, + }, + }, + }) + + fixablePolicyGroup := &storage.PolicyGroup{ + FieldName: fieldnames.Fixable, + Values: []*storage.PolicyValue{{Value: "true"}}, + } + cvePublishedGroup := &storage.PolicyGroup{ + FieldName: fieldnames.DaysSincePublished, + Values: []*storage.PolicyValue{{Value: "2"}}, + } + + policy := policyWithGroups(storage.EventSource_NOT_APPLICABLE, fixablePolicyGroup, cvePublishedGroup) + + deployment := suite.deployments["HEARTBLEEDDEPID"] + depMatcher, err := BuildDeploymentMatcher(policy) + require.NoError(suite.T(), err) + violations, err := depMatcher.MatchDeployment(nil, enhancedDeployment(deployment, suite.getImagesForDeployment(deployment))) + require.Len(suite.T(), violations.AlertViolations, 1) + require.NoError(suite.T(), err) + +} + +func (suite *ImageCriteriaTestSuite) TestImageOS() { + depToImg := make(map[*storage.Deployment]*storage.Image) + for _, imgName := range []string{ + "unknown", + "alpine:v3.4", + "alpine:v3.11", + "ubuntu:20.04", + "debian:8", + 
"debian:10", + } { + img := imageWithOS(imgName) + dep := fixtures.GetDeployment().CloneVT() + dep.Containers = []*storage.Container{ + { + Name: imgName, + Image: types.ToContainerImage(img), + }, + } + depToImg[dep] = img + } + + for _, testCase := range []struct { + value string + expectedMatches []string + }{ + { + value: "unknown", + expectedMatches: []string{"unknown"}, + }, + { + value: "alpine", + expectedMatches: []string{}, + }, + { + value: "alpine.*", + expectedMatches: []string{"alpine:v3.4", "alpine:v3.11"}, + }, + { + value: "debian:8", + expectedMatches: []string{"debian:8"}, + }, + { + value: "centos", + expectedMatches: nil, + }, + } { + c := testCase + + suite.T().Run(fmt.Sprintf("DeploymentMatcher %+v", c), func(t *testing.T) { + depMatcher, err := BuildDeploymentMatcher(policyWithSingleKeyValue(fieldnames.ImageOS, c.value, false)) + require.NoError(t, err) + depMatched := set.NewStringSet() + for dep, img := range depToImg { + violations, err := depMatcher.MatchDeployment(nil, enhancedDeployment(dep, []*storage.Image{img})) + require.NoError(t, err) + if len(violations.AlertViolations) > 0 { + depMatched.Add(img.GetScan().GetOperatingSystem()) + require.Len(t, violations.AlertViolations, 1) + assert.Equal(t, fmt.Sprintf("Container '%s' has image with base OS '%s'", dep.GetContainers()[0].GetName(), img.GetScan().GetOperatingSystem()), violations.AlertViolations[0].GetMessage()) + } + } + assert.ElementsMatch(t, depMatched.AsSlice(), c.expectedMatches, "Got %v for policy %v; expected: %v", depMatched.AsSlice(), c.value, c.expectedMatches) + }) + + suite.T().Run(fmt.Sprintf("ImageMatcher %+v", c), func(t *testing.T) { + imgMatcher, err := BuildImageMatcher(policyWithSingleKeyValue(fieldnames.ImageOS, c.value, false)) + require.NoError(t, err) + imgMatched := set.NewStringSet() + for _, img := range depToImg { + violations, err := imgMatcher.MatchImage(nil, img) + require.NoError(t, err) + if len(violations.AlertViolations) > 0 { + imgMatched.Add(img.GetScan().GetOperatingSystem()) + require.Len(t, violations.AlertViolations, 1) + assert.Equal(t, fmt.Sprintf("Image has base OS '%s'", img.GetScan().GetOperatingSystem()), violations.AlertViolations[0].GetMessage()) + } + } + assert.ElementsMatch(t, imgMatched.AsSlice(), c.expectedMatches, "Got %v for policy %v; expected: %v", imgMatched.AsSlice(), c.value, c.expectedMatches) + }) + } +} + +func (suite *ImageCriteriaTestSuite) TestImageVerified() { + const ( + verifier0 = "io.stackrox.signatureintegration.00000000-0000-0000-0000-000000000001" + verifier1 = "io.stackrox.signatureintegration.00000000-0000-0000-0000-000000000002" + verifier2 = "io.stackrox.signatureintegration.00000000-0000-0000-0000-000000000003" + verifier3 = "io.stackrox.signatureintegration.00000000-0000-0000-0000-000000000004" + unverifier = "io.stackrox.signatureintegration.00000000-0000-0000-0000-00000000000F" + ) + + var images = []*storage.Image{ + suite.imageWithSignatureVerificationResults("image_no_results", []*storage.ImageSignatureVerificationResult{{}}), + suite.imageWithSignatureVerificationResults("image_empty_results", []*storage.ImageSignatureVerificationResult{{ + VerifierId: "", + Status: storage.ImageSignatureVerificationResult_UNSET, + }}), + suite.imageWithSignatureVerificationResults("image_nil_results", nil), + suite.imageWithSignatureVerificationResults("verified_by_0", []*storage.ImageSignatureVerificationResult{{ + VerifierId: verifier0, + Status: storage.ImageSignatureVerificationResult_VERIFIED, + VerifiedImageReferences: 
[]string{"verified_by_0"}, + }}), + suite.imageWithSignatureVerificationResults("unverified_image", []*storage.ImageSignatureVerificationResult{{ + VerifierId: unverifier, + Status: storage.ImageSignatureVerificationResult_UNSET, + }}), + suite.imageWithSignatureVerificationResults("verified_by_3", []*storage.ImageSignatureVerificationResult{{ + VerifierId: verifier2, + Status: storage.ImageSignatureVerificationResult_FAILED_VERIFICATION, + }, { + VerifierId: verifier3, + Status: storage.ImageSignatureVerificationResult_VERIFIED, + VerifiedImageReferences: []string{"verified_by_3"}, + }}), + suite.imageWithSignatureVerificationResults("verified_by_2_and_3", []*storage.ImageSignatureVerificationResult{{ + VerifierId: verifier2, + Status: storage.ImageSignatureVerificationResult_VERIFIED, + VerifiedImageReferences: []string{"verified_by_2_and_3"}, + }, { + VerifierId: verifier3, + Status: storage.ImageSignatureVerificationResult_VERIFIED, + VerifiedImageReferences: []string{"verified_by_2_and_3"}, + }}), + } + + var allImages set.FrozenStringSet + { + ai := set.NewStringSet() + for _, img := range images { + ai.Add(img.GetName().GetFullName()) + } + allImages = ai.Freeze() + } + + getViolationMessage := func(img *storage.Image) string { + message := strings.Builder{} + message.WriteString("Image signature is not verified by the specified signature integration(s)") + successfulVerifierIDs := []string{} + for _, r := range img.GetSignatureVerificationData().GetResults() { + if r.GetVerifierId() != "" && r.GetStatus() == storage.ImageSignatureVerificationResult_VERIFIED { + successfulVerifierIDs = append(successfulVerifierIDs, r.GetVerifierId()) + } + } + if len(successfulVerifierIDs) > 0 { + message.WriteString(fmt.Sprintf(" (it is verified by other integration(s): %s)", printer.StringSliceToSortedSentence(successfulVerifierIDs))) + } + message.WriteString(".") + return message.String() + } + + suite.Run("Test disallowed AND operator", func() { + _, err := BuildImageMatcher(policyWithSingleFieldAndValues(fieldnames.ImageSignatureVerifiedBy, + []string{verifier0}, false, storage.BooleanOperator_AND)) + suite.EqualError(err, + "policy validation error: operator AND is not allowed for field \"Image Signature Verified By\"") + }) + + for i, testCase := range []struct { + values []string + expectedMatches set.FrozenStringSet + }{ + { + values: []string{unverifier}, + expectedMatches: allImages, + }, + { + values: []string{verifier0}, + expectedMatches: allImages.Difference(set.NewFrozenStringSet("verified_by_0")), + }, + { + values: []string{verifier1}, + expectedMatches: allImages, + }, + { + values: []string{verifier2}, + expectedMatches: allImages.Difference(set.NewFrozenStringSet("verified_by_2_and_3")), + }, + { + values: []string{verifier3}, + expectedMatches: allImages.Difference(set.NewFrozenStringSet("verified_by_3", "verified_by_2_and_3")), + }, + { + values: []string{verifier0, verifier2}, + expectedMatches: allImages.Difference(set.NewFrozenStringSet("verified_by_0", "verified_by_2_and_3")), + }, + { + values: []string{verifier2, verifier3}, + expectedMatches: allImages.Difference(set.NewFrozenStringSet("verified_by_3", "verified_by_2_and_3")), + }, + } { + c := testCase + + suite.Run(fmt.Sprintf("ImageMatcher %d: %+v", i, c), func() { + imgMatcher, err := BuildImageMatcher(policyWithSingleFieldAndValues(fieldnames.ImageSignatureVerifiedBy, + c.values, false, storage.BooleanOperator_OR)) + suite.NoError(err) + matchedImages := set.NewStringSet() + for _, img := range images { + 
violations, err := imgMatcher.MatchImage(nil, img) + suite.NoError(err) + if len(violations.AlertViolations) == 0 { + continue + } + matchedImages.Add(img.GetName().GetFullName()) + suite.Truef(c.expectedMatches.Contains(img.GetName().GetFullName()), "Image %q should not match", + img.GetName().GetFullName()) + + for _, violation := range violations.AlertViolations { + suite.Equal(getViolationMessage(img), violation.GetMessage()) + } + } + suite.True(c.expectedMatches.Difference(matchedImages.Freeze()).IsEmpty(), matchedImages) + }) + } +} + +func (suite *ImageCriteriaTestSuite) TestImageVerified_WithDeployment() { + const ( + verifier1 = "io.stackrox.signatureintegration.00000000-0000-0000-0000-000000000002" + verifier2 = "io.stackrox.signatureintegration.00000000-0000-0000-0000-000000000003" + verifier3 = "io.stackrox.signatureintegration.00000000-0000-0000-0000-000000000004" + ) + + imgVerifiedAndMatchingReference := suite.imageWithSignatureVerificationResults("image_verified_by_1", + []*storage.ImageSignatureVerificationResult{ + { + VerifierId: verifier1, + Status: storage.ImageSignatureVerificationResult_VERIFIED, + VerifiedImageReferences: []string{"image_verified_by_1"}, + }, + }) + + imgVerifiedAndMatchingMultipleReferences := suite.imageWithSignatureVerificationResults("image_verified_by_2", + []*storage.ImageSignatureVerificationResult{ + { + VerifierId: verifier3, + Status: storage.ImageSignatureVerificationResult_VERIFIED, + VerifiedImageReferences: []string{"image_with_alternative_verified_reference", "image_verified_by_2"}, + }, + }) + + imgVerifiedButNotMatchingReference := suite.imageWithSignatureVerificationResults("image_with_alternative_verified_reference", + []*storage.ImageSignatureVerificationResult{ + { + VerifierId: verifier2, + Status: storage.ImageSignatureVerificationResult_VERIFIED, + VerifiedImageReferences: []string{"image_verified_by_2"}, + }, + }) + + cases := map[string]struct { + deployment *storage.Deployment + image *storage.Image + matchingVerifier string + expectViolation bool + }{ + "deployment with matching verified image reference shouldn't lead in alert message": { + deployment: deploymentWithImage("deployment_with_image_verified_by_1", imgVerifiedAndMatchingReference), + image: imgVerifiedAndMatchingReference, + matchingVerifier: verifier1, + }, + "deployment with verified result but no matching verified image reference should lead to alert message": { + deployment: deploymentWithImage("deployment_with_image_alternative_verified_reference", imgVerifiedButNotMatchingReference), + image: imgVerifiedButNotMatchingReference, + matchingVerifier: verifier2, + expectViolation: true, + }, + "deployment with verified result and multiple matching verified image references shouldn't lead to alert message": { + deployment: deploymentWithImage("deployment_with_image_verified_by_2", imgVerifiedAndMatchingMultipleReferences), + image: imgVerifiedAndMatchingMultipleReferences, + matchingVerifier: verifier3, + }, + } + + for name, c := range cases { + suite.Run(name, func() { + deploymentMatcher, err := BuildDeploymentMatcher(policyWithSingleFieldAndValues(fieldnames.ImageSignatureVerifiedBy, + []string{c.matchingVerifier}, false, storage.BooleanOperator_OR)) + suite.Require().NoError(err) + + violations, err := deploymentMatcher.MatchDeployment(nil, EnhancedDeployment{ + Deployment: c.deployment, + Images: []*storage.Image{c.image}, + }) + suite.Require().NoError(err) + + if c.expectViolation { + suite.NotEmpty(violations.AlertViolations) + } else { + 
suite.Empty(violations.AlertViolations) + } + }) + } +} diff --git a/pkg/booleanpolicy/network_criteria_test.go b/pkg/booleanpolicy/network_criteria_test.go new file mode 100644 index 0000000000000..0248c6b0bf565 --- /dev/null +++ b/pkg/booleanpolicy/network_criteria_test.go @@ -0,0 +1,180 @@ +package booleanpolicy + +import ( + "testing" + "time" + + "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/booleanpolicy/augmentedobjs" + "github.com/stackrox/rox/pkg/booleanpolicy/fieldnames" + "github.com/stackrox/rox/pkg/booleanpolicy/violationmessages/printer" + "github.com/stackrox/rox/pkg/features" + "github.com/stackrox/rox/pkg/fixtures" + "github.com/stackrox/rox/pkg/protoassert" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" +) + +func TestNetworkCriteria(t *testing.T) { + t.Setenv(features.CVEFixTimestampCriteria.EnvVar(), "true") + suite.Run(t, new(NetworkCriteriaTestSuite)) +} + +type NetworkCriteriaTestSuite struct { + basePoliciesTestSuite +} + +func networkBaselineMessage(t testing.TB, flow *augmentedobjs.NetworkFlowDetails) *storage.Alert_Violation { + violation, err := printer.GenerateNetworkFlowViolation(flow) + assert.Nil(t, err) + return violation +} + +func assertNetworkBaselineMessagesEqual(t testing.TB, this, that []*storage.Alert_Violation) { + thisWithoutTime := make([]*storage.Alert_Violation, 0, len(this)) + thatWithoutTime := make([]*storage.Alert_Violation, 0, len(that)) + for _, violation := range this { + cp := violation.CloneVT() + cp.Time = nil + thisWithoutTime = append(thisWithoutTime, cp) + } + for _, violation := range that { + cp := violation.CloneVT() + cp.Time = nil + thatWithoutTime = append(thatWithoutTime, cp) + } + protoassert.ElementsMatch(t, thisWithoutTime, thatWithoutTime) +} + +func (suite *NetworkCriteriaTestSuite) TestNetworkBaselinePolicy() { + deployment := fixtures.GetDeployment().CloneVT() + suite.addDepAndImages(deployment) + + // Create a policy for triggering flows that are not in baseline + whitelistGroup := policyGroupWithSingleKeyValue(fieldnames.UnexpectedNetworkFlowDetected, "true", false) + + policy := policyWithGroups(storage.EventSource_DEPLOYMENT_EVENT, whitelistGroup) + m, err := BuildDeploymentWithNetworkFlowMatcher(policy) + suite.NoError(err) + + srcName, dstName, port, protocol := "deployment-name", "ext-source-name", 1, storage.L4Protocol_L4_PROTOCOL_TCP + flow := &augmentedobjs.NetworkFlowDetails{ + SrcEntityName: srcName, + SrcEntityType: storage.NetworkEntityInfo_DEPLOYMENT, + DstEntityName: dstName, + DstEntityType: storage.NetworkEntityInfo_DEPLOYMENT, + DstPort: uint32(port), + L4Protocol: protocol, + NotInNetworkBaseline: true, + LastSeenTimestamp: time.Now(), + } + + violations, err := m.MatchDeploymentWithNetworkFlowInfo(nil, enhancedDeployment(deployment, suite.getImagesForDeployment(deployment)), flow) + suite.NoError(err) + assertNetworkBaselineMessagesEqual( + suite.T(), + violations.AlertViolations, + []*storage.Alert_Violation{networkBaselineMessage(suite.T(), flow)}) + + // And if the flow is in the baseline, no violations should exist + flow.NotInNetworkBaseline = false + violations, err = m.MatchDeploymentWithNetworkFlowInfo(nil, enhancedDeployment(deployment, suite.getImagesForDeployment(deployment)), flow) + suite.NoError(err) + suite.Empty(violations) +} + +func (suite *NetworkCriteriaTestSuite) TestNetworkPolicyFields() { + testCases := map[string]struct { + netpolsApplied *augmentedobjs.NetworkPoliciesApplied + alerts []*storage.Alert_Violation + }{ 
+ "Missing Ingress Network Policy": { + netpolsApplied: &augmentedobjs.NetworkPoliciesApplied{ + HasIngressNetworkPolicy: false, + HasEgressNetworkPolicy: true, + }, + alerts: []*storage.Alert_Violation{ + {Message: "The deployment is missing Ingress Network Policy.", Type: storage.Alert_Violation_NETWORK_POLICY}, + }, + }, + "Missing Egress Network Policy": { + netpolsApplied: &augmentedobjs.NetworkPoliciesApplied{ + HasIngressNetworkPolicy: true, + HasEgressNetworkPolicy: false, + }, + alerts: []*storage.Alert_Violation{ + {Message: "The deployment is missing Egress Network Policy.", Type: storage.Alert_Violation_NETWORK_POLICY}, + }, + }, + "Both policies missing": { + netpolsApplied: &augmentedobjs.NetworkPoliciesApplied{ + HasIngressNetworkPolicy: false, + HasEgressNetworkPolicy: false, + }, + alerts: []*storage.Alert_Violation{ + {Message: "The deployment is missing Ingress Network Policy.", Type: storage.Alert_Violation_NETWORK_POLICY}, + {Message: "The deployment is missing Egress Network Policy.", Type: storage.Alert_Violation_NETWORK_POLICY}, + }, + }, + "No alerts": { + netpolsApplied: &augmentedobjs.NetworkPoliciesApplied{ + HasIngressNetworkPolicy: true, + HasEgressNetworkPolicy: true, + }, + alerts: []*storage.Alert_Violation(nil), + }, + "No violations on nil augmentedobj": { + netpolsApplied: nil, + alerts: []*storage.Alert_Violation(nil), + }, + "Policies attached to augmentedobj": { + netpolsApplied: &augmentedobjs.NetworkPoliciesApplied{ + HasIngressNetworkPolicy: false, + HasEgressNetworkPolicy: true, + Policies: map[string]*storage.NetworkPolicy{ + "ID1": {Id: "ID1", Name: "policy1"}, + }, + }, + alerts: []*storage.Alert_Violation{ + { + Message: "The deployment is missing Ingress Network Policy.", + Type: storage.Alert_Violation_NETWORK_POLICY, + MessageAttributes: &storage.Alert_Violation_KeyValueAttrs_{ + KeyValueAttrs: &storage.Alert_Violation_KeyValueAttrs{ + Attrs: []*storage.Alert_Violation_KeyValueAttrs_KeyValueAttr{ + {Key: printer.PolicyID, Value: "ID1"}, + {Key: printer.PolicyName, Value: "policy1"}, + }, + }, + }, + }, + }, + }, + } + + for name, testCase := range testCases { + suite.Run(name, func() { + deployment := fixtures.GetDeployment().CloneVT() + missingIngressPolicy := policyWithSingleKeyValue(fieldnames.HasIngressNetworkPolicy, "false", false) + missingEgressPolicy := policyWithSingleKeyValue(fieldnames.HasEgressNetworkPolicy, "false", false) + + enhanced := enhancedDeploymentWithNetworkPolicies( + deployment, + suite.getImagesForDeployment(deployment), + testCase.netpolsApplied, + ) + + v1 := suite.getViolations(missingIngressPolicy, enhanced) + v2 := suite.getViolations(missingEgressPolicy, enhanced) + + allAlerts := append(v1.AlertViolations, v2.AlertViolations...) 
+ for i, expected := range testCase.alerts { + suite.Equal(expected.GetType(), allAlerts[i].GetType()) + suite.Equal(expected.GetMessage(), allAlerts[i].GetMessage()) + protoassert.Equal(suite.T(), expected.GetKeyValueAttrs(), allAlerts[i].GetKeyValueAttrs()) + // We do not want to compare time, as the violation timestamp uses now() + suite.NotNil(allAlerts[i].GetTime()) + } + }) + } +} diff --git a/pkg/booleanpolicy/node_policies_test.go b/pkg/booleanpolicy/node_criteria_test.go similarity index 53% rename from pkg/booleanpolicy/node_policies_test.go rename to pkg/booleanpolicy/node_criteria_test.go index ac1443ee8c03b..c4a574c39ed12 100644 --- a/pkg/booleanpolicy/node_policies_test.go +++ b/pkg/booleanpolicy/node_criteria_test.go @@ -4,22 +4,20 @@ import ( "testing" "github.com/stackrox/rox/generated/storage" - "github.com/stackrox/rox/pkg/booleanpolicy/fieldnames" "github.com/stackrox/rox/pkg/features" "github.com/stackrox/rox/pkg/testutils" - "github.com/stackrox/rox/pkg/uuid" "github.com/stretchr/testify/suite" ) -type NodeDetectionTestSuite struct { +type NodeCriteriaTestSuite struct { suite.Suite } -func TestNodeDetection(t *testing.T) { - suite.Run(t, new(NodeDetectionTestSuite)) +func TestNodeCriteria(t *testing.T) { + suite.Run(t, new(NodeCriteriaTestSuite)) } -func (s *NodeDetectionTestSuite) TestNodeFileAccess() { +func (s *NodeCriteriaTestSuite) TestNodeFileAccess() { node := &storage.Node{ Name: "test-node-1", Id: "test-node-1", @@ -37,235 +35,235 @@ func (s *NodeDetectionTestSuite) TestNodeFileAccess() { }{ { description: "Node file open policy with matching event", - policy: s.getNodeFileAccessPolicyWithOperations( + policy: newFileAccessPolicy(storage.EventSource_NODE_EVENT, []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, "/etc/passwd", ), events: []eventWrapper{ { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), expectAlert: true, }, }, }, { description: "Node file open policy with mismatching event (UNLINK)", - policy: s.getNodeFileAccessPolicyWithOperations( + policy: newFileAccessPolicy(storage.EventSource_NODE_EVENT, []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, "/etc/passwd", ), events: []eventWrapper{ { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_UNLINK), + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_UNLINK), expectAlert: false, }, }, }, { description: "Node file open policy with mismatching event (/tmp/foo)", - policy: s.getNodeFileAccessPolicyWithOperations( + policy: newFileAccessPolicy(storage.EventSource_NODE_EVENT, []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, "/etc/passwd", ), events: []eventWrapper{ { - access: s.getNodeFileAccessEvent("/tmp/foo", storage.FileAccess_OPEN), + access: newActualFileAccessEvent("/tmp/foo", storage.FileAccess_OPEN), expectAlert: false, }, }, }, { description: "Node file policy with negated file operation", - policy: s.getNodeFileAccessPolicyWithOperations( + policy: newFileAccessPolicy(storage.EventSource_NODE_EVENT, []storage.FileAccess_Operation{storage.FileAccess_OPEN}, true, "/etc/passwd", ), events: []eventWrapper{ { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), expectAlert: false, // open is the only event we should ignore }, { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_UNLINK), + access: 
newActualFileAccessEvent("/etc/passwd", storage.FileAccess_UNLINK), expectAlert: true, }, { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), expectAlert: true, }, { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_OWNERSHIP_CHANGE), + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_OWNERSHIP_CHANGE), expectAlert: true, }, { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_PERMISSION_CHANGE), + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_PERMISSION_CHANGE), expectAlert: true, }, { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_RENAME), + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_RENAME), expectAlert: true, }, }, }, { description: "Node file policy with multiple operations", - policy: s.getNodeFileAccessPolicyWithOperations( + policy: newFileAccessPolicy(storage.EventSource_NODE_EVENT, []storage.FileAccess_Operation{storage.FileAccess_OPEN, storage.FileAccess_CREATE}, false, "/etc/passwd", ), events: []eventWrapper{ { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), expectAlert: true, }, { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), expectAlert: true, }, { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_RENAME), + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_RENAME), expectAlert: false, }, }, }, { description: "Node file policy with multiple negated operations", - policy: s.getNodeFileAccessPolicyWithOperations( + policy: newFileAccessPolicy(storage.EventSource_NODE_EVENT, []storage.FileAccess_Operation{storage.FileAccess_OPEN, storage.FileAccess_CREATE}, true, "/etc/passwd", ), events: []eventWrapper{ { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), expectAlert: false, }, { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), expectAlert: false, }, { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_OWNERSHIP_CHANGE), + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_OWNERSHIP_CHANGE), expectAlert: true, }, { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_PERMISSION_CHANGE), + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_PERMISSION_CHANGE), expectAlert: true, }, { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_UNLINK), + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_UNLINK), expectAlert: true, }, { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_RENAME), + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_RENAME), expectAlert: true, }, }, }, { description: "Node file policy with multiple files and single operation", - policy: s.getNodeFileAccessPolicyWithOperations( + policy: newFileAccessPolicy(storage.EventSource_NODE_EVENT, []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, "/etc/passwd", "/etc/shadow", ), events: []eventWrapper{ { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), + access: 
newActualFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), expectAlert: true, }, { - access: s.getNodeFileAccessEvent("/etc/shadow", storage.FileAccess_OPEN), + access: newActualFileAccessEvent("/etc/shadow", storage.FileAccess_OPEN), expectAlert: true, }, }, }, { description: "Node file policy with multiple files and multiple operations", - policy: s.getNodeFileAccessPolicyWithOperations( + policy: newFileAccessPolicy(storage.EventSource_NODE_EVENT, []storage.FileAccess_Operation{storage.FileAccess_OPEN, storage.FileAccess_CREATE}, false, "/etc/passwd", "/etc/shadow", ), events: []eventWrapper{ { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), expectAlert: true, }, { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), expectAlert: true, }, { - access: s.getNodeFileAccessEvent("/etc/shadow", storage.FileAccess_OPEN), + access: newActualFileAccessEvent("/etc/shadow", storage.FileAccess_OPEN), expectAlert: true, }, { - access: s.getNodeFileAccessEvent("/etc/shadow", storage.FileAccess_CREATE), + access: newActualFileAccessEvent("/etc/shadow", storage.FileAccess_CREATE), expectAlert: true, }, { - access: s.getNodeFileAccessEvent("/tmp/foo", storage.FileAccess_CREATE), + access: newActualFileAccessEvent("/tmp/foo", storage.FileAccess_CREATE), expectAlert: false, }, { - access: s.getNodeFileAccessEvent("/tmp/foo", storage.FileAccess_OPEN), + access: newActualFileAccessEvent("/tmp/foo", storage.FileAccess_OPEN), expectAlert: false, }, }, }, { description: "Node file policy with no operations", - policy: s.getNodeFileAccessPolicy("/etc/passwd"), + policy: newFileAccessPolicy(storage.EventSource_NODE_EVENT, nil, false, "/etc/passwd"), events: []eventWrapper{ { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), expectAlert: true, }, { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), expectAlert: true, }, { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_OWNERSHIP_CHANGE), + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_OWNERSHIP_CHANGE), expectAlert: true, }, { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_PERMISSION_CHANGE), + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_PERMISSION_CHANGE), expectAlert: true, }, { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_UNLINK), + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_UNLINK), expectAlert: true, }, { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_RENAME), + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_RENAME), expectAlert: true, }, }, }, { description: "Node file policy with all allowed files", - policy: s.getNodeFileAccessPolicy("/etc/passwd", "/etc/ssh/sshd_config", "/etc/shadow", "/etc/sudoers"), + policy: newFileAccessPolicy(storage.EventSource_NODE_EVENT, nil, false, "/etc/passwd", "/etc/ssh/sshd_config", "/etc/shadow", "/etc/sudoers"), events: []eventWrapper{ { - access: s.getNodeFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), expectAlert: true, }, { - access: 
s.getNodeFileAccessEvent("/etc/shadow", storage.FileAccess_OPEN), + access: newActualFileAccessEvent("/etc/shadow", storage.FileAccess_OPEN), expectAlert: true, }, { - access: s.getNodeFileAccessEvent("/etc/ssh/sshd_config", storage.FileAccess_OPEN), + access: newActualFileAccessEvent("/etc/ssh/sshd_config", storage.FileAccess_OPEN), expectAlert: true, }, { - access: s.getNodeFileAccessEvent("/etc/sudoers", storage.FileAccess_OPEN), + access: newActualFileAccessEvent("/etc/sudoers", storage.FileAccess_OPEN), expectAlert: true, }, }, }, { description: "Node file policy with event containing both matching paths", - policy: s.getNodeFileAccessPolicy("/etc/passwd"), + policy: newFileAccessPolicy(storage.EventSource_NODE_EVENT, nil, false, "/etc/passwd"), events: []eventWrapper{ { access: &storage.FileAccess{ @@ -312,82 +310,3 @@ func (s *NodeDetectionTestSuite) TestNodeFileAccess() { }) } } - -func (s *NodeDetectionTestSuite) getNodeFileAccessEvent(path string, operation storage.FileAccess_Operation) *storage.FileAccess { - return &storage.FileAccess{ - File: &storage.FileAccess_File{ActualPath: path}, - Operation: operation, - } -} - -func (s *NodeDetectionTestSuite) getNodeFileAccessPolicyWithOperations(operations []storage.FileAccess_Operation, negate bool, paths ...string) *storage.Policy { - var pathValues []*storage.PolicyValue - for _, path := range paths { - pathValues = append(pathValues, &storage.PolicyValue{ - Value: path, - }) - } - - var operationValues []*storage.PolicyValue - for _, op := range operations { - operationValues = append(operationValues, &storage.PolicyValue{ - Value: op.String(), - }) - } - - return &storage.Policy{ - Id: uuid.NewV4().String(), - PolicyVersion: "1.1", - Name: "Sensitive File Access on Node", - Severity: storage.Severity_HIGH_SEVERITY, - Categories: []string{"File System"}, - PolicySections: []*storage.PolicySection{ - { - SectionName: "section 1", - PolicyGroups: []*storage.PolicyGroup{ - { - FieldName: fieldnames.FilePath, - Values: pathValues, - }, - { - FieldName: fieldnames.FileOperation, - Values: operationValues, - Negate: negate, - }, - }, - }, - }, - LifecycleStages: []storage.LifecycleStage{storage.LifecycleStage_RUNTIME}, - EventSource: storage.EventSource_NODE_EVENT, - } -} - -func (s *NodeDetectionTestSuite) getNodeFileAccessPolicy(paths ...string) *storage.Policy { - var policyValues []*storage.PolicyValue - for _, path := range paths { - policyValues = append(policyValues, &storage.PolicyValue{ - Value: path, - }) - } - - return &storage.Policy{ - Id: uuid.NewV4().String(), - PolicyVersion: "1.1", - Name: "Sensitive File Access on Node", - Severity: storage.Severity_HIGH_SEVERITY, - Categories: []string{"File System"}, - PolicySections: []*storage.PolicySection{ - { - SectionName: "section 1", - PolicyGroups: []*storage.PolicyGroup{ - { - FieldName: fieldnames.FilePath, - Values: policyValues, - }, - }, - }, - }, - LifecycleStages: []storage.LifecycleStage{storage.LifecycleStage_RUNTIME}, - EventSource: storage.EventSource_NODE_EVENT, - } -} diff --git a/pkg/booleanpolicy/policies_helpers_test.go b/pkg/booleanpolicy/policies_helpers_test.go new file mode 100644 index 0000000000000..c380fd932b7e0 --- /dev/null +++ b/pkg/booleanpolicy/policies_helpers_test.go @@ -0,0 +1,411 @@ +package booleanpolicy + +import ( + "regexp" + "strings" + "testing" + + "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/booleanpolicy/augmentedobjs" + "github.com/stackrox/rox/pkg/booleanpolicy/fieldnames" + 
"github.com/stackrox/rox/pkg/booleanpolicy/policyversion" + "github.com/stackrox/rox/pkg/defaults/policies" + "github.com/stackrox/rox/pkg/images/types" + imgUtils "github.com/stackrox/rox/pkg/images/utils" + "github.com/stackrox/rox/pkg/protoassert" + "github.com/stackrox/rox/pkg/protocompat" + "github.com/stackrox/rox/pkg/sliceutils" + "github.com/stackrox/rox/pkg/uuid" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +const ( + writableHostMountPolicyName = "Writeable Host Mount" + anyHostPathPolicyName = "Any Host Path" +) + +// basePoliciesTestSuite contains the shared state and helpers for all policy test suites. +type basePoliciesTestSuite struct { + suite.Suite + + defaultPolicies map[string]*storage.Policy + customPolicies map[string]*storage.Policy + + deployments map[string]*storage.Deployment + images map[string]*storage.Image + deploymentsToImages map[string][]*storage.Image + deploymentsToIndicators map[string][]*storage.ProcessIndicator +} + +func (s *basePoliciesTestSuite) SetupSuite() { + defaultPolicies, err := policies.DefaultPolicies() + s.Require().NoError(err) + + s.defaultPolicies = make(map[string]*storage.Policy, len(defaultPolicies)) + for _, p := range defaultPolicies { + s.defaultPolicies[p.GetName()] = p + } + + s.customPolicies = make(map[string]*storage.Policy) + for _, customPolicy := range []*storage.Policy{ + changeName(policyWithSingleKeyValue(fieldnames.WritableHostMount, "true", false), writableHostMountPolicyName), + changeName(policyWithSingleKeyValue(fieldnames.VolumeType, "hostpath", false), anyHostPathPolicyName), + } { + s.customPolicies[customPolicy.GetName()] = customPolicy + } +} + +func (s *basePoliciesTestSuite) TearDownSuite() {} + +func (s *basePoliciesTestSuite) SetupTest() { + s.deployments = make(map[string]*storage.Deployment) + s.images = make(map[string]*storage.Image) + s.deploymentsToImages = make(map[string][]*storage.Image) + s.deploymentsToIndicators = make(map[string][]*storage.ProcessIndicator) +} + +func (s *basePoliciesTestSuite) imageIDFromDep(deployment *storage.Deployment) string { + s.Require().Len(deployment.GetContainers(), 1, "This function only supports deployments with exactly one container") + id := deployment.GetContainers()[0].GetImage().GetId() + s.NotEmpty(id, "Deployment '%s' had no image id", protocompat.MarshalTextString(deployment)) + return id +} + +func (s *basePoliciesTestSuite) MustGetPolicy(name string) *storage.Policy { + p := s.defaultPolicies[name] + if p != nil { + return p + } + p = s.customPolicies[name] + if p != nil { + return p + } + s.FailNow("Policy not found: ", name) + return nil +} + +func (s *basePoliciesTestSuite) addDepAndImages(deployment *storage.Deployment, images ...*storage.Image) { + s.deployments[deployment.GetId()] = deployment + for _, i := range images { + s.images[i.GetId()] = i + s.deploymentsToImages[deployment.GetId()] = append(s.deploymentsToImages[deployment.GetId()], i) + } +} + +func (s *basePoliciesTestSuite) addImage(img *storage.Image) *storage.Image { + s.images[img.GetId()] = img + return img +} + +func (s *basePoliciesTestSuite) imageWithSignatureVerificationResults(name string, results []*storage.ImageSignatureVerificationResult) *storage.Image { + imageName, _, err := imgUtils.GenerateImageNameFromString(name) + if err != nil { + s.T().Fatalf("failed to parse image name %q: %v", name, err) + } + + imageName.FullName = name + + img := &storage.Image{ + Id: uuid.NewV4().String(), + Name: imageName, + } + + if results != nil { + 
img.SignatureVerificationData = &storage.ImageSignatureVerificationData{ + Results: results, + } + } + return img +} + +func (s *basePoliciesTestSuite) addIndicator(deploymentID, name, args, path string, lineage []string, uid uint32) *storage.ProcessIndicator { + deployment := s.deployments[deploymentID] + if len(deployment.GetContainers()) == 0 { + deployment.Containers = []*storage.Container{{Name: uuid.NewV4().String()}} + } + lineageInfo := make([]*storage.ProcessSignal_LineageInfo, len(lineage)) + for i, ancestor := range lineage { + lineageInfo[i] = &storage.ProcessSignal_LineageInfo{ + ParentExecFilePath: ancestor, + } + } + indicator := &storage.ProcessIndicator{ + Id: uuid.NewV4().String(), + DeploymentId: deploymentID, + ContainerName: deployment.GetContainers()[0].GetName(), + Signal: &storage.ProcessSignal{ + Name: name, + Args: args, + ExecFilePath: path, + Time: protocompat.TimestampNow(), + LineageInfo: lineageInfo, + Uid: uid, + }, + } + s.deploymentsToIndicators[deploymentID] = append(s.deploymentsToIndicators[deploymentID], indicator) + return indicator +} + +type testCase struct { + policyName string + expectedViolations map[string][]*storage.Alert_Violation + expectedProcessViolations map[string][]*storage.ProcessIndicator + + shouldNotMatch map[string]struct{} + sampleViolationForMatched string + allowUnvalidatedViolations bool +} + +func (s *basePoliciesTestSuite) getImagesForDeployment(deployment *storage.Deployment) []*storage.Image { + images := s.deploymentsToImages[deployment.GetId()] + if len(images) == 0 { + return make([]*storage.Image, len(deployment.GetContainers())) + } + s.Equal(len(deployment.GetContainers()), len(images)) + return images +} + +func (s *basePoliciesTestSuite) getViolations(policy *storage.Policy, dep EnhancedDeployment) Violations { + matcher, err := BuildDeploymentMatcher(policy) + s.NoError(err, "deployment matcher creation must succeed") + violations, err := matcher.MatchDeployment(nil, dep) + s.NoError(err, "deployment matcher run must succeed") + s.Empty(violations.ProcessViolation) + return violations +} + +// Free helper functions shared across test files. 
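+// Illustrative sketch (not part of the original change): the free helpers below are
+// typically composed in the criteria suites roughly as follows -- build a policy with a
+// single criterion, wrap a fixture deployment, and run the deployment matcher:
+//
+//	policy := policyWithSingleKeyValue(fieldnames.PrivilegedContainer, "true", false)
+//	dep := fixtures.GetDeployment().CloneVT()
+//	matcher, err := BuildDeploymentMatcher(policy)
+//	// handle err, then:
+//	violations, err := matcher.MatchDeployment(nil, enhancedDeployment(dep, images))
+//
+// The field name, fixture, and the images slice above are borrowed from the surrounding
+// tests; treat this as a usage note rather than an additional test.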
+ +func changeName(p *storage.Policy, newName string) *storage.Policy { + p.Name = newName + return p +} + +func enhancedDeployment(dep *storage.Deployment, images []*storage.Image) EnhancedDeployment { + return EnhancedDeployment{ + Deployment: dep, + Images: images, + NetworkPoliciesApplied: &augmentedobjs.NetworkPoliciesApplied{ + HasIngressNetworkPolicy: true, + HasEgressNetworkPolicy: true, + }, + } +} + +func enhancedDeploymentWithNetworkPolicies(dep *storage.Deployment, images []*storage.Image, netpolApplied *augmentedobjs.NetworkPoliciesApplied) EnhancedDeployment { + return EnhancedDeployment{ + Deployment: dep, + Images: images, + NetworkPoliciesApplied: netpolApplied, + } +} + +func imageWithComponents(components []*storage.EmbeddedImageScanComponent) *storage.Image { + return &storage.Image{ + Id: uuid.NewV4().String(), + Name: &storage.ImageName{FullName: "docker.io/ASFASF", Remote: "ASFASF"}, + Scan: &storage.ImageScan{ + Components: components, + }, + } +} + +func imageWithLayers(layers []*storage.ImageLayer) *storage.Image { + return &storage.Image{ + Id: uuid.NewV4().String(), + Name: &storage.ImageName{FullName: "docker.io/ASFASF", Remote: "ASFASF"}, + Metadata: &storage.ImageMetadata{ + V1: &storage.V1Metadata{ + Layers: layers, + }, + }, + } +} + +func imageWithOS(os string) *storage.Image { + return &storage.Image{ + Id: uuid.NewV4().String(), + Name: &storage.ImageName{FullName: "docker.io/ASFASF", Remote: "ASFASF"}, + Scan: &storage.ImageScan{ + OperatingSystem: os, + }, + } +} + +func deploymentWithImageAnyID(img *storage.Image) *storage.Deployment { + return deploymentWithImage(uuid.NewV4().String(), img) +} + +func deploymentWithImage(id string, img *storage.Image) *storage.Deployment { + remoteSplit := strings.Split(img.GetName().GetFullName(), "/") + alphaOnly := regexp.MustCompile("[^A-Za-z]+") + containerName := alphaOnly.ReplaceAllString(remoteSplit[len(remoteSplit)-1], "") + return &storage.Deployment{ + Id: id, + Containers: []*storage.Container{{Id: img.GetId(), Name: containerName, Image: types.ToContainerImage(img)}}, + } +} + +func getViolationsWithAndWithoutCaching(t *testing.T, matcher func(cache *CacheReceptacle) (Violations, error)) Violations { + violations, err := matcher(nil) + require.NoError(t, err) + + var cache CacheReceptacle + violationsWithEmptyCache, err := matcher(&cache) + require.NoError(t, err) + assertViolations(t, violations, violationsWithEmptyCache) + + violationsWithNonEmptyCache, err := matcher(&cache) + require.NoError(t, err) + assertViolations(t, violations, violationsWithNonEmptyCache) + + return violations +} + +func assertViolations(t testing.TB, expected, actual Violations) { + t.Helper() + protoassert.Equal(t, expected.ProcessViolation, actual.ProcessViolation) + protoassert.SlicesEqual(t, expected.AlertViolations, actual.AlertViolations) +} + +func policyWithGroups(eventSrc storage.EventSource, groups ...*storage.PolicyGroup) *storage.Policy { + return &storage.Policy{ + PolicyVersion: policyversion.CurrentVersion().String(), + Name: uuid.NewV4().String(), + EventSource: eventSrc, + PolicySections: []*storage.PolicySection{{PolicyGroups: groups}}, + } +} + +func policyGroupWithSingleKeyValue(fieldName, value string, negate bool) *storage.PolicyGroup { + return &storage.PolicyGroup{FieldName: fieldName, Values: []*storage.PolicyValue{{Value: value}}, Negate: negate} +} + +func policyWithSingleKeyValue(fieldName, value string, negate bool) *storage.Policy { + return policyWithGroups(storage.EventSource_NOT_APPLICABLE, 
policyGroupWithSingleKeyValue(fieldName, value, negate)) +} + +func policyWithSingleFieldAndValues(fieldName string, values []string, negate bool, op storage.BooleanOperator) *storage.Policy { + return policyWithGroups(storage.EventSource_NOT_APPLICABLE, &storage.PolicyGroup{FieldName: fieldName, Values: sliceutils.Map(values, func(val string) *storage.PolicyValue { + return &storage.PolicyValue{Value: val} + }), Negate: negate, BooleanOperator: op}) +} + +// File access test helpers shared across runtime and node criteria tests. + +func newActualFileAccessEvent(path string, operation storage.FileAccess_Operation) *storage.FileAccess { + return &storage.FileAccess{ + File: &storage.FileAccess_File{ActualPath: path}, + Operation: operation, + } +} + +func newEffectiveFileAccessEvent(path string, operation storage.FileAccess_Operation) *storage.FileAccess { + return &storage.FileAccess{ + File: &storage.FileAccess_File{EffectivePath: path}, + Operation: operation, + } +} + +func newDualPathFileAccessEvent(actualPath, effectivePath string, operation storage.FileAccess_Operation) *storage.FileAccess { + return &storage.FileAccess{ + File: &storage.FileAccess_File{ + ActualPath: actualPath, + EffectivePath: effectivePath, + }, + Operation: operation, + } +} + +func newFileAccessPolicy(eventSource storage.EventSource, operations []storage.FileAccess_Operation, negate bool, paths ...string) *storage.Policy { + var pathValues []*storage.PolicyValue + for _, path := range paths { + pathValues = append(pathValues, &storage.PolicyValue{Value: path}) + } + + policyGroups := []*storage.PolicyGroup{ + { + FieldName: fieldnames.FilePath, + Values: pathValues, + }, + } + + var operationValues []*storage.PolicyValue + for _, op := range operations { + operationValues = append(operationValues, &storage.PolicyValue{Value: op.String()}) + } + + if len(operationValues) != 0 { + policyGroups = append(policyGroups, &storage.PolicyGroup{ + FieldName: fieldnames.FileOperation, + Values: operationValues, + Negate: negate, + }) + } + + return &storage.Policy{ + Id: uuid.NewV4().String(), + PolicyVersion: "1.1", + Name: "File Access Policy", + Severity: storage.Severity_HIGH_SEVERITY, + Categories: []string{"File System"}, + PolicySections: []*storage.PolicySection{ + { + SectionName: "section 1", + PolicyGroups: policyGroups, + }, + }, + LifecycleStages: []storage.LifecycleStage{storage.LifecycleStage_RUNTIME}, + EventSource: eventSource, + } +} + +func newDualPathPolicy(actualPath, effectivePath string, operations []storage.FileAccess_Operation) *storage.Policy { + policyGroups := []*storage.PolicyGroup{ + { + FieldName: fieldnames.FilePath, + Values: []*storage.PolicyValue{{Value: actualPath}, {Value: effectivePath}}, + }, + } + + if len(operations) > 0 { + var operationValues []*storage.PolicyValue + for _, op := range operations { + operationValues = append(operationValues, &storage.PolicyValue{Value: op.String()}) + } + policyGroups = append(policyGroups, &storage.PolicyGroup{ + FieldName: fieldnames.FileOperation, + Values: operationValues, + }) + } + + return &storage.Policy{ + Id: uuid.NewV4().String(), + PolicyVersion: "1.1", + Name: "Dual Path Policy", + Severity: storage.Severity_HIGH_SEVERITY, + Categories: []string{"File System"}, + PolicySections: []*storage.PolicySection{ + { + SectionName: "section 1", + PolicyGroups: policyGroups, + }, + }, + LifecycleStages: []storage.LifecycleStage{storage.LifecycleStage_RUNTIME}, + EventSource: storage.EventSource_DEPLOYMENT_EVENT, + } +} + +func 
newMultiSectionPolicy(eventSource storage.EventSource, sections []*storage.PolicySection) *storage.Policy { + return &storage.Policy{ + Id: uuid.NewV4().String(), + PolicyVersion: "1.1", + Name: "Multi-Section Policy", + Severity: storage.Severity_HIGH_SEVERITY, + Categories: []string{"File System"}, + PolicySections: sections, + LifecycleStages: []storage.LifecycleStage{storage.LifecycleStage_RUNTIME}, + EventSource: eventSource, + } +} diff --git a/pkg/booleanpolicy/runtime_criteria_test.go b/pkg/booleanpolicy/runtime_criteria_test.go new file mode 100644 index 0000000000000..7f9105eacc1a9 --- /dev/null +++ b/pkg/booleanpolicy/runtime_criteria_test.go @@ -0,0 +1,1674 @@ +package booleanpolicy + +import ( + "fmt" + "strconv" + "testing" + + "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/booleanpolicy/fieldnames" + "github.com/stackrox/rox/pkg/features" + "github.com/stackrox/rox/pkg/fixtures" + "github.com/stackrox/rox/pkg/kubernetes" + "github.com/stackrox/rox/pkg/protoassert" + "github.com/stackrox/rox/pkg/protoutils" + "github.com/stackrox/rox/pkg/set" + "github.com/stackrox/rox/pkg/testutils" + "github.com/stackrox/rox/pkg/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +func TestRuntimeCriteria(t *testing.T) { + t.Setenv(features.CVEFixTimestampCriteria.EnvVar(), "true") + suite.Run(t, new(RuntimeCriteriaTestSuite)) +} + +type RuntimeCriteriaTestSuite struct { + basePoliciesTestSuite +} + +func processBaselineMessage(dep *storage.Deployment, baseline bool, privileged bool, processNames ...string) []*storage.Alert_Violation { + violations := make([]*storage.Alert_Violation, 0, len(processNames)) + containerName := dep.GetContainers()[0].GetName() + for _, p := range processNames { + if baseline { + msg := fmt.Sprintf("Unexpected process '%s' in container '%s'", p, containerName) + violations = append(violations, &storage.Alert_Violation{Message: msg}) + } + if privileged { + violations = append(violations, privilegedMessage(dep)...) 
+ } + } + return violations +} + +func privilegedMessage(dep *storage.Deployment) []*storage.Alert_Violation { + containerName := dep.GetContainers()[0].GetName() + return []*storage.Alert_Violation{{Message: fmt.Sprintf("Container '%s' is privileged", containerName)}} +} + +func newIndicator(deployment *storage.Deployment, name, args, execFilePath string) *storage.ProcessIndicator { + return &storage.ProcessIndicator{ + Id: uuid.NewV4().String(), + ContainerName: deployment.GetContainers()[0].GetName(), + Signal: &storage.ProcessSignal{ + Name: name, + Args: args, + ExecFilePath: execFilePath, + }, + } +} + +func podExecViolationMsg(pod, container, command string) *storage.Alert_Violation { + if command == "" { + return &storage.Alert_Violation{ + Message: fmt.Sprintf("Kubernetes API received exec request into pod '%s' container '%s'", pod, container), + Type: storage.Alert_Violation_K8S_EVENT, + MessageAttributes: &storage.Alert_Violation_KeyValueAttrs_{ + KeyValueAttrs: &storage.Alert_Violation_KeyValueAttrs{ + Attrs: []*storage.Alert_Violation_KeyValueAttrs_KeyValueAttr{ + {Key: "pod", Value: pod}, + {Key: "container", Value: container}, + }, + }, + }, + } + } + + return &storage.Alert_Violation{ + Message: fmt.Sprintf("Kubernetes API received exec '%s' request into pod '%s' container '%s'", + command, pod, container), + Type: storage.Alert_Violation_K8S_EVENT, + MessageAttributes: &storage.Alert_Violation_KeyValueAttrs_{ + KeyValueAttrs: &storage.Alert_Violation_KeyValueAttrs{ + Attrs: []*storage.Alert_Violation_KeyValueAttrs_KeyValueAttr{ + {Key: "pod", Value: pod}, + {Key: "container", Value: container}, + {Key: "commands", Value: command}, + }, + }, + }, + } +} + +func podPortForwardViolationMsg(pod string, port int) *storage.Alert_Violation { + return &storage.Alert_Violation{ + Message: fmt.Sprintf("Kubernetes API received port forward request to pod '%s' ports '%s'", pod, strconv.Itoa(port)), + Type: storage.Alert_Violation_K8S_EVENT, + MessageAttributes: &storage.Alert_Violation_KeyValueAttrs_{ + KeyValueAttrs: &storage.Alert_Violation_KeyValueAttrs{ + Attrs: []*storage.Alert_Violation_KeyValueAttrs_KeyValueAttr{ + {Key: "pod", Value: pod}, + {Key: "ports", Value: strconv.Itoa(port)}, + }, + }, + }, + } +} + +func podExecEvent(pod, container, command string) *storage.KubernetesEvent { + return &storage.KubernetesEvent{ + Object: &storage.KubernetesEvent_Object{ + Name: pod, + Resource: storage.KubernetesEvent_Object_PODS_EXEC, + }, + ObjectArgs: &storage.KubernetesEvent_PodExecArgs_{ + PodExecArgs: &storage.KubernetesEvent_PodExecArgs{ + Container: container, + Commands: []string{command}, + }, + }, + } +} + +func podPortForwardEvent(pod string, port int32) *storage.KubernetesEvent { + return &storage.KubernetesEvent{ + Object: &storage.KubernetesEvent_Object{ + Name: pod, + Resource: storage.KubernetesEvent_Object_PODS_PORTFORWARD, + }, + ObjectArgs: &storage.KubernetesEvent_PodPortForwardArgs_{ + PodPortForwardArgs: &storage.KubernetesEvent_PodPortForwardArgs{ + Ports: []int32{port}, + }, + }, + } +} + +func podAttachEvent(pod, container string) *storage.KubernetesEvent { + return &storage.KubernetesEvent{ + Object: &storage.KubernetesEvent_Object{ + Name: pod, + Resource: storage.KubernetesEvent_Object_PODS_ATTACH, + }, + ObjectArgs: &storage.KubernetesEvent_PodAttachArgs_{ + PodAttachArgs: &storage.KubernetesEvent_PodAttachArgs{ + Container: container, + }, + }, + } +} + +func podAttachViolationMsg(pod, container string) *storage.Alert_Violation { + attrs := 
[]*storage.Alert_Violation_KeyValueAttrs_KeyValueAttr{ + {Key: "pod", Value: pod}, + } + if container != "" { + attrs = append(attrs, &storage.Alert_Violation_KeyValueAttrs_KeyValueAttr{Key: "container", Value: container}) + } + + message := "Kubernetes API received attach request" + if pod != "" { + message = fmt.Sprintf("Kubernetes API received attach request to pod '%s'", pod) + if container != "" { + message = fmt.Sprintf("Kubernetes API received attach request to pod '%s' container '%s'", pod, container) + } + } + + return &storage.Alert_Violation{ + Message: message, + Type: storage.Alert_Violation_K8S_EVENT, + MessageAttributes: &storage.Alert_Violation_KeyValueAttrs_{ + KeyValueAttrs: &storage.Alert_Violation_KeyValueAttrs{ + Attrs: attrs, + }, + }, + } +} + +func (suite *RuntimeCriteriaTestSuite) TestProcessBaseline() { + privilegedDep := fixtures.GetDeployment().CloneVT() + privilegedDep.Id = "PRIVILEGED" + suite.addDepAndImages(privilegedDep) + + nonPrivilegedDep := fixtures.GetDeployment().CloneVT() + nonPrivilegedDep.Id = "NOTPRIVILEGED" + nonPrivilegedDep.Containers[0].SecurityContext.Privileged = false + suite.addDepAndImages(nonPrivilegedDep) + + const aptGetKey = "apt-get" + const aptGet2Key = "apt-get2" + const curlKey = "curl" + const bashKey = "bash" + + indicators := make(map[string]map[string]*storage.ProcessIndicator) + for _, dep := range []*storage.Deployment{privilegedDep, nonPrivilegedDep} { + indicators[dep.GetId()] = map[string]*storage.ProcessIndicator{ + aptGetKey: suite.addIndicator(dep.GetId(), "apt-get", "install nginx", "/bin/apt-get", nil, 0), + aptGet2Key: suite.addIndicator(dep.GetId(), "apt-get", "update", "/bin/apt-get", nil, 0), + curlKey: suite.addIndicator(dep.GetId(), "curl", "https://stackrox.io", "/bin/curl", nil, 0), + bashKey: suite.addIndicator(dep.GetId(), "bash", "attach.sh", "/bin/bash", nil, 0), + } + } + processesNotInBaseline := map[string]set.StringSet{ + privilegedDep.GetId(): set.NewStringSet(aptGetKey, aptGet2Key, bashKey), + nonPrivilegedDep.GetId(): set.NewStringSet(aptGetKey, curlKey, bashKey), + } + + // Plain groups + aptGetGroup := policyGroupWithSingleKeyValue(fieldnames.ProcessName, "apt-get", false) + privilegedGroup := policyGroupWithSingleKeyValue(fieldnames.PrivilegedContainer, "true", false) + baselineGroup := policyGroupWithSingleKeyValue(fieldnames.UnexpectedProcessExecuted, "true", false) + + for _, testCase := range []struct { + groups []*storage.PolicyGroup + + // Deployment ids to indicator keys + expectedMatches map[string][]string + expectedProcessMatches map[string][]string + // Deployment ids to violations + expectedViolations map[string][]*storage.Alert_Violation + }{ + { + groups: []*storage.PolicyGroup{aptGetGroup}, + // only process violation, no alert violation + expectedMatches: map[string][]string{}, + expectedProcessMatches: map[string][]string{ + privilegedDep.GetId(): {aptGetKey, aptGet2Key}, + nonPrivilegedDep.GetId(): {aptGetKey, aptGet2Key}, + }, + }, + { + groups: []*storage.PolicyGroup{baselineGroup}, + expectedMatches: map[string][]string{}, + expectedProcessMatches: map[string][]string{ + privilegedDep.GetId(): {aptGetKey, aptGet2Key, bashKey}, + nonPrivilegedDep.GetId(): {aptGetKey, curlKey, bashKey}, + }, + }, + + { + groups: []*storage.PolicyGroup{privilegedGroup}, + expectedMatches: map[string][]string{ + privilegedDep.GetId(): {aptGetKey, aptGet2Key, curlKey, bashKey}, + }, + expectedProcessMatches: map[string][]string{}, + expectedViolations: map[string][]*storage.Alert_Violation{ + 
privilegedDep.GetId(): processBaselineMessage(privilegedDep, false, true, "apt-get", "apt-get", "curl", "bash"), + }, + }, + { + groups: []*storage.PolicyGroup{aptGetGroup, baselineGroup}, + expectedMatches: map[string][]string{}, + expectedProcessMatches: map[string][]string{ + privilegedDep.GetId(): {aptGetKey, aptGet2Key}, + nonPrivilegedDep.GetId(): {aptGetKey}, + }, + }, + { + groups: []*storage.PolicyGroup{aptGetGroup, privilegedGroup}, + expectedMatches: map[string][]string{ + privilegedDep.GetId(): {aptGetKey, aptGet2Key}, + }, + expectedViolations: map[string][]*storage.Alert_Violation{ + privilegedDep.GetId(): processBaselineMessage(privilegedDep, false, true, "apt-get", "apt-get"), + }, + expectedProcessMatches: map[string][]string{ + privilegedDep.GetId(): {aptGetKey, aptGet2Key}, + }, + }, + { + groups: []*storage.PolicyGroup{privilegedGroup, baselineGroup}, + expectedMatches: map[string][]string{ + privilegedDep.GetId(): {aptGetKey, aptGet2Key, bashKey}, + }, + expectedProcessMatches: map[string][]string{ + privilegedDep.GetId(): {aptGetKey, aptGet2Key, bashKey}, + }, + }, + { + groups: []*storage.PolicyGroup{aptGetGroup, privilegedGroup, baselineGroup}, + expectedMatches: map[string][]string{ + privilegedDep.GetId(): {aptGetKey, aptGet2Key}, + }, + expectedProcessMatches: map[string][]string{ + privilegedDep.GetId(): {aptGetKey, aptGet2Key}, + }, + }, + } { + c := testCase + suite.T().Run(fmt.Sprintf("%+v", c.groups), func(t *testing.T) { + policy := policyWithGroups(storage.EventSource_DEPLOYMENT_EVENT, c.groups...) + + m, err := BuildDeploymentWithProcessMatcher(policy) + require.NoError(t, err) + + actualMatches := make(map[string][]string) + actualProcessMatches := make(map[string][]string) + actualViolations := make(map[string][]*storage.Alert_Violation) + for _, dep := range []*storage.Deployment{privilegedDep, nonPrivilegedDep} { + for _, key := range []string{aptGetKey, aptGet2Key, curlKey, bashKey} { + violations, err := m.MatchDeploymentWithProcess(nil, enhancedDeployment(dep, suite.getImagesForDeployment(dep)), indicators[dep.GetId()][key], processesNotInBaseline[dep.GetId()].Contains(key)) + suite.Require().NoError(err) + if len(violations.AlertViolations) > 0 { + actualMatches[dep.GetId()] = append(actualMatches[dep.GetId()], key) + actualViolations[dep.GetId()] = append(actualViolations[dep.GetId()], violations.AlertViolations...) 
+ } + if violations.ProcessViolation != nil { + actualProcessMatches[dep.GetId()] = append(actualProcessMatches[dep.GetId()], key) + } + + } + } + assert.Equal(t, c.expectedMatches, actualMatches) + assert.Equal(t, c.expectedProcessMatches, actualProcessMatches) + + for id, violations := range c.expectedViolations { + assert.Contains(t, actualViolations, id) + protoassert.ElementsMatch(t, violations, actualViolations[id]) + } + }) + } +} + +func (suite *RuntimeCriteriaTestSuite) TestKubeEventConstraints() { + podExecGroup := policyGroupWithSingleKeyValue(fieldnames.KubeResource, "PODS_EXEC", false) + podAttachGroup := policyGroupWithSingleKeyValue(fieldnames.KubeResource, "PODS_ATTACH", false) + + aptGetGroup := policyGroupWithSingleKeyValue(fieldnames.ProcessName, "apt-get", false) + + for _, c := range []struct { + event *storage.KubernetesEvent + groups []*storage.PolicyGroup + expectedViolations []*storage.Alert_Violation + builderErr bool + withProcessSection bool + }{ + // PODS_EXEC test cases + { + event: podExecEvent("p1", "c1", "cmd"), + groups: []*storage.PolicyGroup{podExecGroup}, + expectedViolations: []*storage.Alert_Violation{podExecViolationMsg("p1", "c1", "cmd")}, + }, + { + event: podExecEvent("p1", "c1", ""), + groups: []*storage.PolicyGroup{podExecGroup}, + expectedViolations: []*storage.Alert_Violation{podExecViolationMsg("p1", "c1", "")}, + }, + + { + groups: []*storage.PolicyGroup{podExecGroup}, + }, + { + event: podPortForwardEvent("p1", 8000), + groups: []*storage.PolicyGroup{podExecGroup}, + }, + { + event: podPortForwardEvent("p1", 8000), + groups: []*storage.PolicyGroup{podExecGroup, aptGetGroup}, + builderErr: true, + }, + { + event: podExecEvent("p1", "c1", ""), + groups: []*storage.PolicyGroup{podExecGroup}, + expectedViolations: []*storage.Alert_Violation{podExecViolationMsg("p1", "c1", "")}, + withProcessSection: true, + }, + // PODS_ATTACH test cases + { + event: podAttachEvent("p1", "c1"), + groups: []*storage.PolicyGroup{podAttachGroup}, + expectedViolations: []*storage.Alert_Violation{podAttachViolationMsg("p1", "c1")}, + }, + { + event: podAttachEvent("p1", ""), + groups: []*storage.PolicyGroup{podAttachGroup}, + expectedViolations: []*storage.Alert_Violation{podAttachViolationMsg("p1", "")}, + }, + { + // No event provided, should not match + groups: []*storage.PolicyGroup{podAttachGroup}, + }, + { + // Port forward event should not match attach policy + event: podPortForwardEvent("p1", 8000), + groups: []*storage.PolicyGroup{podAttachGroup}, + }, + { + // Exec event should not match attach policy + event: podExecEvent("p1", "c1", "cmd"), + groups: []*storage.PolicyGroup{podAttachGroup}, + }, + { + // Attach event should not match exec policy + event: podAttachEvent("p1", "c1"), + groups: []*storage.PolicyGroup{podExecGroup}, + }, + { + // Attach policy with process group should fail builder + event: podAttachEvent("p1", "c1"), + groups: []*storage.PolicyGroup{podAttachGroup, aptGetGroup}, + builderErr: true, + }, + } { + suite.T().Run(fmt.Sprintf("%+v", c.groups), func(t *testing.T) { + policy := policyWithGroups(storage.EventSource_DEPLOYMENT_EVENT, c.groups...) 
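+			// Comment added for clarity: when withProcessSection is set, an extra policy
+			// section containing only a process criterion is appended; the case still
+			// expects the exec violation, i.e. a process-only section should not suppress
+			// the kube event match.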
+ if c.withProcessSection { + policy.PolicySections = append(policy.PolicySections, + &storage.PolicySection{PolicyGroups: []*storage.PolicyGroup{aptGetGroup}}) + } + + m, err := BuildKubeEventMatcher(policy) + if c.builderErr { + require.Error(t, err) + return + } + require.NoError(t, err) + + actualViolations, err := m.MatchKubeEvent(nil, c.event, &storage.Deployment{}) + suite.Require().NoError(err) + + assert.Nil(t, actualViolations.ProcessViolation) + if len(c.expectedViolations) == 0 { + assert.Nil(t, actualViolations.AlertViolations) + } else { + protoassert.ElementsMatch(t, c.expectedViolations, actualViolations.AlertViolations) + } + }) + } +} + +func (suite *RuntimeCriteriaTestSuite) TestKubeEventDefaultPolicies() { + for _, c := range []struct { + policyName string + event *storage.KubernetesEvent + expectedViolations []*storage.Alert_Violation + }{ + { + policyName: "Kubernetes Actions: Exec into Pod", + event: podExecEvent("p1", "c1", "apt-get"), + expectedViolations: []*storage.Alert_Violation{podExecViolationMsg("p1", "c1", "apt-get")}, + }, + { + policyName: "Kubernetes Actions: Exec into Pod", + event: podPortForwardEvent("p1", 8000), + }, + // Event without CREATE. + { + policyName: "Kubernetes Actions: Exec into Pod", + event: &storage.KubernetesEvent{ + Object: &storage.KubernetesEvent_Object{ + Name: "p1", + Resource: storage.KubernetesEvent_Object_PODS_EXEC, + }, + ObjectArgs: &storage.KubernetesEvent_PodExecArgs_{ + PodExecArgs: &storage.KubernetesEvent_PodExecArgs{ + Container: "c1", + }, + }, + }, + expectedViolations: []*storage.Alert_Violation{podExecViolationMsg("p1", "c1", "")}, + }, + { + policyName: "Kubernetes Actions: Port Forward to Pod", + }, + { + policyName: "Kubernetes Actions: Port Forward to Pod", + event: podPortForwardEvent("p1", 8000), + expectedViolations: []*storage.Alert_Violation{podPortForwardViolationMsg("p1", 8000)}, + }, + { + policyName: "Kubernetes Actions: Port Forward to Pod", + event: &storage.KubernetesEvent{ + Object: &storage.KubernetesEvent_Object{ + Name: "p1", + Resource: storage.KubernetesEvent_Object_PODS_PORTFORWARD, + }, + ObjectArgs: &storage.KubernetesEvent_PodPortForwardArgs_{ + PodPortForwardArgs: &storage.KubernetesEvent_PodPortForwardArgs{ + Ports: []int32{8000}, + }, + }, + }, + expectedViolations: []*storage.Alert_Violation{podPortForwardViolationMsg("p1", 8000)}, + }, + } { + suite.T().Run(fmt.Sprintf("%s:%s", c.policyName, kubernetes.EventAsString(c.event)), func(t *testing.T) { + policy := suite.MustGetPolicy(c.policyName) + m, err := BuildKubeEventMatcher(policy) + require.NoError(t, err) + + actualViolations, err := m.MatchKubeEvent(nil, c.event, &storage.Deployment{}) + suite.Require().NoError(err) + + assert.Nil(t, actualViolations.ProcessViolation) + if len(c.expectedViolations) == 0 { + for _, a := range actualViolations.AlertViolations { + fmt.Printf("%v", protoutils.NewWrapper(a)) + } + + assert.Nil(t, actualViolations.AlertViolations) + } else { + protoassert.ElementsMatch(t, c.expectedViolations, actualViolations.AlertViolations) + } + }) + } +} + +func BenchmarkProcessPolicies(b *testing.B) { + privilegedDep := fixtures.GetDeployment().CloneVT() + privilegedDep.Id = "PRIVILEGED" + images := []*storage.Image{fixtures.GetImage(), fixtures.GetImage()} + + nonPrivilegedDep := fixtures.GetDeployment().CloneVT() + nonPrivilegedDep.Id = "NOTPRIVILEGED" + nonPrivilegedDep.Containers[0].SecurityContext.Privileged = false + + const aptGetKey = "apt-get" + const aptGet2Key = "apt-get2" + const curlKey = "curl" + 
const bashKey = "bash" + + indicators := make(map[string]map[string]*storage.ProcessIndicator) + for _, dep := range []*storage.Deployment{privilegedDep, nonPrivilegedDep} { + indicators[dep.GetId()] = map[string]*storage.ProcessIndicator{ + aptGetKey: newIndicator(dep, "apt-get", "install nginx", "/bin/apt-get"), + aptGet2Key: newIndicator(dep, "apt-get", "update", "/bin/apt-get"), + curlKey: newIndicator(dep, "curl", "https://stackrox.io", "/bin/curl"), + bashKey: newIndicator(dep, "bash", "attach.sh", "/bin/bash"), + } + } + processesNotInBaseline := map[string]set.StringSet{ + privilegedDep.GetId(): set.NewStringSet(aptGetKey, aptGet2Key, bashKey), + nonPrivilegedDep.GetId(): set.NewStringSet(aptGetKey, curlKey, bashKey), + } + + // Plain groups + aptGetGroup := policyGroupWithSingleKeyValue(fieldnames.ProcessName, "apt-get", false) + privilegedGroup := policyGroupWithSingleKeyValue(fieldnames.PrivilegedContainer, "true", false) + baselineGroup := policyGroupWithSingleKeyValue(fieldnames.UnexpectedProcessExecuted, "true", false) + + for _, testCase := range []struct { + groups []*storage.PolicyGroup + + // Deployment ids to indicator keys + expectedMatches map[string][]string + expectedProcessMatches map[string][]string + // Deployment ids to violations + expectedViolations map[string][]*storage.Alert_Violation + }{ + { + groups: []*storage.PolicyGroup{aptGetGroup}, + // only process violation, no alert violation + expectedMatches: map[string][]string{}, + expectedProcessMatches: map[string][]string{ + privilegedDep.GetId(): {aptGetKey, aptGet2Key}, + nonPrivilegedDep.GetId(): {aptGetKey, aptGet2Key}, + }, + }, + { + groups: []*storage.PolicyGroup{baselineGroup}, + expectedMatches: map[string][]string{ + privilegedDep.GetId(): {aptGetKey, aptGet2Key, bashKey}, + nonPrivilegedDep.GetId(): {aptGetKey, curlKey, bashKey}, + }, + expectedProcessMatches: map[string][]string{ + privilegedDep.GetId(): {aptGetKey, aptGet2Key, bashKey}, + nonPrivilegedDep.GetId(): {aptGetKey, curlKey, bashKey}, + }, + expectedViolations: map[string][]*storage.Alert_Violation{ + privilegedDep.GetId(): processBaselineMessage(privilegedDep, true, false, "apt-get", "apt-get", "bash"), + nonPrivilegedDep.GetId(): processBaselineMessage(nonPrivilegedDep, true, false, "apt-get", "bash", "curl"), + }, + }, + + { + groups: []*storage.PolicyGroup{privilegedGroup}, + expectedMatches: map[string][]string{ + privilegedDep.GetId(): {aptGetKey, aptGet2Key, curlKey, bashKey}, + }, + expectedProcessMatches: map[string][]string{}, + expectedViolations: map[string][]*storage.Alert_Violation{ + privilegedDep.GetId(): processBaselineMessage(privilegedDep, false, true, "apt-get", "apt-get", "curl", "bash"), + }, + }, + { + groups: []*storage.PolicyGroup{aptGetGroup, baselineGroup}, + expectedMatches: map[string][]string{ + privilegedDep.GetId(): {aptGetKey, aptGet2Key}, + nonPrivilegedDep.GetId(): {aptGetKey}, + }, + expectedViolations: map[string][]*storage.Alert_Violation{ + privilegedDep.GetId(): processBaselineMessage(privilegedDep, true, false, "apt-get", "apt-get"), + nonPrivilegedDep.GetId(): processBaselineMessage(nonPrivilegedDep, true, false, "apt-get"), + }, + expectedProcessMatches: map[string][]string{ + privilegedDep.GetId(): {aptGetKey, aptGet2Key}, + nonPrivilegedDep.GetId(): {aptGetKey}, + }, + }, + { + groups: []*storage.PolicyGroup{aptGetGroup, privilegedGroup}, + expectedMatches: map[string][]string{ + privilegedDep.GetId(): {aptGetKey, aptGet2Key}, + }, + expectedViolations: map[string][]*storage.Alert_Violation{ 
+ privilegedDep.GetId(): processBaselineMessage(privilegedDep, false, true, "apt-get", "apt-get"), + }, + expectedProcessMatches: map[string][]string{ + privilegedDep.GetId(): {aptGetKey, aptGet2Key}, + }, + }, + { + groups: []*storage.PolicyGroup{privilegedGroup, baselineGroup}, + expectedMatches: map[string][]string{ + privilegedDep.GetId(): {aptGetKey, aptGet2Key, bashKey}, + }, + expectedViolations: map[string][]*storage.Alert_Violation{ + privilegedDep.GetId(): processBaselineMessage(privilegedDep, true, true, "apt-get", "apt-get", "bash"), + }, + expectedProcessMatches: map[string][]string{ + privilegedDep.GetId(): {aptGetKey, aptGet2Key, bashKey}, + }, + }, + { + groups: []*storage.PolicyGroup{aptGetGroup, privilegedGroup, baselineGroup}, + expectedMatches: map[string][]string{ + privilegedDep.GetId(): {aptGetKey, aptGet2Key}, + }, + expectedViolations: map[string][]*storage.Alert_Violation{ + privilegedDep.GetId(): processBaselineMessage(privilegedDep, true, true, "apt-get", "apt-get"), + }, + expectedProcessMatches: map[string][]string{ + privilegedDep.GetId(): {aptGetKey, aptGet2Key}, + }, + }, + } { + c := testCase + b.Run(fmt.Sprintf("%+v", c.groups), func(b *testing.B) { + policy := policyWithGroups(storage.EventSource_DEPLOYMENT_EVENT, c.groups...) + m, err := BuildDeploymentWithProcessMatcher(policy) + require.NoError(b, err) + + b.ResetTimer() + for b.Loop() { + for _, dep := range []*storage.Deployment{privilegedDep, nonPrivilegedDep} { + for _, key := range []string{aptGetKey, aptGet2Key, curlKey, bashKey} { + _, err := m.MatchDeploymentWithProcess(nil, enhancedDeployment(dep, images), indicators[dep.GetId()][key], processesNotInBaseline[dep.GetId()].Contains(key)) + require.NoError(b, err) + } + } + } + }) + } + + policy := policyWithGroups(storage.EventSource_DEPLOYMENT_EVENT, aptGetGroup, privilegedGroup, baselineGroup) + m, err := BuildDeploymentWithProcessMatcher(policy) + require.NoError(b, err) + for _, dep := range []*storage.Deployment{privilegedDep, nonPrivilegedDep} { + for _, key := range []string{aptGetKey, aptGet2Key, curlKey, bashKey} { + indicator := indicators[dep.GetId()][key] + notInBaseline := processesNotInBaseline[dep.GetId()].Contains(key) + b.Run(fmt.Sprintf("benchmark caching: %s/%s", dep.GetId(), key), func(b *testing.B) { + var resNoCaching Violations + b.Run("no caching", func(b *testing.B) { + for b.Loop() { + var err error + resNoCaching, err = m.MatchDeploymentWithProcess(nil, enhancedDeployment(privilegedDep, images), indicator, notInBaseline) + require.NoError(b, err) + } + }) + + var resWithCaching Violations + b.Run("with caching", func(b *testing.B) { + var cache CacheReceptacle + for b.Loop() { + var err error + resWithCaching, err = m.MatchDeploymentWithProcess(&cache, enhancedDeployment(privilegedDep, images), indicator, notInBaseline) + require.NoError(b, err) + } + }) + assertViolations(b, resNoCaching, resWithCaching) + }) + } + } +} + +func (suite *RuntimeCriteriaTestSuite) TestDeploymentFileAccess() { + deployment := &storage.Deployment{ + Name: "test-deployment", + Id: "test-deployment-id", + } + + type eventWrapper struct { + access *storage.FileAccess + expectAlert bool + } + + for _, tc := range []struct { + description string + policy *storage.Policy + events []eventWrapper + }{ + { + description: "Deployment file open policy with matching event", + policy: newFileAccessPolicy(storage.EventSource_DEPLOYMENT_EVENT, + []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, + "/etc/passwd", + ), + events: 
[]eventWrapper{ + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), + expectAlert: true, + }, + }, + }, + { + description: "Deployment file open policy with mismatching event (UNLINK)", + policy: newFileAccessPolicy(storage.EventSource_DEPLOYMENT_EVENT, + []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, + "/etc/passwd", + ), + events: []eventWrapper{ + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_UNLINK), + expectAlert: false, + }, + }, + }, + { + description: "Deployment file open policy with mismatching event (/tmp/foo)", + policy: newFileAccessPolicy(storage.EventSource_DEPLOYMENT_EVENT, + []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, + "/etc/passwd", + ), + events: []eventWrapper{ + { + access: newActualFileAccessEvent("/tmp/foo", storage.FileAccess_OPEN), + expectAlert: false, + }, + }, + }, + { + description: "Deployment file policy with negated file operation", + policy: newFileAccessPolicy(storage.EventSource_DEPLOYMENT_EVENT, + []storage.FileAccess_Operation{storage.FileAccess_OPEN}, true, + "/etc/passwd", + ), + events: []eventWrapper{ + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), + expectAlert: false, // open is the only event we should ignore + }, + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_UNLINK), + expectAlert: true, + }, + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), + expectAlert: true, + }, + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_OWNERSHIP_CHANGE), + expectAlert: true, + }, + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_PERMISSION_CHANGE), + expectAlert: true, + }, + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_RENAME), + expectAlert: true, + }, + }, + }, + { + description: "Deployment file policy with multiple operations", + policy: newFileAccessPolicy(storage.EventSource_DEPLOYMENT_EVENT, + []storage.FileAccess_Operation{storage.FileAccess_OPEN, storage.FileAccess_CREATE}, false, + "/etc/passwd", + ), + events: []eventWrapper{ + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), + expectAlert: true, + }, + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), + expectAlert: true, + }, + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_RENAME), + expectAlert: false, + }, + }, + }, + { + description: "Deployment file policy with multiple negated operations", + policy: newFileAccessPolicy(storage.EventSource_DEPLOYMENT_EVENT, + []storage.FileAccess_Operation{storage.FileAccess_OPEN, storage.FileAccess_CREATE}, true, + "/etc/passwd", + ), + events: []eventWrapper{ + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), + expectAlert: false, + }, + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), + expectAlert: false, + }, + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_OWNERSHIP_CHANGE), + expectAlert: true, + }, + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_PERMISSION_CHANGE), + expectAlert: true, + }, + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_UNLINK), + expectAlert: true, + }, + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_RENAME), + expectAlert: true, + }, + }, + }, + { + description: "Deployment file policy with multiple files and single operation", + policy: 
newFileAccessPolicy(storage.EventSource_DEPLOYMENT_EVENT, + []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, + "/etc/passwd", "/etc/shadow", + ), + events: []eventWrapper{ + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), + expectAlert: true, + }, + { + access: newActualFileAccessEvent("/etc/shadow", storage.FileAccess_OPEN), + expectAlert: true, + }, + }, + }, + { + description: "Deployment file policy with multiple files and multiple operations", + policy: newFileAccessPolicy(storage.EventSource_DEPLOYMENT_EVENT, + []storage.FileAccess_Operation{storage.FileAccess_OPEN, storage.FileAccess_CREATE}, false, + "/etc/passwd", "/etc/shadow", + ), + events: []eventWrapper{ + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), + expectAlert: true, + }, + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), + expectAlert: true, + }, + { + access: newActualFileAccessEvent("/etc/shadow", storage.FileAccess_OPEN), + expectAlert: true, + }, + { + access: newActualFileAccessEvent("/etc/shadow", storage.FileAccess_CREATE), + expectAlert: true, + }, + { + access: newActualFileAccessEvent("/tmp/foo", storage.FileAccess_CREATE), + expectAlert: false, + }, + { + access: newActualFileAccessEvent("/tmp/foo", storage.FileAccess_OPEN), + expectAlert: false, + }, + }, + }, + { + description: "Deployment file policy with no operations", + policy: newFileAccessPolicy(storage.EventSource_DEPLOYMENT_EVENT, nil, false, "/etc/passwd"), + events: []eventWrapper{ + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), + expectAlert: true, + }, + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), + expectAlert: true, + }, + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_OWNERSHIP_CHANGE), + expectAlert: true, + }, + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_PERMISSION_CHANGE), + expectAlert: true, + }, + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_UNLINK), + expectAlert: true, + }, + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_RENAME), + expectAlert: true, + }, + }, + }, + { + description: "Deployment file policy with all allowed files", + policy: newFileAccessPolicy(storage.EventSource_DEPLOYMENT_EVENT, nil, false, "/etc/passwd", "/etc/ssh/sshd_config", "/etc/shadow", "/etc/sudoers"), + events: []eventWrapper{ + { + access: newActualFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), + expectAlert: true, + }, + { + access: newActualFileAccessEvent("/etc/shadow", storage.FileAccess_OPEN), + expectAlert: true, + }, + { + access: newActualFileAccessEvent("/etc/ssh/sshd_config", storage.FileAccess_OPEN), + expectAlert: true, + }, + { + access: newActualFileAccessEvent("/etc/sudoers", storage.FileAccess_OPEN), + expectAlert: true, + }, + }, + }, + { + description: "Deployment file policy with suffix", + policy: newFileAccessPolicy(storage.EventSource_DEPLOYMENT_EVENT, nil, false, "/etc/passwd", "/etc/ssh/sshd_config", "/etc/shadow", "/etc/sudoers"), + events: []eventWrapper{ + { + access: newActualFileAccessEvent("/etc/passwd-suffix", storage.FileAccess_OPEN), + expectAlert: false, + }, + { + access: newActualFileAccessEvent("/etc/shadow-suffix", storage.FileAccess_OPEN), + expectAlert: false, + }, + { + access: newActualFileAccessEvent("/etc/ssh/sshd_config-suffix", storage.FileAccess_OPEN), + expectAlert: false, + }, + { + access: 
newActualFileAccessEvent("/etc/sudoers-suffix", storage.FileAccess_OPEN), + expectAlert: false, + }, + }, + }, + } { + testutils.MustUpdateFeature(suite.T(), features.SensitiveFileActivity, true) + defer testutils.MustUpdateFeature(suite.T(), features.SensitiveFileActivity, false) + ResetFieldMetadataSingleton(suite.T()) + defer ResetFieldMetadataSingleton(suite.T()) + + suite.Run(tc.description, func() { + matcher, err := BuildDeploymentWithFileAccessMatcher(tc.policy) + suite.Require().NoError(err) + + for _, event := range tc.events { + var cache CacheReceptacle + enhancedDep := EnhancedDeployment{ + Deployment: deployment, + Images: nil, + NetworkPoliciesApplied: nil, + } + violations, err := matcher.MatchDeploymentWithFileAccess(&cache, enhancedDep, event.access) + suite.Require().NoError(err) + + if event.expectAlert { + suite.Require().Len(violations.AlertViolations, 1, "expected one file access violation in alert") + suite.Require().Equal(storage.Alert_Violation_FILE_ACCESS, violations.AlertViolations[0].GetType(), "expected FILE_ACCESS type") + + fileAccess := violations.AlertViolations[0].GetFileAccess() + suite.Require().NotNil(fileAccess, "expected file access info") + + suite.Require().Equal(event.access.GetFile().GetEffectivePath(), fileAccess.GetFile().GetEffectivePath()) + suite.Require().Equal(event.access.GetFile().GetActualPath(), fileAccess.GetFile().GetActualPath()) + suite.Require().Equal(event.access.GetOperation(), fileAccess.GetOperation()) + } else { + suite.Require().Empty(violations.AlertViolations, "expected no alerts") + } + } + }) + } +} + +func (suite *RuntimeCriteriaTestSuite) TestDeploymentEffectiveFileAccess() { + deployment := &storage.Deployment{ + Name: "test-deployment", + Id: "test-deployment-id", + } + + type eventWrapper struct { + access *storage.FileAccess + expectAlert bool + } + + for _, tc := range []struct { + description string + policy *storage.Policy + events []eventWrapper + }{ + { + description: "Deployment effective file open policy with matching event", + policy: newFileAccessPolicy(storage.EventSource_DEPLOYMENT_EVENT, + []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, + "/etc/passwd", + ), + events: []eventWrapper{ + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), + expectAlert: true, + }, + }, + }, + { + description: "Deployment effective file open policy with mismatching event (UNLINK)", + policy: newFileAccessPolicy(storage.EventSource_DEPLOYMENT_EVENT, + []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, + "/etc/passwd", + ), + events: []eventWrapper{ + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_UNLINK), + expectAlert: false, + }, + }, + }, + { + description: "Deployment effective file open policy with mismatching event (/etc/sudoers)", + policy: newFileAccessPolicy(storage.EventSource_DEPLOYMENT_EVENT, + []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, + "/etc/passwd", + ), + events: []eventWrapper{ + { + access: newEffectiveFileAccessEvent("/etc/sudoers", storage.FileAccess_OPEN), + expectAlert: false, + }, + }, + }, + { + description: "Deployment effective file policy with negated file operation", + policy: newFileAccessPolicy(storage.EventSource_DEPLOYMENT_EVENT, + []storage.FileAccess_Operation{storage.FileAccess_OPEN}, true, + "/etc/passwd", + ), + events: []eventWrapper{ + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), + expectAlert: false, // open is the only event we should ignore + 
}, + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_UNLINK), + expectAlert: true, + }, + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), + expectAlert: true, + }, + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_OWNERSHIP_CHANGE), + expectAlert: true, + }, + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_PERMISSION_CHANGE), + expectAlert: true, + }, + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_RENAME), + expectAlert: true, + }, + }, + }, + { + description: "Deployment effective file policy with multiple operations", + policy: newFileAccessPolicy(storage.EventSource_DEPLOYMENT_EVENT, + []storage.FileAccess_Operation{storage.FileAccess_OPEN, storage.FileAccess_CREATE}, false, + "/etc/passwd", + ), + events: []eventWrapper{ + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), + expectAlert: true, + }, + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), + expectAlert: true, + }, + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_RENAME), + expectAlert: false, + }, + }, + }, + { + description: "Deployment effective file policy with multiple negated operations", + policy: newFileAccessPolicy(storage.EventSource_DEPLOYMENT_EVENT, + []storage.FileAccess_Operation{storage.FileAccess_OPEN, storage.FileAccess_CREATE}, true, + "/etc/passwd", + ), + events: []eventWrapper{ + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), + expectAlert: false, + }, + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), + expectAlert: false, + }, + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_OWNERSHIP_CHANGE), + expectAlert: true, + }, + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_PERMISSION_CHANGE), + expectAlert: true, + }, + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_UNLINK), + expectAlert: true, + }, + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_RENAME), + expectAlert: true, + }, + }, + }, + { + description: "Deployment effective file policy with multiple files and single operation", + policy: newFileAccessPolicy(storage.EventSource_DEPLOYMENT_EVENT, + []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, + "/etc/passwd", "/etc/shadow", + ), + events: []eventWrapper{ + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), + expectAlert: true, + }, + { + access: newEffectiveFileAccessEvent("/etc/shadow", storage.FileAccess_OPEN), + expectAlert: true, + }, + }, + }, + { + description: "Deployment effective file policy with multiple files and multiple operations", + policy: newFileAccessPolicy(storage.EventSource_DEPLOYMENT_EVENT, + []storage.FileAccess_Operation{storage.FileAccess_OPEN, storage.FileAccess_CREATE}, false, + "/etc/passwd", "/etc/shadow", + ), + events: []eventWrapper{ + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), + expectAlert: true, + }, + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), + expectAlert: true, + }, + { + access: newEffectiveFileAccessEvent("/etc/shadow", storage.FileAccess_OPEN), + expectAlert: true, + }, + { + access: newEffectiveFileAccessEvent("/etc/shadow", storage.FileAccess_CREATE), + expectAlert: true, + }, + { + access: newEffectiveFileAccessEvent("/etc/sudoers", 
storage.FileAccess_CREATE), + expectAlert: false, + }, + { + access: newEffectiveFileAccessEvent("/etc/sudoers", storage.FileAccess_OPEN), + expectAlert: false, + }, + }, + }, + { + description: "Deployment effective file policy with no operations", + policy: newFileAccessPolicy(storage.EventSource_DEPLOYMENT_EVENT, nil, false, "/etc/passwd"), + events: []eventWrapper{ + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), + expectAlert: true, + }, + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_CREATE), + expectAlert: true, + }, + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_OWNERSHIP_CHANGE), + expectAlert: true, + }, + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_PERMISSION_CHANGE), + expectAlert: true, + }, + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_UNLINK), + expectAlert: true, + }, + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_RENAME), + expectAlert: true, + }, + }, + }, + { + description: "Deployment effective file policy with all allowed files", + policy: newFileAccessPolicy(storage.EventSource_DEPLOYMENT_EVENT, nil, false, "/etc/passwd", "/etc/ssh/sshd_config", "/etc/shadow", "/etc/sudoers"), + events: []eventWrapper{ + { + access: newEffectiveFileAccessEvent("/etc/passwd", storage.FileAccess_OPEN), + expectAlert: true, + }, + { + access: newEffectiveFileAccessEvent("/etc/shadow", storage.FileAccess_OPEN), + expectAlert: true, + }, + { + access: newEffectiveFileAccessEvent("/etc/ssh/sshd_config", storage.FileAccess_OPEN), + expectAlert: true, + }, + { + access: newEffectiveFileAccessEvent("/etc/sudoers", storage.FileAccess_OPEN), + expectAlert: true, + }, + }, + }, + { + description: "Deployment file policy with suffix", + policy: newFileAccessPolicy(storage.EventSource_DEPLOYMENT_EVENT, nil, false, "/etc/passwd", "/etc/ssh/sshd_config", "/etc/shadow", "/etc/sudoers"), + events: []eventWrapper{ + { + access: newEffectiveFileAccessEvent("/etc/passwd-suffix", storage.FileAccess_OPEN), + expectAlert: false, + }, + { + access: newEffectiveFileAccessEvent("/etc/shadow-suffix", storage.FileAccess_OPEN), + expectAlert: false, + }, + { + access: newEffectiveFileAccessEvent("/etc/ssh/sshd_config-suffix", storage.FileAccess_OPEN), + expectAlert: false, + }, + { + access: newEffectiveFileAccessEvent("/etc/sudoers-suffix", storage.FileAccess_OPEN), + expectAlert: false, + }, + }, + }, + } { + testutils.MustUpdateFeature(suite.T(), features.SensitiveFileActivity, true) + defer testutils.MustUpdateFeature(suite.T(), features.SensitiveFileActivity, false) + ResetFieldMetadataSingleton(suite.T()) + defer ResetFieldMetadataSingleton(suite.T()) + + suite.Run(tc.description, func() { + matcher, err := BuildDeploymentWithFileAccessMatcher(tc.policy) + suite.Require().NoError(err) + + for _, event := range tc.events { + var cache CacheReceptacle + enhancedDep := EnhancedDeployment{ + Deployment: deployment, + Images: nil, + NetworkPoliciesApplied: nil, + } + violations, err := matcher.MatchDeploymentWithFileAccess(&cache, enhancedDep, event.access) + suite.Require().NoError(err) + + if event.expectAlert { + suite.Require().Len(violations.AlertViolations, 1, "expected one file access violation in alert") + suite.Require().Equal(storage.Alert_Violation_FILE_ACCESS, violations.AlertViolations[0].GetType(), "expected FILE_ACCESS type") + + fileAccess := violations.AlertViolations[0].GetFileAccess() + 
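+ // The reported violation should echo the event's effective path, actual path,
+ // and operation, as asserted below.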
suite.Require().NotNil(fileAccess, "expected file access info") + + suite.Require().Equal(event.access.GetFile().GetEffectivePath(), fileAccess.GetFile().GetEffectivePath()) + suite.Require().Equal(event.access.GetFile().GetActualPath(), fileAccess.GetFile().GetActualPath()) + suite.Require().Equal(event.access.GetOperation(), fileAccess.GetOperation()) + } else { + suite.Require().Empty(violations.AlertViolations, "expected no alerts") + } + } + }) + } +} + +func (suite *RuntimeCriteriaTestSuite) TestDeploymentDualPathMatching() { + deployment := &storage.Deployment{ + Name: "test-deployment", + Id: "test-deployment-id", + } + + type eventWrapper struct { + access *storage.FileAccess + expectAlert bool + } + + for _, tc := range []struct { + description string + policy *storage.Policy + events []eventWrapper + }{ + { + description: "Event with both paths - policy matches actual path only", + policy: newFileAccessPolicy(storage.EventSource_DEPLOYMENT_EVENT, + []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, + "/etc/passwd", + ), + events: []eventWrapper{ + { + access: newDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), + expectAlert: true, + }, + }, + }, + { + description: "Event with both paths - policy matches effective only", + policy: newFileAccessPolicy(storage.EventSource_DEPLOYMENT_EVENT, + []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, + "/etc/shadow", + ), + events: []eventWrapper{ + { + access: newDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), + expectAlert: true, + }, + }, + }, + { + description: "Event with both paths - policy requires BOTH paths (AND within section)", + policy: newDualPathPolicy("/etc/passwd", "/etc/shadow", []storage.FileAccess_Operation{storage.FileAccess_OPEN}), + events: []eventWrapper{ + { + access: newDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), + expectAlert: true, + }, + }, + }, + { + description: "Multi-section policy - first section matches (OR behavior)", + policy: newMultiSectionPolicy(storage.EventSource_DEPLOYMENT_EVENT, []*storage.PolicySection{ + { + SectionName: "section 1", + PolicyGroups: []*storage.PolicyGroup{ + { + FieldName: fieldnames.FilePath, + Values: []*storage.PolicyValue{{Value: "/etc/passwd"}}, + }, + { + FieldName: fieldnames.FileOperation, + Values: []*storage.PolicyValue{{Value: "OPEN"}}, + }, + }, + }, + { + SectionName: "section 2", + PolicyGroups: []*storage.PolicyGroup{ + { + FieldName: fieldnames.FilePath, + Values: []*storage.PolicyValue{{Value: "/etc/shadow"}}, + }, + { + FieldName: fieldnames.FileOperation, + Values: []*storage.PolicyValue{{Value: "OPEN"}}, + }, + }, + }, + }), + events: []eventWrapper{ + { + access: newDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), + expectAlert: true, + }, + }, + }, + { + description: "Multi-section policy - second section matches (OR behavior)", + policy: newMultiSectionPolicy(storage.EventSource_DEPLOYMENT_EVENT, []*storage.PolicySection{ + { + SectionName: "section 1", + PolicyGroups: []*storage.PolicyGroup{ + { + FieldName: fieldnames.FilePath, + Values: []*storage.PolicyValue{{Value: "/etc/shadow"}}, + }, + { + FieldName: fieldnames.FileOperation, + Values: []*storage.PolicyValue{{Value: "OPEN"}}, + }, + }, + }, + { + SectionName: "section 2", + PolicyGroups: []*storage.PolicyGroup{ + { + FieldName: fieldnames.FilePath, + Values: []*storage.PolicyValue{{Value: "/etc/passwd"}}, + }, + { + FieldName: 
fieldnames.FileOperation, + Values: []*storage.PolicyValue{{Value: "OPEN"}}, + }, + }, + }, + }), + events: []eventWrapper{ + { + access: newDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), + expectAlert: true, + }, + }, + }, + { + description: "Multi-section with mixed path types - actual path section matches", + policy: newMultiSectionPolicy(storage.EventSource_DEPLOYMENT_EVENT, []*storage.PolicySection{ + { + SectionName: "section 1", + PolicyGroups: []*storage.PolicyGroup{ + { + FieldName: fieldnames.FilePath, + Values: []*storage.PolicyValue{{Value: "/etc/passwd"}}, + }, + }, + }, + { + SectionName: "section 2", + PolicyGroups: []*storage.PolicyGroup{ + { + FieldName: fieldnames.FilePath, + Values: []*storage.PolicyValue{{Value: "/etc/sudoers"}}, + }, + }, + }, + }), + events: []eventWrapper{ + { + access: newDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), + expectAlert: true, + }, + }, + }, + { + description: "Multi-section with mixed path types - effective path section matches", + policy: newMultiSectionPolicy(storage.EventSource_DEPLOYMENT_EVENT, []*storage.PolicySection{ + { + SectionName: "section 1", + PolicyGroups: []*storage.PolicyGroup{ + { + FieldName: fieldnames.FilePath, + Values: []*storage.PolicyValue{{Value: "/etc/sudoers"}}, + }, + }, + }, + { + SectionName: "section 2", + PolicyGroups: []*storage.PolicyGroup{ + { + FieldName: fieldnames.FilePath, + Values: []*storage.PolicyValue{{Value: "/etc/shadow"}}, + }, + }, + }, + }), + events: []eventWrapper{ + { + access: newDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), + expectAlert: true, + }, + }, + }, + { + description: "Multi-section with dual paths in one section - complex AND/OR", + policy: newMultiSectionPolicy(storage.EventSource_DEPLOYMENT_EVENT, []*storage.PolicySection{ + { + SectionName: "section 1", + PolicyGroups: []*storage.PolicyGroup{ + { + FieldName: fieldnames.FilePath, + Values: []*storage.PolicyValue{ + {Value: "/etc/passwd"}, + {Value: "/etc/shadow"}, + }, + }, + }, + }, + { + SectionName: "section 2", + PolicyGroups: []*storage.PolicyGroup{ + { + FieldName: fieldnames.FilePath, + Values: []*storage.PolicyValue{{Value: "/etc/ssh/sshd_config"}}, + }, + }, + }, + }), + events: []eventWrapper{ + { + // Matches section 1 (both actual and effective paths match) + access: newDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), + expectAlert: true, + }, + }, + }, + + // Invalid/edge cases - unexpected behaviors + { + description: "Event with both paths - policy matches neither", + policy: newFileAccessPolicy(storage.EventSource_DEPLOYMENT_EVENT, + []storage.FileAccess_Operation{storage.FileAccess_OPEN}, false, + "/etc/sudoers", + ), + events: []eventWrapper{ + { + access: newDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), + expectAlert: false, + }, + }, + }, + { + description: "Event with both paths - policy requires EITHER and only actual path matches", + policy: newDualPathPolicy("/etc/passwd", "/etc/sudoers", []storage.FileAccess_Operation{storage.FileAccess_OPEN}), + events: []eventWrapper{ + { + access: newDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), + expectAlert: true, + }, + }, + }, + { + description: "Event with both paths - policy requires EITHER and only effective matches", + policy: newDualPathPolicy("/etc/sudoers", "/etc/shadow", []storage.FileAccess_Operation{storage.FileAccess_OPEN}), + events: 
[]eventWrapper{ + { + access: newDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), + expectAlert: true, + }, + }, + }, + { + description: "Event with both paths - policy requires EITHER and only BOTH match", + policy: newDualPathPolicy("/etc/sudoers", "/etc/shadow", []storage.FileAccess_Operation{storage.FileAccess_OPEN}), + events: []eventWrapper{ + { + access: newDualPathFileAccessEvent("/etc/sudoers", "/etc/shadow", storage.FileAccess_OPEN), + expectAlert: true, + }, + }, + }, + { + description: "Event with both paths - policy requires BOTH but operation doesn't match", + policy: newDualPathPolicy("/etc/passwd", "/etc/shadow", []storage.FileAccess_Operation{storage.FileAccess_CREATE}), + events: []eventWrapper{ + { + access: newDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), + expectAlert: false, + }, + }, + }, + { + description: "Multi-section policy - no sections match", + policy: newMultiSectionPolicy(storage.EventSource_DEPLOYMENT_EVENT, []*storage.PolicySection{ + { + SectionName: "section 1", + PolicyGroups: []*storage.PolicyGroup{ + { + FieldName: fieldnames.FilePath, + Values: []*storage.PolicyValue{{Value: "/etc/ssh/sshd_config"}}, + }, + }, + }, + { + SectionName: "section 2", + PolicyGroups: []*storage.PolicyGroup{ + { + FieldName: fieldnames.FilePath, + Values: []*storage.PolicyValue{{Value: "/etc/sudoers"}}, + }, + }, + }, + }), + events: []eventWrapper{ + { + access: newDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), + expectAlert: false, + }, + }, + }, + { + description: "Multi-section with dual paths - neither section matches completely", + policy: newMultiSectionPolicy(storage.EventSource_DEPLOYMENT_EVENT, []*storage.PolicySection{ + { + SectionName: "section 1", + PolicyGroups: []*storage.PolicyGroup{ + { + FieldName: fieldnames.FilePath, + Values: []*storage.PolicyValue{{Value: "/etc/passwd"}, {Value: "/etc/shadow"}}, + }, + { + FieldName: fieldnames.FileOperation, + Values: []*storage.PolicyValue{{Value: "UNLINK"}}, + }, + }, + }, + { + SectionName: "section 2", + PolicyGroups: []*storage.PolicyGroup{ + { + FieldName: fieldnames.FilePath, + Values: []*storage.PolicyValue{{Value: "/etc/shadow"}, {Value: "/etc/ssh/sshd_config"}}, + }, + { + FieldName: fieldnames.FileOperation, + Values: []*storage.PolicyValue{{Value: "UNLINK"}}, + }, + }, + }, + }), + events: []eventWrapper{ + { + // Section 1: actual matches, effective doesn't (AND fails) + // Section 2: actual doesn't match, effective does (AND fails) + // Overall: no section fully matches (OR fails) + access: newDualPathFileAccessEvent("/etc/passwd", "/etc/shadow", storage.FileAccess_OPEN), + expectAlert: false, + }, + }, + }, + } { + testutils.MustUpdateFeature(suite.T(), features.SensitiveFileActivity, true) + defer testutils.MustUpdateFeature(suite.T(), features.SensitiveFileActivity, false) + ResetFieldMetadataSingleton(suite.T()) + defer ResetFieldMetadataSingleton(suite.T()) + + suite.Run(tc.description, func() { + matcher, err := BuildDeploymentWithFileAccessMatcher(tc.policy) + suite.Require().NoError(err) + + for _, event := range tc.events { + var cache CacheReceptacle + enhancedDep := EnhancedDeployment{ + Deployment: deployment, + Images: nil, + NetworkPoliciesApplied: nil, + } + violations, err := matcher.MatchDeploymentWithFileAccess(&cache, enhancedDep, event.access) + suite.Require().NoError(err) + + if event.expectAlert { + suite.Require().Len(violations.AlertViolations, 1, "expected one file access 
violation in alert") + suite.Require().Equal(storage.Alert_Violation_FILE_ACCESS, violations.AlertViolations[0].GetType(), "expected FILE_ACCESS type") + + fileAccess := violations.AlertViolations[0].GetFileAccess() + suite.Require().NotNil(fileAccess, "expected file access info") + + suite.Require().Equal(event.access.GetFile().GetEffectivePath(), fileAccess.GetFile().GetEffectivePath()) + suite.Require().Equal(event.access.GetFile().GetActualPath(), fileAccess.GetFile().GetActualPath()) + suite.Require().Equal(event.access.GetOperation(), fileAccess.GetOperation()) + } else { + suite.Require().Empty(violations.AlertViolations, "expected no alerts") + } + } + }) + } +} diff --git a/pkg/booleanpolicy/workload_criteria_test.go b/pkg/booleanpolicy/workload_criteria_test.go new file mode 100644 index 0000000000000..cb5798e4b2589 --- /dev/null +++ b/pkg/booleanpolicy/workload_criteria_test.go @@ -0,0 +1,958 @@ +package booleanpolicy + +import ( + "fmt" + "strings" + "testing" + + "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/booleanpolicy/fieldnames" + "github.com/stackrox/rox/pkg/features" + "github.com/stackrox/rox/pkg/fixtures" + "github.com/stackrox/rox/pkg/protoassert" + "github.com/stackrox/rox/pkg/set" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +// WorkloadCriteriaTestSuite tests workload-related policy criteria. +type WorkloadCriteriaTestSuite struct { + basePoliciesTestSuite +} + +func TestWorkloadCriteria(t *testing.T) { + t.Setenv(features.CVEFixTimestampCriteria.EnvVar(), "true") + suite.Run(t, new(WorkloadCriteriaTestSuite)) +} + +func rbacPermissionMessage(level string) []*storage.Alert_Violation { + permissionToDescMap := map[string]string{ + "NONE": "no specified access", + "DEFAULT": "default access", + "ELEVATED_IN_NAMESPACE": "elevated access in namespace", + "ELEVATED_CLUSTER_WIDE": "elevated access cluster wide", + "CLUSTER_ADMIN": "cluster admin access"} + return []*storage.Alert_Violation{{Message: fmt.Sprintf("Service account permission level with %s", permissionToDescMap[level])}} +} + +func (suite *WorkloadCriteriaTestSuite) TestMapPolicyMatchOne() { + noAnnotation := &storage.Deployment{ + Id: "noAnnotation", + } + suite.addDepAndImages(noAnnotation) + + noValidAnnotation := &storage.Deployment{ + Id: "noValidAnnotation", + Annotations: map[string]string{ + "email": "notavalidemail", + "someotherannotation": "vv@stackrox.com", + }, + } + suite.addDepAndImages(noValidAnnotation) + + validAnnotation := &storage.Deployment{ + Id: "validAnnotation", + Annotations: map[string]string{ + "email": "joseph@rules.gov", + }, + } + suite.addDepAndImages(validAnnotation) + + policy := suite.defaultPolicies["Required Annotation: Email"] + + m, err := BuildDeploymentMatcher(policy) + suite.NoError(err) + + for _, testCase := range []struct { + dep *storage.Deployment + expectedViolations []string + }{ + { + noAnnotation, + []string{"Required annotation not found (found annotations: )"}, + }, + { + noValidAnnotation, + []string{"Required annotation not found (found annotations: email=notavalidemail, someotherannotation=vv@stackrox.com)"}, + }, + { + validAnnotation, + nil, + }, + } { + c := testCase + suite.Run(c.dep.GetId(), func() { + matched, err := m.MatchDeployment(nil, enhancedDeployment(c.dep, nil)) + suite.NoError(err) + var expectedMessages []*storage.Alert_Violation + for _, v := range c.expectedViolations { + expectedMessages = append(expectedMessages, 
&storage.Alert_Violation{Message: v}) + } + protoassert.SlicesEqual(suite.T(), matched.AlertViolations, expectedMessages) + }) + } +} + +func (suite *WorkloadCriteriaTestSuite) TestK8sRBACField() { + deployments := make(map[string]*storage.Deployment) + for permissionLevelStr, permissionLevel := range storage.PermissionLevel_value { + dep := fixtures.GetDeployment().CloneVT() + dep.ServiceAccountPermissionLevel = storage.PermissionLevel(permissionLevel) + deployments[permissionLevelStr] = dep + } + + for _, testCase := range []struct { + value string + negate bool + expectedMatches []string + // Deployment ids to violations + expectedViolations map[string][]*storage.Alert_Violation + }{ + { + "DEFAULT", + false, + []string{"DEFAULT", "ELEVATED_IN_NAMESPACE", "ELEVATED_CLUSTER_WIDE", "CLUSTER_ADMIN"}, + map[string][]*storage.Alert_Violation{ + "DEFAULT": rbacPermissionMessage("DEFAULT"), + "ELEVATED_CLUSTER_WIDE": rbacPermissionMessage("ELEVATED_CLUSTER_WIDE"), + "ELEVATED_IN_NAMESPACE": rbacPermissionMessage("ELEVATED_IN_NAMESPACE"), + "CLUSTER_ADMIN": rbacPermissionMessage("CLUSTER_ADMIN"), + }, + }, + { + "ELEVATED_CLUSTER_WIDE", + false, + []string{"ELEVATED_CLUSTER_WIDE", "CLUSTER_ADMIN"}, + map[string][]*storage.Alert_Violation{ + "ELEVATED_CLUSTER_WIDE": rbacPermissionMessage("ELEVATED_CLUSTER_WIDE"), + "CLUSTER_ADMIN": rbacPermissionMessage("CLUSTER_ADMIN"), + }, + }, + { + "cluster_admin", + false, + []string{"CLUSTER_ADMIN"}, + map[string][]*storage.Alert_Violation{ + "CLUSTER_ADMIN": rbacPermissionMessage("CLUSTER_ADMIN"), + }, + }, + { + "ELEVATED_CLUSTER_WIDE", + true, + []string{"NONE", "DEFAULT", "ELEVATED_IN_NAMESPACE"}, + map[string][]*storage.Alert_Violation{ + "ELEVATED_IN_NAMESPACE": rbacPermissionMessage("ELEVATED_IN_NAMESPACE"), + "NONE": rbacPermissionMessage("NONE"), + "DEFAULT": rbacPermissionMessage("DEFAULT"), + }, + }, + } { + c := testCase + suite.T().Run(fmt.Sprintf("%+v", c.expectedMatches), func(t *testing.T) { + matcher, err := BuildDeploymentMatcher(policyWithSingleKeyValue(fieldnames.MinimumRBACPermissions, c.value, c.negate)) + require.NoError(t, err) + matched := set.NewStringSet() + for depRef, dep := range deployments { + violations, err := matcher.MatchDeployment(nil, enhancedDeployment(dep, suite.getImagesForDeployment(dep))) + require.NoError(t, err) + if len(violations.AlertViolations) > 0 { + matched.Add(depRef) + protoassert.SlicesEqual(t, violations.AlertViolations, c.expectedViolations[depRef]) + } else { + assert.Empty(t, c.expectedViolations[depRef]) + } + } + assert.ElementsMatch(t, matched.AsSlice(), c.expectedMatches, "Got %v, expected: %v", matched.AsSlice(), c.expectedMatches) + }) + } +} + +func (suite *WorkloadCriteriaTestSuite) TestPortExposure() { + deployments := make(map[string]*storage.Deployment) + for exposureLevelStr, exposureLevel := range storage.PortConfig_ExposureLevel_value { + dep := fixtures.GetDeployment().CloneVT() + dep.Ports = []*storage.PortConfig{{ExposureInfos: []*storage.PortConfig_ExposureInfo{{Level: storage.PortConfig_ExposureLevel(exposureLevel)}}}} + deployments[exposureLevelStr] = dep + } + + assertMessageMatches := func(t *testing.T, depRef string, violations []*storage.Alert_Violation) { + depRefToExpectedMsg := map[string]string{ + "EXTERNAL": "exposed with load balancer", + "NODE": "exposed on node port", + "INTERNAL": "using internal cluster IP", + "HOST": "exposed on host port", + "ROUTE": "exposed with a route", + } + require.Len(t, violations, 1) + assert.Equal(t, fmt.Sprintf("Deployment port(s) 
%s", depRefToExpectedMsg[depRef]), violations[0].GetMessage()) + } + + for _, testCase := range []struct { + values []string + negate bool + expectedMatches []string + }{ + { + []string{"external"}, + false, + []string{"EXTERNAL"}, + }, + { + []string{"external", "NODE"}, + false, + []string{"EXTERNAL", "NODE"}, + }, + { + []string{"external", "NODE"}, + true, + []string{"INTERNAL", "HOST", "ROUTE"}, + }, + } { + c := testCase + suite.T().Run(fmt.Sprintf("%+v", c), func(t *testing.T) { + matcher, err := BuildDeploymentMatcher(policyWithSingleFieldAndValues(fieldnames.PortExposure, c.values, c.negate, storage.BooleanOperator_OR)) + require.NoError(t, err) + matched := set.NewStringSet() + for depRef, dep := range deployments { + violations, err := matcher.MatchDeployment(nil, enhancedDeployment(dep, suite.getImagesForDeployment(dep))) + require.NoError(t, err) + if len(violations.AlertViolations) > 0 { + assertMessageMatches(t, depRef, violations.AlertViolations) + matched.Add(depRef) + } + } + assert.ElementsMatch(t, matched.AsSlice(), c.expectedMatches, "Got %v, expected: %v", matched.AsSlice(), c.expectedMatches) + }) + } +} + +func (suite *WorkloadCriteriaTestSuite) TestContainerName() { + var deps []*storage.Deployment + for _, containerName := range []string{ + "container_staging", + "container_prod0", + "container_prod1", + "container_internal", + "external_container", + } { + dep := fixtures.GetDeployment().CloneVT() + dep.Containers = []*storage.Container{ + { + Name: containerName, + }, + } + deps = append(deps, dep) + } + + for _, testCase := range []struct { + value string + expectedMatches []string + negate bool + }{ + { + value: "container_[a-z0-9]*", + expectedMatches: []string{"container_staging", "container_prod0", "container_prod1", "container_internal"}, + negate: false, + }, + { + value: "container_prod[a-z0-9]*", + expectedMatches: []string{"container_prod0", "container_prod1"}, + negate: false, + }, + { + value: ".*external.*", + expectedMatches: []string{"external_container"}, + negate: false, + }, + { + value: "doesnotexist", + expectedMatches: nil, + negate: false, + }, + { + value: ".*internal.*", + expectedMatches: []string{"container_staging", "container_prod0", "container_prod1", "external_container"}, + negate: true, + }, + } { + c := testCase + + suite.T().Run(fmt.Sprintf("DeploymentMatcher %+v", c), func(t *testing.T) { + depMatcher, err := BuildDeploymentMatcher(policyWithSingleKeyValue(fieldnames.ContainerName, c.value, c.negate)) + require.NoError(t, err) + containerNameMatched := set.NewStringSet() + for _, dep := range deps { + violations, err := depMatcher.MatchDeployment(nil, enhancedDeployment(dep, suite.getImagesForDeployment(dep))) + require.NoError(t, err) + // No match in case we are testing for doesnotexist + if len(violations.AlertViolations) > 0 { + containerNameMatched.Add(dep.GetContainers()[0].GetName()) + require.Len(t, violations.AlertViolations, 1) + assert.Equal(t, fmt.Sprintf("Container has name '%s'", dep.GetContainers()[0].GetName()), violations.AlertViolations[0].GetMessage()) + } + } + assert.ElementsMatch(t, containerNameMatched.AsSlice(), c.expectedMatches, "Got %v for policy %v; expected: %v", containerNameMatched.AsSlice(), c.value, c.expectedMatches) + }) + } +} + +func (suite *WorkloadCriteriaTestSuite) TestAllowPrivilegeEscalationPolicyCriteria() { + const containerAllowPrivEsc = "Container with Privilege Escalation allowed" + const containerNotAllowPrivEsc = "Container with Privilege Escalation not allowed" + + var deps 
[]*storage.Deployment + for _, d := range []struct { + ContainerName string + AllowPrivilegeEscalation bool + }{ + { + ContainerName: containerAllowPrivEsc, + AllowPrivilegeEscalation: true, + }, + { + ContainerName: containerNotAllowPrivEsc, + AllowPrivilegeEscalation: false, + }, + } { + dep := fixtures.GetDeployment().CloneVT() + dep.Containers[0].Name = d.ContainerName + if d.AllowPrivilegeEscalation { + dep.Containers[0].SecurityContext.AllowPrivilegeEscalation = d.AllowPrivilegeEscalation + } + deps = append(deps, dep) + } + + for _, testCase := range []struct { + CaseName string + value string + expectedMatches []string + }{ + { + CaseName: "Policy for containers with privilege escalation allowed", + value: "true", + expectedMatches: []string{containerAllowPrivEsc}, + }, + { + CaseName: "Policy for containers with privilege escalation not allowed", + value: "false", + expectedMatches: []string{containerNotAllowPrivEsc}, + }, + } { + c := testCase + + suite.T().Run(c.CaseName, func(t *testing.T) { + depMatcher, err := BuildDeploymentMatcher(policyWithSingleKeyValue(fieldnames.AllowPrivilegeEscalation, c.value, false)) + require.NoError(t, err) + containerNameMatched := set.NewStringSet() + for _, dep := range deps { + violations, err := depMatcher.MatchDeployment(nil, enhancedDeployment(dep, suite.getImagesForDeployment(dep))) + require.NoError(t, err) + if len(violations.AlertViolations) > 0 { + containerNameMatched.Add(dep.GetContainers()[0].GetName()) + require.Len(t, violations.AlertViolations, 1) + if c.value == "true" { + assert.Equal(t, fmt.Sprintf("Container '%s' allows privilege escalation", dep.GetContainers()[0].GetName()), violations.AlertViolations[0].GetMessage()) + } else { + assert.Equal(t, fmt.Sprintf("Container '%s' does not allow privilege escalation", dep.GetContainers()[0].GetName()), violations.AlertViolations[0].GetMessage()) + } + } + } + assert.ElementsMatch(t, containerNameMatched.AsSlice(), c.expectedMatches, "Matched containers %v for policy %v; expected: %v", containerNameMatched.AsSlice(), c.value, c.expectedMatches) + }) + } +} + +func (suite *WorkloadCriteriaTestSuite) TestAutomountServiceAccountToken() { + deployments := make(map[string]*storage.Deployment) + for _, d := range []struct { + DeploymentName string + ServiceAccountName string + AutomountServiceAccountTokens bool + }{ + { + DeploymentName: "DefaultSAAutomountedTokens", + ServiceAccountName: "default", + AutomountServiceAccountTokens: true, + }, + { + DeploymentName: "DefaultSANotAutomountedTokens", + ServiceAccountName: "default", + }, + { + DeploymentName: "CustomSAAutomountedTokens", + ServiceAccountName: "custom", + AutomountServiceAccountTokens: true, + }, + { + DeploymentName: "CustomSANotAutomountedTokens", + ServiceAccountName: "custom", + }, + } { + dep := fixtures.GetDeployment().CloneVT() + dep.Name = d.DeploymentName + dep.ServiceAccount = d.ServiceAccountName + dep.AutomountServiceAccountToken = d.AutomountServiceAccountTokens + deployments[dep.GetName()] = dep + } + + automountServiceAccountTokenPolicyGroup := &storage.PolicyGroup{ + FieldName: fieldnames.AutomountServiceAccountToken, + Values: []*storage.PolicyValue{{Value: "true"}}, + } + defaultServiceAccountPolicyGroup := &storage.PolicyGroup{ + FieldName: fieldnames.ServiceAccount, + Values: []*storage.PolicyValue{{Value: "default"}}, + } + + allAutomountServiceAccountTokenPolicy := policyWithGroups(storage.EventSource_NOT_APPLICABLE, automountServiceAccountTokenPolicyGroup) + defaultAutomountServiceAccountTokenPolicy := 
policyWithGroups(storage.EventSource_NOT_APPLICABLE, automountServiceAccountTokenPolicyGroup, defaultServiceAccountPolicyGroup) + + automountAlert := &storage.Alert_Violation{Message: "Deployment mounts the service account tokens."} + defaultServiceAccountAlert := &storage.Alert_Violation{Message: "Service Account is set to 'default'"} + + for _, c := range []struct { + CaseName string + Policy *storage.Policy + DeploymentName string + ExpectedAlerts []*storage.Alert_Violation + }{ + { + CaseName: "Automounted default service account tokens should alert on bare automount policy", + Policy: allAutomountServiceAccountTokenPolicy, + DeploymentName: "DefaultSAAutomountedTokens", + ExpectedAlerts: []*storage.Alert_Violation{automountAlert}, + }, + { + CaseName: "Automounted default service account tokens should alert on default only automount policy", + Policy: defaultAutomountServiceAccountTokenPolicy, + DeploymentName: "DefaultSAAutomountedTokens", + ExpectedAlerts: []*storage.Alert_Violation{automountAlert, defaultServiceAccountAlert}, + }, + { + CaseName: "Automounted custom service account tokens should alert on bare automount policy", + Policy: allAutomountServiceAccountTokenPolicy, + DeploymentName: "CustomSAAutomountedTokens", + ExpectedAlerts: []*storage.Alert_Violation{automountAlert}, + }, + { + CaseName: "Not automounted default service account should not alert on bare automount policy", + Policy: allAutomountServiceAccountTokenPolicy, + DeploymentName: "DefaultSANotAutomountedTokens", + }, + { + CaseName: "Not automounted custom service account should not alert on bare automount policy", + Policy: allAutomountServiceAccountTokenPolicy, + DeploymentName: "CustomSANotAutomountedTokens", + }, + } { + suite.T().Run(c.CaseName, func(t *testing.T) { + dep := deployments[c.DeploymentName] + matcher, err := BuildDeploymentMatcher(c.Policy) + suite.NoError(err, "deployment matcher creation must succeed") + violations, err := matcher.MatchDeployment(nil, enhancedDeployment(dep, suite.getImagesForDeployment(dep))) + suite.NoError(err, "deployment matcher run must succeed") + suite.Empty(violations.ProcessViolation) + protoassert.SlicesEqual(suite.T(), c.ExpectedAlerts, violations.AlertViolations) + }) + } +} + +func (suite *WorkloadCriteriaTestSuite) TestRuntimeClass() { + var deps []*storage.Deployment + for _, runtimeClass := range []string{ + "", + "blah", + } { + dep := fixtures.GetDeployment().CloneVT() + dep.RuntimeClass = runtimeClass + deps = append(deps, dep) + } + + for _, testCase := range []struct { + value string + negate bool + expectedMatches []string + }{ + { + value: ".*", + negate: false, + expectedMatches: []string{"", "blah"}, + }, + { + value: ".+", + negate: false, + expectedMatches: []string{"blah"}, + }, + { + value: ".+", + negate: true, + expectedMatches: []string{""}, + }, + { + value: "blah", + negate: true, + expectedMatches: []string{""}, + }, + } { + c := testCase + + suite.T().Run(fmt.Sprintf("%+v", c), func(t *testing.T) { + depMatcher, err := BuildDeploymentMatcher(policyWithSingleKeyValue(fieldnames.RuntimeClass, c.value, c.negate)) + require.NoError(t, err) + matchedRuntimeClasses := set.NewStringSet() + for _, dep := range deps { + violations, err := depMatcher.MatchDeployment(nil, enhancedDeployment(dep, suite.getImagesForDeployment(dep))) + require.NoError(t, err) + if len(violations.AlertViolations) > 0 { + matchedRuntimeClasses.Add(dep.GetRuntimeClass()) + require.Len(t, violations.AlertViolations, 1) + assert.Equal(t, fmt.Sprintf("Runtime Class is set 
to '%s'", dep.GetRuntimeClass()), violations.AlertViolations[0].GetMessage()) + } + } + assert.ElementsMatch(t, matchedRuntimeClasses.AsSlice(), c.expectedMatches, "Got %v for policy %v; expected: %v", matchedRuntimeClasses.AsSlice(), c.value, c.expectedMatches) + }) + } +} + +func (suite *WorkloadCriteriaTestSuite) TestNamespace() { + var deps []*storage.Deployment + for _, namespace := range []string{ + "dep_staging", + "dep_prod0", + "dep_prod1", + "dep_internal", + "external_dep", + } { + dep := fixtures.GetDeployment().CloneVT() + dep.Namespace = namespace + deps = append(deps, dep) + } + + for _, testCase := range []struct { + value string + expectedMatches []string + negate bool + }{ + { + value: "dep_[a-z0-9]*", + expectedMatches: []string{"dep_staging", "dep_prod0", "dep_prod1", "dep_internal"}, + negate: false, + }, + { + value: "dep_prod[a-z0-9]*", + expectedMatches: []string{"dep_prod0", "dep_prod1"}, + negate: false, + }, + { + value: ".*external.*", + expectedMatches: []string{"external_dep"}, + negate: false, + }, + { + value: "doesnotexist", + expectedMatches: nil, + negate: false, + }, + { + value: ".*internal.*", + expectedMatches: []string{"dep_staging", "dep_prod0", "dep_prod1", "external_dep"}, + negate: true, + }, + } { + c := testCase + + suite.T().Run(fmt.Sprintf("DeploymentMatcher %+v", c), func(t *testing.T) { + depMatcher, err := BuildDeploymentMatcher(policyWithSingleKeyValue(fieldnames.Namespace, c.value, c.negate)) + require.NoError(t, err) + namespacesMatched := set.NewStringSet() + for _, dep := range deps { + violations, err := depMatcher.MatchDeployment(nil, enhancedDeployment(dep, suite.getImagesForDeployment(dep))) + require.NoError(t, err) + // No match in case we are testing for doesnotexist + if len(violations.AlertViolations) > 0 { + namespacesMatched.Add(dep.GetNamespace()) + require.Len(t, violations.AlertViolations, 1) + assert.Equal(t, fmt.Sprintf("Namespace has name '%s'", dep.GetNamespace()), violations.AlertViolations[0].GetMessage()) + } + } + assert.ElementsMatch(t, namespacesMatched.AsSlice(), c.expectedMatches, "Got %v for policy %v; expected: %v", namespacesMatched.AsSlice(), c.value, c.expectedMatches) + }) + } +} + +func (suite *WorkloadCriteriaTestSuite) TestDropCaps() { + testCaps := []string{"SYS_MODULE", "SYS_NICE", "SYS_PTRACE", "ALL"} + + deployments := make(map[string]*storage.Deployment) + for _, idxs := range [][]int{{}, {0}, {1}, {2}, {0, 1}, {1, 2}, {0, 1, 2}, {3}} { + dep := fixtures.GetDeployment().CloneVT() + dep.Containers[0].SecurityContext.DropCapabilities = make([]string, 0, len(idxs)) + for _, idx := range idxs { + dep.Containers[0].SecurityContext.DropCapabilities = append(dep.Containers[0].SecurityContext.DropCapabilities, testCaps[idx]) + } + deployments[strings.ReplaceAll(strings.Join(dep.GetContainers()[0].GetSecurityContext().GetDropCapabilities(), ","), "SYS_", "")] = dep + } + + assertMessageMatches := func(t *testing.T, depRef string, violations []*storage.Alert_Violation) { + depRefToExpectedMsg := map[string]string{ + "": "no capabilities", + "ALL": "all capabilities", + "MODULE": "SYS_MODULE", + "NICE": "SYS_NICE", + "PTRACE": "SYS_PTRACE", + "MODULE,NICE": "SYS_MODULE and SYS_NICE", + "NICE,PTRACE": "SYS_NICE and SYS_PTRACE", + "MODULE,NICE,PTRACE": "SYS_MODULE, SYS_NICE, and SYS_PTRACE", + } + require.Len(t, violations, 1) + assert.Equal(t, fmt.Sprintf("Container 'nginx110container' does not drop expected capabilities (drops %s)", depRefToExpectedMsg[depRef]), violations[0].GetMessage()) + } + + for _, 
testCase := range []struct { + values []string + op storage.BooleanOperator + expectedMatches []string + }{ + { + // Nothing drops this capability + []string{"SYSLOG"}, + storage.BooleanOperator_OR, + []string{"", "MODULE", "NICE", "PTRACE", "MODULE,NICE", "NICE,PTRACE", "MODULE,NICE,PTRACE"}, + }, + { + []string{"SYS_NICE"}, + storage.BooleanOperator_OR, + []string{"", "MODULE", "PTRACE"}, + }, + { + []string{"SYS_NICE", "SYS_PTRACE"}, + storage.BooleanOperator_OR, + []string{"", "MODULE"}, + }, + { + []string{"SYS_NICE", "SYS_PTRACE"}, + storage.BooleanOperator_AND, + []string{"", "MODULE", "PTRACE", "NICE", "MODULE,NICE"}, + }, + { + []string{"ALL"}, + storage.BooleanOperator_AND, + []string{"", "MODULE", "NICE", "PTRACE", "MODULE,NICE", "NICE,PTRACE", "MODULE,NICE,PTRACE"}, + }, + } { + c := testCase + suite.T().Run(fmt.Sprintf("%+v", c), func(t *testing.T) { + matcher, err := BuildDeploymentMatcher(policyWithSingleFieldAndValues(fieldnames.DropCaps, c.values, false, c.op)) + require.NoError(t, err) + matched := set.NewStringSet() + for depRef, dep := range deployments { + violations, err := matcher.MatchDeployment(nil, enhancedDeployment(dep, suite.getImagesForDeployment(dep))) + require.NoError(t, err) + if len(violations.AlertViolations) > 0 { + matched.Add(depRef) + assertMessageMatches(t, depRef, violations.AlertViolations) + } + } + assert.ElementsMatch(t, matched.AsSlice(), c.expectedMatches, "Got %v, expected: %v", matched.AsSlice(), c.expectedMatches) + }) + } +} + +func (suite *WorkloadCriteriaTestSuite) TestAddCaps() { + testCaps := []string{"SYS_MODULE", "SYS_NICE", "SYS_PTRACE"} + + deployments := make(map[string]*storage.Deployment) + for _, idxs := range [][]int{{}, {0}, {1}, {2}, {0, 1}, {1, 2}, {0, 1, 2}} { + dep := fixtures.GetDeployment().CloneVT() + dep.Containers[0].SecurityContext.AddCapabilities = make([]string, 0, len(idxs)) + for _, idx := range idxs { + dep.Containers[0].SecurityContext.AddCapabilities = append(dep.Containers[0].SecurityContext.AddCapabilities, testCaps[idx]) + } + deployments[strings.ReplaceAll(strings.Join(dep.GetContainers()[0].GetSecurityContext().GetAddCapabilities(), ","), "SYS_", "")] = dep + } + + for _, testCase := range []struct { + values []string + op storage.BooleanOperator + expectedMatches []string + }{ + { + // Nothing adds this capability + []string{"SYSLOG"}, + storage.BooleanOperator_OR, + []string{}, + }, + { + []string{"SYS_NICE"}, + storage.BooleanOperator_OR, + []string{"NICE", "MODULE,NICE", "NICE,PTRACE", "MODULE,NICE,PTRACE"}, + }, + { + []string{"SYS_NICE", "SYS_PTRACE"}, + storage.BooleanOperator_OR, + []string{"NICE", "PTRACE", "MODULE,NICE", "NICE,PTRACE", "MODULE,NICE,PTRACE"}, + }, + { + []string{"SYS_NICE", "SYS_PTRACE"}, + storage.BooleanOperator_AND, + []string{"NICE,PTRACE", "MODULE,NICE,PTRACE"}, + }, + } { + c := testCase + suite.T().Run(fmt.Sprintf("%+v", c), func(t *testing.T) { + matcher, err := BuildDeploymentMatcher(policyWithSingleFieldAndValues(fieldnames.AddCaps, c.values, false, c.op)) + require.NoError(t, err) + matched := set.NewStringSet() + for depRef, dep := range deployments { + violations, err := matcher.MatchDeployment(nil, enhancedDeployment(dep, suite.getImagesForDeployment(dep))) + require.NoError(t, err) + if len(violations.AlertViolations) > 0 { + matched.Add(depRef) + require.Len(t, violations.AlertViolations, 1) + } + } + assert.ElementsMatch(t, matched.AsSlice(), c.expectedMatches, "Got %v, expected: %v", matched.AsSlice(), c.expectedMatches) + }) + } +} + +func (suite 
*WorkloadCriteriaTestSuite) TestReplicasPolicyCriteria() { + for _, testCase := range []struct { + caseName string + replicas int64 + policyValue string + negate bool + alerts []*storage.Alert_Violation + }{ + { + caseName: "Should raise when replicas==5.", + replicas: 5, + policyValue: "5", + negate: false, + alerts: []*storage.Alert_Violation{{Message: "Replicas is set to '5'"}}, + }, + { + caseName: "Should not raise unless replicas==3.", + replicas: 5, + policyValue: "3", + negate: false, + alerts: nil, + }, + { + caseName: "Should raise unless replicas==3.", + replicas: 5, + policyValue: "3", + negate: true, + alerts: []*storage.Alert_Violation{{Message: "Replicas is set to '5'"}}, + }, + { + caseName: "Should raise when replicas>=5.", + replicas: 5, + policyValue: ">=5", + negate: false, + alerts: []*storage.Alert_Violation{{Message: "Replicas is set to '5'"}}, + }, + { + caseName: "Should raise when replicas<=5.", + replicas: 5, + policyValue: "<=5", + negate: false, + alerts: []*storage.Alert_Violation{{Message: "Replicas is set to '5'"}}, + }, + { + caseName: "Should raise when replicas<5.", + replicas: 1, + policyValue: "<5", + negate: false, + alerts: []*storage.Alert_Violation{{Message: "Replicas is set to '1'"}}, + }, + { + caseName: "Should raise when replicas>5.", + replicas: 10, + policyValue: ">5", + negate: false, + alerts: []*storage.Alert_Violation{{Message: "Replicas is set to '10'"}}, + }, + } { + suite.Run(testCase.caseName, func() { + deployment := fixtures.GetDeployment().CloneVT() + deployment.Replicas = testCase.replicas + policy := policyWithSingleKeyValue(fieldnames.Replicas, testCase.policyValue, testCase.negate) + + matcher, err := BuildDeploymentMatcher(policy) + suite.NoError(err, "deployment matcher creation must succeed") + violations, err := matcher.MatchDeployment(nil, enhancedDeployment(deployment, suite.getImagesForDeployment(deployment))) + suite.NoError(err, "deployment matcher run must succeed") + + suite.Empty(violations.ProcessViolation) + protoassert.SlicesEqual(suite.T(), violations.AlertViolations, testCase.alerts) + }) + } +} + +func (suite *WorkloadCriteriaTestSuite) TestLivenessProbePolicyCriteria() { + for _, testCase := range []struct { + caseName string + containers []*storage.Container + policyValue string + alerts []*storage.Alert_Violation + }{ + { + caseName: "Should raise alert since liveness probe is defined.", + containers: []*storage.Container{ + {Name: "container", LivenessProbe: &storage.LivenessProbe{Defined: true}}, + }, + policyValue: "true", + alerts: []*storage.Alert_Violation{ + {Message: "Liveness probe is defined for container 'container'"}, + }, + }, + { + caseName: "Should not raise alert since liveness probe is defined.", + containers: []*storage.Container{ + {Name: "container", LivenessProbe: &storage.LivenessProbe{Defined: true}}, + }, + policyValue: "false", + alerts: nil, + }, + { + caseName: "Should not raise alert since liveness probe is not defined.", + containers: []*storage.Container{ + {Name: "container", LivenessProbe: &storage.LivenessProbe{Defined: false}}, + }, + policyValue: "true", + alerts: nil, + }, + { + caseName: "Should raise alert since liveness probe is not defined.", + containers: []*storage.Container{ + {Name: "container", LivenessProbe: &storage.LivenessProbe{Defined: false}}, + }, + policyValue: "false", + alerts: []*storage.Alert_Violation{ + {Message: "Liveness probe is not defined for container 'container'"}, + }, + }, + { + caseName: "Should raise alert for both containers.", + 
containers: []*storage.Container{ + {Name: "container-1", LivenessProbe: &storage.LivenessProbe{Defined: false}}, + {Name: "container-2", LivenessProbe: &storage.LivenessProbe{Defined: false}}, + }, + policyValue: "false", + alerts: []*storage.Alert_Violation{ + {Message: "Liveness probe is not defined for container 'container-1'"}, + {Message: "Liveness probe is not defined for container 'container-2'"}, + }, + }, + { + caseName: "Should raise alert only for container-2.", + containers: []*storage.Container{ + {Name: "container-1", LivenessProbe: &storage.LivenessProbe{Defined: true}}, + {Name: "container-2", LivenessProbe: &storage.LivenessProbe{Defined: false}}, + }, + policyValue: "false", + alerts: []*storage.Alert_Violation{ + {Message: "Liveness probe is not defined for container 'container-2'"}, + }, + }, + } { + suite.Run(testCase.caseName, func() { + deployment := fixtures.GetDeployment().CloneVT() + deployment.Containers = testCase.containers + policy := policyWithSingleKeyValue(fieldnames.LivenessProbeDefined, testCase.policyValue, false) + + matcher, err := BuildDeploymentMatcher(policy) + suite.NoError(err, "deployment matcher creation must succeed") + violations, err := matcher.MatchDeployment(nil, enhancedDeployment(deployment, suite.getImagesForDeployment(deployment))) + suite.NoError(err, "deployment matcher run must succeed") + + suite.Empty(violations.ProcessViolation) + protoassert.SlicesEqual(suite.T(), violations.AlertViolations, testCase.alerts) + }) + } +} + +func (suite *WorkloadCriteriaTestSuite) TestReadinessProbePolicyCriteria() { + for _, testCase := range []struct { + caseName string + containers []*storage.Container + policyValue string + alerts []*storage.Alert_Violation + }{ + { + caseName: "Should raise alert since readiness probe is defined.", + containers: []*storage.Container{ + {Name: "container", ReadinessProbe: &storage.ReadinessProbe{Defined: true}}, + }, + policyValue: "true", + alerts: []*storage.Alert_Violation{ + {Message: "Readiness probe is defined for container 'container'"}, + }, + }, + { + caseName: "Should not raise alert since readiness probe is defined.", + containers: []*storage.Container{ + {Name: "container", ReadinessProbe: &storage.ReadinessProbe{Defined: true}}, + }, + policyValue: "false", + alerts: nil, + }, + { + caseName: "Should not raise alert since readiness probe is not defined.", + containers: []*storage.Container{ + {Name: "container", ReadinessProbe: &storage.ReadinessProbe{Defined: false}}, + }, + policyValue: "true", + alerts: nil, + }, + { + caseName: "Should raise alert since readiness probe is not defined.", + containers: []*storage.Container{ + {Name: "container", ReadinessProbe: &storage.ReadinessProbe{Defined: false}}, + }, + policyValue: "false", + alerts: []*storage.Alert_Violation{ + {Message: "Readiness probe is not defined for container 'container'"}, + }, + }, + { + caseName: "Should raise alert for both containers.", + containers: []*storage.Container{ + {Name: "container-1", ReadinessProbe: &storage.ReadinessProbe{Defined: false}}, + {Name: "container-2", ReadinessProbe: &storage.ReadinessProbe{Defined: false}}, + }, + policyValue: "false", + alerts: []*storage.Alert_Violation{ + {Message: "Readiness probe is not defined for container 'container-1'"}, + {Message: "Readiness probe is not defined for container 'container-2'"}, + }, + }, + { + caseName: "Should raise alert only for container-2.", + containers: []*storage.Container{ + {Name: "container-1", ReadinessProbe: &storage.ReadinessProbe{Defined: 
true}}, + {Name: "container-2", ReadinessProbe: &storage.ReadinessProbe{Defined: false}}, + }, + policyValue: "false", + alerts: []*storage.Alert_Violation{ + {Message: "Readiness probe is not defined for container 'container-2'"}, + }, + }, + } { + suite.Run(testCase.caseName, func() { + deployment := fixtures.GetDeployment().CloneVT() + deployment.Containers = testCase.containers + policy := policyWithSingleKeyValue(fieldnames.ReadinessProbeDefined, testCase.policyValue, false) + + matcher, err := BuildDeploymentMatcher(policy) + suite.NoError(err, "deployment matcher creation must succeed") + violations, err := matcher.MatchDeployment(nil, enhancedDeployment(deployment, suite.getImagesForDeployment(deployment))) + suite.NoError(err, "deployment matcher run must succeed") + + suite.Empty(violations.ProcessViolation) + protoassert.SlicesEqual(suite.T(), violations.AlertViolations, testCase.alerts) + }) + } +} diff --git a/tools/allowed-large-files b/tools/allowed-large-files index 28f29da6aa7b4..2ba1359c5492a 100644 --- a/tools/allowed-large-files +++ b/tools/allowed-large-files @@ -30,7 +30,8 @@ operator/config/crd/bases/*.yaml operator/config/manifests/bases/*.yaml operator/install/manifest.yaml operator/tests/img/*.png -pkg/booleanpolicy/default_policies_test.go +pkg/booleanpolicy/default_policies_builtin_test.go +pkg/booleanpolicy/runtime_criteria_test.go pkg/branding/files/red_hat_acs_logo_rgb.png pkg/branding/files/stackrox_integration_logo.png pkg/continuousprofiling/docs/img/*.png From f7d71220e733ea87d01b5005e16a85aa68089868 Mon Sep 17 00:00:00 2001 From: Tomasz Janiszewski Date: Wed, 18 Feb 2026 15:27:53 +0000 Subject: [PATCH 217/232] perf(net): optimize isPublic() with scalar bitwise operations (#19064) Signed-off-by: Tomasz Janiszewski Co-authored-by: Claude Sonnet 4.5 --- pkg/net/addr.go | 17 +--- pkg/net/addr_bench_test.go | 104 +++++++++++++++++++++++ pkg/net/internal/ipcheck/ipcheck.go | 77 +++++++++++++++++ pkg/net/internal/ipcheck/ipcheck_test.go | 102 ++++++++++++++++++++++ 4 files changed, 286 insertions(+), 14 deletions(-) create mode 100644 pkg/net/addr_bench_test.go create mode 100644 pkg/net/internal/ipcheck/ipcheck.go create mode 100644 pkg/net/internal/ipcheck/ipcheck_test.go diff --git a/pkg/net/addr.go b/pkg/net/addr.go index 2cfe81853962d..ca568076d2e59 100644 --- a/pkg/net/addr.go +++ b/pkg/net/addr.go @@ -4,6 +4,7 @@ import ( "bytes" "net" + "github.com/stackrox/rox/pkg/net/internal/ipcheck" "github.com/stackrox/rox/pkg/netutil" ) @@ -65,13 +66,7 @@ func (d ipv4data) isLoopback() bool { } func (d ipv4data) isPublic() bool { - netIP := net.IP(d.bytes()) - for _, privateIPNet := range netutil.IPv4PrivateNetworks { - if privateIPNet.Contains(netIP) { - return false - } - } - return true + return ipcheck.IsIPv4Public(d) } func (d ipv4data) canonicalize() ipAddrData { @@ -103,13 +98,7 @@ func (d ipv6data) isLoopback() bool { } func (d ipv6data) isPublic() bool { - netIP := net.IP(d.bytes()) - for _, privateIPNet := range netutil.IPv6PrivateNetworks { - if privateIPNet.Contains(netIP) { - return false - } - } - return true + return ipcheck.IsIPv6Public(d) } var ( diff --git a/pkg/net/addr_bench_test.go b/pkg/net/addr_bench_test.go new file mode 100644 index 0000000000000..7ccad0281287d --- /dev/null +++ b/pkg/net/addr_bench_test.go @@ -0,0 +1,104 @@ +package net + +import ( + "math/rand" + "strconv" + "testing" +) + +// BenchmarkIsPublic benchmarks individual IP classification +func BenchmarkIsPublic(b *testing.B) { + testCases := []struct { + name string + ip 
string + }{ + // Public IPs (worst case - checks all 5 networks) + {"PublicIPv4", "8.8.8.8"}, + {"PublicIPv4_AWS", "54.239.28.85"}, + + // Private IPs (varying early-exit positions) + {"PrivateIPv4_10", "10.1.2.3"}, + {"PrivateIPv4_192", "192.168.1.1"}, + {"PrivateIPv4_172", "172.16.0.1"}, + {"PrivateIPv4_100", "100.64.0.1"}, + {"PrivateIPv4_169", "169.254.1.1"}, + + // IPv6 + {"PublicIPv6", "2001:4860:4860::8888"}, + {"PrivateIPv6_ULA", "fd00::1"}, + {"PrivateIPv6_LinkLocal", "fe80::1"}, + + // IPv4-mapped IPv6 + {"IPv4Mapped_Public", "::ffff:8.8.8.8"}, + {"IPv4Mapped_Private", "::ffff:10.1.1.1"}, + } + + for _, tc := range testCases { + b.Run(tc.name, func(b *testing.B) { + addr := ParseIP(tc.ip) + for b.Loop() { + _ = addr.IsPublic() + } + }) + } +} + +// BenchmarkIsPublicBatch simulates network flow manager workload +func BenchmarkIsPublicBatch(b *testing.B) { + ips := generateMixedIPs(1000) + for b.Loop() { + for _, ip := range ips { + _ = ip.IsPublic() + } + } +} + +// BenchmarkIsPublicWorstCase: all public IPs (no early exit) +func BenchmarkIsPublicWorstCase(b *testing.B) { + ips := generatePublicIPs(1000) + for b.Loop() { + for _, ip := range ips { + _ = ip.IsPublic() + } + } +} + +// Helper functions +func generateMixedIPs(count int) []IPAddress { + ips := make([]IPAddress, count) + for i := range count { + if i%2 == 0 { + ips[i] = ParseIP(randomPublicIPv4()) + } else { + ips[i] = ParseIP(randomPrivateIPv4()) + } + } + return ips +} + +func generatePublicIPs(count int) []IPAddress { + ips := make([]IPAddress, count) + for i := range count { + ips[i] = ParseIP(randomPublicIPv4()) + } + return ips +} + +func randomPublicIPv4() string { + return "8." + randOctet() + "." + randOctet() + "." + randOctet() +} + +func randomPrivateIPv4() string { + switch rand.Intn(3) { + case 0: + return "10." + randOctet() + "." + randOctet() + "." + randOctet() + case 1: + return "192.168." + randOctet() + "." + randOctet() + default: + return "172.16." + randOctet() + "." + randOctet() + } +} + +func randOctet() string { + return strconv.Itoa(rand.Intn(256)) +} diff --git a/pkg/net/internal/ipcheck/ipcheck.go b/pkg/net/internal/ipcheck/ipcheck.go new file mode 100644 index 0000000000000..b1f22c01bb26e --- /dev/null +++ b/pkg/net/internal/ipcheck/ipcheck.go @@ -0,0 +1,77 @@ +package ipcheck + +import ( + "encoding/binary" + + "github.com/stackrox/rox/pkg/netutil" +) + +// IPv4 private network masks and prefixes (generated from netutil.IPv4PrivateNetworks) +// This maintains a single source of truth while providing optimized constants +var ( + ipv4Masks []uint32 + ipv4Prefixes []uint32 +) + +func init() { + // Generate masks and prefixes from the canonical definitions in netutil + ipv4Masks = make([]uint32, 0, len(netutil.IPv4PrivateNetworks)) + ipv4Prefixes = make([]uint32, 0, len(netutil.IPv4PrivateNetworks)) + for _, ipNet := range netutil.IPv4PrivateNetworks { + // Extract mask as uint32 + maskBytes := ipNet.Mask + if len(maskBytes) != 4 { + panic("IPv4 network has invalid mask length") + } + ipv4Masks = append(ipv4Masks, binary.BigEndian.Uint32(maskBytes)) + + // Extract network prefix as uint32 + ipBytes := ipNet.IP.To4() + if ipBytes == nil { + panic("IPv4 network has invalid IP") + } + ipv4Prefixes = append(ipv4Prefixes, binary.BigEndian.Uint32(ipBytes)) + } +} + +// IsIPv4Public returns true if the IPv4 address is public (not in private ranges). +// Input is 4-byte array representing IPv4 address. 
+func IsIPv4Public(ip [4]byte) bool { + // Convert to uint32 in network byte order (big-endian) + ipInt := binary.BigEndian.Uint32(ip[:]) + + // Check each private network range + for i := range len(ipv4Masks) { + if (ipInt & ipv4Masks[i]) == ipv4Prefixes[i] { + return false // Is private + } + } + + return true // Is public +} + +// IsIPv6Public returns true if the IPv6 address is public (not in private ranges). +// Input is 16-byte array representing IPv6 address. +func IsIPv6Public(ip [16]byte) bool { + // Check fd00::/8 (Unique Local Address) + if ip[0] == 0xfd { + return false + } + + // Check fe80::/10 (Link-Local) + if ip[0] == 0xfe && (ip[1]&0xc0) == 0x80 { + return false + } + + // Check IPv4-mapped IPv6 (::ffff:0:0/96) + if ip[0] == 0 && ip[1] == 0 && ip[2] == 0 && ip[3] == 0 && + ip[4] == 0 && ip[5] == 0 && ip[6] == 0 && ip[7] == 0 && + ip[8] == 0 && ip[9] == 0 && ip[10] == 0xff && ip[11] == 0xff { + // Extract IPv4 part and check if private + var ipv4 [4]byte + copy(ipv4[:], ip[12:16]) + return IsIPv4Public(ipv4) + } + + return true // Is public +} diff --git a/pkg/net/internal/ipcheck/ipcheck_test.go b/pkg/net/internal/ipcheck/ipcheck_test.go new file mode 100644 index 0000000000000..be7edb88ef864 --- /dev/null +++ b/pkg/net/internal/ipcheck/ipcheck_test.go @@ -0,0 +1,102 @@ +package ipcheck + +import ( + "net" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIsIPv4Public(t *testing.T) { + tests := []struct { + name string + ip [4]byte + expected bool + }{ + // Public IPs + {"Google DNS", [4]byte{8, 8, 8, 8}, true}, + {"Cloudflare", [4]byte{1, 1, 1, 1}, true}, + {"AWS", [4]byte{54, 239, 28, 85}, true}, + + // Private IPs - 10.0.0.0/8 + {"10.0.0.1", [4]byte{10, 0, 0, 1}, false}, + {"10.255.255.254", [4]byte{10, 255, 255, 254}, false}, + + // Private IPs - 192.168.0.0/16 + {"192.168.0.1", [4]byte{192, 168, 0, 1}, false}, + {"192.168.255.255", [4]byte{192, 168, 255, 255}, false}, + + // Private IPs - 172.16.0.0/12 + {"172.16.0.1", [4]byte{172, 16, 0, 1}, false}, + {"172.31.255.254", [4]byte{172, 31, 255, 254}, false}, + + // Private IPs - 100.64.0.0/10 + {"100.64.0.1", [4]byte{100, 64, 0, 1}, false}, + {"100.127.255.254", [4]byte{100, 127, 255, 254}, false}, + + // Private IPs - 169.254.0.0/16 + {"169.254.0.1", [4]byte{169, 254, 0, 1}, false}, + {"169.254.255.254", [4]byte{169, 254, 255, 254}, false}, + + // Boundary testing + {"9.255.255.255 (before 10.x)", [4]byte{9, 255, 255, 255}, true}, + {"11.0.0.0 (after 10.x)", [4]byte{11, 0, 0, 0}, true}, + {"172.15.255.255 (before 172.16.x)", [4]byte{172, 15, 255, 255}, true}, + {"172.32.0.0 (after 172.31.x)", [4]byte{172, 32, 0, 0}, true}, + + // Special addresses + {"127.0.0.1 (localhost)", [4]byte{127, 0, 0, 1}, true}, // Loopback is public per isPublic semantics + {"0.0.0.0", [4]byte{0, 0, 0, 0}, true}, + {"255.255.255.255", [4]byte{255, 255, 255, 255}, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := IsIPv4Public(tt.ip) + assert.Equal(t, tt.expected, result, + "IsIPv4Public(%v) = %v, expected %v", + net.IP(tt.ip[:]), result, tt.expected) + }) + } +} + +func TestIsIPv6Public(t *testing.T) { + tests := []struct { + name string + ipStr string + expected bool + }{ + // Public IPv6 + {"Google DNS", "2001:4860:4860::8888", true}, + {"Cloudflare", "2606:4700:4700::1111", true}, + + // Private IPv6 - ULA (fd00::/8) + {"ULA", "fd00::1", false}, + {"ULA with data", "fd12:3456:789a:1::1", false}, + + // Private IPv6 - Link-Local (fe80::/10) + {"Link-Local", "fe80::1", false}, + 
{"Link-Local with data", "fe80::250:56ff:fe9a:8f73", false}, + + // IPv4-mapped IPv6 + {"IPv4-mapped public", "::ffff:8.8.8.8", true}, + {"IPv4-mapped private 10.x", "::ffff:10.1.1.1", false}, + {"IPv4-mapped private 192.168.x", "::ffff:192.168.1.1", false}, + + // IPv6 loopback (::1) + {"Loopback", "::1", true}, // Loopback is public per isPublic semantics + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ip := net.ParseIP(tt.ipStr) + var ipv6 [16]byte + copy(ipv6[:], ip.To16()) + + result := IsIPv6Public(ipv6) + assert.Equal(t, tt.expected, result, + "IsIPv6Public(%s) = %v, expected %v", + tt.ipStr, result, tt.expected) + }) + } +} From 469ca1dd57d1638f43c993499479c3dc26422863 Mon Sep 17 00:00:00 2001 From: Tomasz Janiszewski Date: Wed, 18 Feb 2026 16:16:53 +0000 Subject: [PATCH 218/232] ROX-32914: restrict auto approve to Dependabot PRs (#19041) Signed-off-by: Tomasz Janiszewski Co-authored-by: Claude Sonnet 4.5 --- .github/dependabot.yaml | 2 +- .github/workflows/auto-merge.yml | 25 +++++++++++++------------ 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml index 7c8f2504f6a93..b87380882173e 100644 --- a/.github/dependabot.yaml +++ b/.github/dependabot.yaml @@ -259,7 +259,7 @@ updates: labels: - "dependencies" - "area/operator" - - "auto-merge-any" # these images do not follow semver + - "auto-merge" - "auto-retest" commit-message: include: scope diff --git a/.github/workflows/auto-merge.yml b/.github/workflows/auto-merge.yml index 5789969c2a8da..15f3044d9a0a8 100644 --- a/.github/workflows/auto-merge.yml +++ b/.github/workflows/auto-merge.yml @@ -32,17 +32,15 @@ jobs: echo "::notice::Querying PRs with auto-merge labels" - # Get all open PRs with auto-merge or auto-merge-any labels + # Get all PRs with auto-merge labels (non-draft, mergeable only) PR_DATA=$(gh pr list \ --repo "${{ github.repository }}" \ + --label "auto-merge" \ + --draft=false \ --state open \ --limit 50 \ - --json number,labels,isDraft,mergeable,author \ - --jq ".[] | select( - .isDraft == false and - .mergeable == \"MERGEABLE\" and - (.labels | map(.name) | any(. == \"auto-merge\" or . == \"auto-merge-any\")) - ) | {number, labels: [.labels[].name], author: .author.login}") + --json number,mergeable,author \ + --jq ".[] | select(.mergeable == \"MERGEABLE\") | {number, author: .author.login}") if [[ -z "$PR_DATA" ]]; then echo "::notice::No eligible PRs found with auto-merge labels" @@ -53,9 +51,8 @@ jobs: echo "$PR_DATA" | jq -c '.' 
| while read -r PR_JSON; do PR_NUMBER=$(echo "$PR_JSON" | jq -r '.number') AUTHOR=$(echo "$PR_JSON" | jq -r '.author') - LABELS=$(echo "$PR_JSON" | jq -r '.labels | join(",")') - echo "::notice::Processing PR #$PR_NUMBER (author=$AUTHOR, labels=$LABELS)" + echo "::notice::Processing PR #$PR_NUMBER (author=$AUTHOR)" # Check if all checks have passed using GraphQL statusCheckRollup STATUS=$(gh api graphql -F owner="$OWNER" -F repo="$REPO" -F number="$PR_NUMBER" -f query=" @@ -84,13 +81,17 @@ jobs: continue fi + # Enable auto-merge for all PRs with the label echo "::notice::Enabling auto-merge for PR #$PR_NUMBER" gh pr merge --repo "${{ github.repository }}" \ --auto --squash "$PR_NUMBER" - echo "::notice::Approving PR #$PR_NUMBER" - gh pr review --repo "${{ github.repository }}" \ - --approve "$PR_NUMBER" || true + # Auto-approve only Dependabot PRs + if [[ "$AUTHOR" == "app/dependabot" ]]; then + echo "::notice::Approving Dependabot PR #$PR_NUMBER" + gh pr review --repo "${{ github.repository }}" \ + --approve "$PR_NUMBER" || true + fi echo "::notice::✓ Auto-merge enabled for PR #$PR_NUMBER" done From ac2b052f6ba8ff154306f4c4f2e7561b32950d45 Mon Sep 17 00:00:00 2001 From: "J. Victor Martins" Date: Wed, 18 Feb 2026 09:48:18 -0800 Subject: [PATCH 219/232] chore(deps): bump openshift-golang-builder to Go 1.25.7 (#19066) --- image/rhel/konflux.Dockerfile | 2 +- image/roxctl/konflux.Dockerfile | 2 +- operator/konflux.Dockerfile | 2 +- scanner/image/scanner/konflux.Dockerfile | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/image/rhel/konflux.Dockerfile b/image/rhel/konflux.Dockerfile index c4d973e41de54..b6fc4e150c507 100644 --- a/image/rhel/konflux.Dockerfile +++ b/image/rhel/konflux.Dockerfile @@ -1,7 +1,7 @@ ARG PG_VERSION=15 -FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_golang_1.25@sha256:527782f4a0270f786192281f68d0374f4a21b3ab759643eee4bfcafb6f539468 AS go-builder +FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_golang_1.25@sha256:aa03597ee8c7594ffecef5cbb6a0f059d362259d2a41225617b27ec912a3d0d3 AS go-builder RUN dnf -y install --allowerasing jq diff --git a/image/roxctl/konflux.Dockerfile b/image/roxctl/konflux.Dockerfile index 3ee1ea2c63cf0..6e828886ba076 100644 --- a/image/roxctl/konflux.Dockerfile +++ b/image/roxctl/konflux.Dockerfile @@ -4,7 +4,7 @@ # - https://issues.redhat.com/browse/RHTAPBUGS-864 - deprecated-base-image-check behaves incorrectly. # - https://issues.redhat.com/browse/RHTAPBUGS-865 - openshift-golang-builder is not considered to be a valid base image. 
# -FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_golang_1.25@sha256:527782f4a0270f786192281f68d0374f4a21b3ab759643eee4bfcafb6f539468 AS builder +FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_golang_1.25@sha256:aa03597ee8c7594ffecef5cbb6a0f059d362259d2a41225617b27ec912a3d0d3 AS builder WORKDIR /go/src/github.com/stackrox/rox/app diff --git a/operator/konflux.Dockerfile b/operator/konflux.Dockerfile index 28ac4bf1b525d..9357f8e9c84c0 100644 --- a/operator/konflux.Dockerfile +++ b/operator/konflux.Dockerfile @@ -1,4 +1,4 @@ -FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_golang_1.25@sha256:527782f4a0270f786192281f68d0374f4a21b3ab759643eee4bfcafb6f539468 AS builder +FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_golang_1.25@sha256:aa03597ee8c7594ffecef5cbb6a0f059d362259d2a41225617b27ec912a3d0d3 AS builder WORKDIR /go/src/github.com/stackrox/rox/app diff --git a/scanner/image/scanner/konflux.Dockerfile b/scanner/image/scanner/konflux.Dockerfile index 17b1dee562a1e..6dbff3257dad4 100644 --- a/scanner/image/scanner/konflux.Dockerfile +++ b/scanner/image/scanner/konflux.Dockerfile @@ -1,4 +1,4 @@ -FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_golang_1.25@sha256:527782f4a0270f786192281f68d0374f4a21b3ab759643eee4bfcafb6f539468 AS builder +FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_golang_1.25@sha256:aa03597ee8c7594ffecef5cbb6a0f059d362259d2a41225617b27ec912a3d0d3 AS builder ARG BUILD_TAG RUN if [[ "$BUILD_TAG" == "" ]]; then >&2 echo "error: required BUILD_TAG arg is unset"; exit 6; fi From 204cd951a6672ce6ff3eb298b7b8dd2f11d1903f Mon Sep 17 00:00:00 2001 From: Tomasz Janiszewski Date: Wed, 18 Feb 2026 18:58:45 +0000 Subject: [PATCH 220/232] chore(ci): add GHA workflow commands for better log readability (#19056) Signed-off-by: Tomasz Janiszewski Co-authored-by: Claude Sonnet 4.5 Co-authored-by: Marcin Owsiany --- scripts/ci/jobs/check-generated.sh | 38 ++++++++++++++++++----- scripts/lib.sh | 50 ++++++++++++++++++++++++++---- 2 files changed, 75 insertions(+), 13 deletions(-) diff --git a/scripts/ci/jobs/check-generated.sh b/scripts/ci/jobs/check-generated.sh index 7f79983738e2a..0c82a8905a90e 100755 --- a/scripts/ci/jobs/check-generated.sh +++ b/scripts/ci/jobs/check-generated.sh @@ -46,14 +46,21 @@ FAIL_FLAG="/tmp/fail" # 2. a single point for handling errors after each check. # shellcheck disable=SC2016 -info 'Ensure that generated files are up to date. (If this fails, run `make proto-generated-srcs && make go-generated-srcs` and commit the result.)' +info 'Check: Generated files are up to date. If this fails, run `make proto-generated-srcs && make go-generated-srcs` and commit the result.' function generated_files-are-up-to-date() { git ls-files --others --exclude-standard >/tmp/untracked + + github_group 'Running make proto-generated-srcs' make proto-generated-srcs + github_endgroup + # Remove generated mocks, they should be regenerated and if source was deleted they should be deleted as well. git grep --files-with-matches "Package mocks is a generated GoMock package." 
-- '*.go' | xargs rm - # Print the timestamp along with each new line of output, so we can track how long each command takes - make go-generated-srcs 2>&1 | while IFS= read -r line; do printf '[%s] %s\n' "$(date '+%Y-%m-%d %H:%M:%S')" "$line"; done + + github_group 'Running make go-generated-srcs' + make go-generated-srcs + github_endgroup + git diff --exit-code HEAD { git ls-files --others --exclude-standard ; cat /tmp/untracked ; } | sort | uniq -u >/tmp/untracked-new @@ -78,14 +85,21 @@ bash -c generated_files-are-up-to-date || { } # shellcheck disable=SC2016 -info 'Check operator files are up to date (If this fails, run `make -C operator manifests generate bundle` and commit the result.)' +info 'Check: Operator generated files are up to date. If this fails, run `make -C operator manifests generate bundle` and commit the result.' function check-operator-generated-files-up-to-date() { + github_group 'Generate operator files' make -C operator/ generate make -C operator/ manifests + github_endgroup + echo 'Checking for diffs after making generate and manifests...' git diff --exit-code HEAD + + github_group 'Generate operator bundle' make -C operator/ bundle + github_endgroup + echo 'Checking for diffs after making bundle...' echo 'If this fails, check if the invocation of the normalize-metadata.py script in operator/Makefile' echo 'needs to change due to formatting changes in the generated files.' @@ -93,6 +107,7 @@ function check-operator-generated-files-up-to-date() { # For as long as the helm chart kubebuilder plugin is alpha, we want to check that kubebuilder bumps do not surprise # us with unexpected divergence compared to the (more seasoned and predictable) manifest output. + github_group 'Generate operator chart' make -C operator/ chart echo 'Expanding the operator helm chart...' helm template --namespace rhacs-operator-system rhacs-operator ./operator/dist/chart/ > operator/dist/chart.yaml @@ -107,6 +122,8 @@ function check-operator-generated-files-up-to-date() { operator/dist/chart.yaml > operator/dist/chart-sorted.yaml $yq -P ea '[.] | sort_by(.kind, .metadata.name) | filter(.kind != "Namespace") | .[] | splitDoc | ... comments=""' \ operator/dist/install.yaml > operator/dist/install-sorted.yaml + github_endgroup + echo 'Checking for differences between normalized operator manifest and normalized and expanded operator helm chart...' diff -U 10 operator/dist/install-sorted.yaml operator/dist/chart-sorted.yaml } @@ -120,9 +137,12 @@ bash -c check-operator-generated-files-up-to-date || { } # shellcheck disable=SC2016 -info 'Check config-controller files are up to date (If this fails, run `make config-controller-gen` and commit the result.)' +info 'Check: Config-controller generated files are up to date. If this fails, run `make config-controller-gen` and commit the result.' function check-config-controller-generated-files-up-to-date() { + github_group 'Running make config-controller-gen' make config-controller-gen + github_endgroup + echo 'Checking for diffs after making config-controller-gen...' git diff --exit-code HEAD } @@ -135,7 +155,8 @@ bash -c check-config-controller-generated-files-up-to-date || { echo check-config-controller-generated-files-up-to-date >> "$FAIL_FLAG" } -info 'Check .containerignore file is in sync with .dockerignore (If this fails, follow instructions in .containerignore to update it.)' +info 'Check: .containerignore file is in sync with .dockerignore' +info 'If this fails, follow instructions in .containerignore to update it.' 
function check-containerignore-is-in-sync() { diff \ --unified \ @@ -154,9 +175,12 @@ bash -c check-containerignore-is-in-sync || { } # shellcheck disable=SC2016 -echo 'Check if a script that was on the failed shellcheck list is now fixed. (If this fails, run `make update-shellcheck-skip` and commit the result.)' +info 'Check: Shellcheck skip list is up to date. If this fails, run `make update-shellcheck-skip` and commit the result.' function check-shellcheck-failing-list() { + github_group 'Running make update-shellcheck-skip' make update-shellcheck-skip + github_endgroup + echo 'Checking for diffs after updating shellcheck failing list...' if ! git diff --exit-code HEAD; then echo 'Failure only if files can be removed from the skip file.' diff --git a/scripts/lib.sh b/scripts/lib.sh index f59c9cd6a7c9b..4acddb456158c 100755 --- a/scripts/lib.sh +++ b/scripts/lib.sh @@ -13,14 +13,56 @@ Reuse with: method [args...]" } +is_GITHUB_ACTIONS() { + [[ -n "${GITHUB_ACTION:-}" ]] +} +export -f is_GITHUB_ACTIONS + info() { - echo "INFO: $(date): $*" + if is_GITHUB_ACTIONS; then + echo "::notice::INFO: $*" + else + echo "INFO: $(date): $*" + fi } +export -f info + +warn() { + if is_GITHUB_ACTIONS; then + echo "::warning::WARNING: $*" + else + echo "WARNING: $(date): $*" + fi +} +export -f warn die() { - echo >&2 "ERROR:" "$@" + if is_GITHUB_ACTIONS; then + echo >&2 "::error::ERROR: $*" + else + echo >&2 "ERROR:" "$@" + fi exit 1 } +export -f die + +# Start a collapsible group in GitHub Actions logs +github_group() { + if is_GITHUB_ACTIONS; then + echo "::group::$*" + else + info "$*" + fi +} +export -f github_group + +# End a collapsible group in GitHub Actions logs +github_endgroup() { + if is_GITHUB_ACTIONS; then + echo "::endgroup::" + fi +} +export -f github_endgroup # Caution when editing: make sure groups would correspond to BASH_REMATCH use. 
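# For example, under the regex below, "4.4.0-rc.1" matches with
# BASH_REMATCH[1]="4.4.0" and BASH_REMATCH[3]="-rc.1", while a plain release
# tag such as "4.4.0" matches with BASH_REMATCH[3] left empty.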
RELEASE_RC_TAG_BASH_REGEX='^([[:digit:]]+(\.[[:digit:]]+)*)(-rc\.[[:digit:]]+)?$' @@ -74,10 +116,6 @@ is_OPENSHIFT_CI() { [[ "${OPENSHIFT_CI:-}" == "true" ]] } -is_GITHUB_ACTIONS() { - [[ -n "${GITHUB_ACTION:-}" ]] -} - is_darwin() { uname -a | grep -i darwin >/dev/null 2>&1 } From adc404b27cd270c04ad5a89b7a4c50c658f1a1dd Mon Sep 17 00:00:00 2001 From: David House <105243888+davdhacs@users.noreply.github.com> Date: Wed, 18 Feb 2026 13:00:08 -0700 Subject: [PATCH 221/232] fix(ci): emailsender test (kind bug) (#19050) Co-authored-by: Claude Opus 4.6 (1M context) --- .../workflows/emailsender-central-compatibility.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/emailsender-central-compatibility.yaml b/.github/workflows/emailsender-central-compatibility.yaml index 7aac683b8580b..23f5a6fb8ab21 100644 --- a/.github/workflows/emailsender-central-compatibility.yaml +++ b/.github/workflows/emailsender-central-compatibility.yaml @@ -19,6 +19,10 @@ on: - 'central/auth/m2m/**' - '.github/workflows/emailsender-central-compatibility.yaml' +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + jobs: e2e-test-on-kind: timeout-minutes: 60 @@ -29,10 +33,6 @@ jobs: id-token: write contents: read steps: - - name: Cancel Previous Runs - uses: n1hility/cancel-previous-runs@v3 - with: - token: ${{ secrets.GITHUB_TOKEN }} - name: Login to Quay.io uses: docker/login-action@v3 with: @@ -57,11 +57,11 @@ jobs: uses: actions/setup-go@v6 with: go-version-file: stackrox/go.mod - cache: false # repo checkout is in stackrox/ subdir, setup-go can't find go.sum + cache: false # rarely runs, and repo checkout is in stackrox/ subdir - uses: ./stackrox/.github/actions/job-preamble with: free-disk-space: 40 - - name: Create Kind cluster" + - name: Create Kind cluster uses: helm/kind-action@v1 with: cluster_name: kind From 73a3b6dfe13ff2ae0978a8d1609a62fe4a2d0c70 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Feb 2026 14:46:32 -0700 Subject: [PATCH 222/232] chore(deps): bump github.com/grpc-ecosystem/grpc-gateway/v2 from 2.27.4 to 2.28.0 (#19080) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: StackRox PR Fixxxer --- ...administration_events_service.swagger.json | 46 ++--- .../administration_usage_service.swagger.json | 6 +- generated/api/v1/alert_service.swagger.json | 102 +++++----- .../api/v1/api_token_service.swagger.json | 12 +- generated/api/v1/audit.swagger.json | 2 +- generated/api/v1/auth_service.swagger.json | 16 +- .../api/v1/authprovider_service.swagger.json | 20 +- generated/api/v1/backup_service.swagger.json | 20 +- .../v1/central_health_service.swagger.json | 4 +- .../api/v1/cloud_source_service.swagger.json | 54 ++--- .../api/v1/cluster_init_service.swagger.json | 18 +- generated/api/v1/cluster_service.swagger.json | 16 +- generated/api/v1/common.swagger.json | 2 +- ...compliance_management_service.swagger.json | 8 +- .../api/v1/compliance_service.swagger.json | 190 +++++++++--------- generated/api/v1/config_service.swagger.json | 62 +++--- .../v1/credential_expiry_service.swagger.json | 4 +- generated/api/v1/cve_service.swagger.json | 10 +- generated/api/v1/db_service.swagger.json | 10 +- generated/api/v1/debug_service.swagger.json | 136 ++++++------- ...arative_config_health_service.swagger.json | 34 ++-- ...gated_registry_config_service.swagger.json | 8 +- 
.../api/v1/deployment_service.swagger.json | 96 ++++----- .../api/v1/detection_service.swagger.json | 134 ++++++------ .../discovered_cluster_service.swagger.json | 8 +- generated/api/v1/empty.swagger.json | 2 +- .../api/v1/feature_flag_service.swagger.json | 4 +- generated/api/v1/group_service.swagger.json | 14 +- .../v1/grpc_preference_service.swagger.json | 4 +- .../v1/image_integration_service.swagger.json | 18 +- generated/api/v1/image_service.swagger.json | 46 ++--- .../integration_health_service.swagger.json | 12 +- .../api/v1/metadata_service.swagger.json | 12 +- generated/api/v1/mitre_service.swagger.json | 6 +- .../api/v1/namespace_service.swagger.json | 6 +- .../v1/network_baseline_service.swagger.json | 58 +++--- .../api/v1/network_graph_service.swagger.json | 64 +++--- .../v1/network_policy_service.swagger.json | 136 ++++++------- generated/api/v1/node_service.swagger.json | 40 ++-- generated/api/v1/notifications.swagger.json | 2 +- .../api/v1/notifier_service.swagger.json | 18 +- generated/api/v1/pagination.swagger.json | 2 +- generated/api/v1/ping_service.swagger.json | 4 +- generated/api/v1/pod_service.swagger.json | 8 +- .../v1/policy_category_service.swagger.json | 12 +- generated/api/v1/policy_service.swagger.json | 108 +++++----- .../api/v1/probe_upload_service.swagger.json | 4 +- .../v1/process_baseline_service.swagger.json | 14 +- ...ess_listening_on_port_service.swagger.json | 4 +- generated/api/v1/process_service.swagger.json | 10 +- generated/api/v1/rbac_service.swagger.json | 14 +- .../report_configuration_service.swagger.json | 74 +++---- generated/api/v1/report_service.swagger.json | 4 +- .../resource_collection_service.swagger.json | 18 +- generated/api/v1/role_service.swagger.json | 158 +++++++-------- generated/api/v1/sbom.swagger.json | 2 +- generated/api/v1/search_service.swagger.json | 8 +- generated/api/v1/secret_service.swagger.json | 8 +- .../v1/sensor_upgrade_service.swagger.json | 10 +- .../v1/service_account_service.swagger.json | 6 +- .../v1/service_identity_service.swagger.json | 8 +- generated/api/v1/signal.swagger.json | 2 +- ...signature_integration_service.swagger.json | 12 +- .../api/v1/telemetry_service.swagger.json | 10 +- generated/api/v1/traits.swagger.json | 2 +- generated/api/v1/user_service.swagger.json | 8 +- .../api/v1/vuln_mgmt_service.swagger.json | 96 ++++----- go.mod | 6 +- go.sum | 12 +- 69 files changed, 1042 insertions(+), 1042 deletions(-) diff --git a/generated/api/v1/administration_events_service.swagger.json b/generated/api/v1/administration_events_service.swagger.json index c8e1f2179508e..b3be1f4c54036 100644 --- a/generated/api/v1/administration_events_service.swagger.json +++ b/generated/api/v1/administration_events_service.swagger.json @@ -30,7 +30,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -172,7 +172,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -203,7 +203,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -288,7 +288,25 @@ } }, "definitions": { - "googlerpcStatus": { + "AdministrationEventResource": { + "type": "object", + "properties": { + "type": { + "type": "string", + "description": "Resource type associated with the event. 
An event may refer to an underlying resource\nsuch as a particular image. In that case, the resource type will be filled here." + }, + "id": { + "type": "string", + "description": "Resource ID associated with the event. If an event refers to an underlying resource,\nthe resource ID identifies the underlying resource. The resource ID is not guaranteed\nto be set, depending on the context of the administration event." + }, + "name": { + "type": "string", + "description": "Resource name associated with the event. If an event refers to an underlying resource,\nthe resource name identifies the underlying resource. The resource name is not guaranteed\nto be set, depending on the context of the administration event." + } + }, + "description": "Resource holds all information about the resource associated with the event." + }, + "googleRpcStatus": { "type": "object", "properties": { "code": { @@ -346,7 +364,7 @@ "description": "Domain associated with the event. An event's domain outlines the feature domain where\nthe event was created from. As an example, this might be \"Image Scanning\".\nIn case of events that cannot be tied to a specific domain, this will be \"General\"." }, "resource": { - "$ref": "#/definitions/v1AdministrationEventResource" + "$ref": "#/definitions/AdministrationEventResource" }, "numOccurrences": { "type": "string", @@ -378,24 +396,6 @@ "default": "ADMINISTRATION_EVENT_LEVEL_UNKNOWN", "description": "AdministrationEventLevel exposes the different levels of events." }, - "v1AdministrationEventResource": { - "type": "object", - "properties": { - "type": { - "type": "string", - "description": "Resource type associated with the event. An event may refer to an underlying resource\nsuch as a particular image. In that case, the resource type will be filled here." - }, - "id": { - "type": "string", - "description": "Resource ID associated with the event. If an event refers to an underlying resource,\nthe resource ID identifies the underlying resource. The resource ID is not guaranteed\nto be set, depending on the context of the administration event." - }, - "name": { - "type": "string", - "description": "Resource name associated with the event. If an event refers to an underlying resource,\nthe resource name identifies the underlying resource. The resource name is not guaranteed\nto be set, depending on the context of the administration event." - } - }, - "description": "Resource holds all information about the resource associated with the event." 
- }, "v1AdministrationEventType": { "type": "string", "enum": [ diff --git a/generated/api/v1/administration_usage_service.swagger.json b/generated/api/v1/administration_usage_service.swagger.json index c78a6b4d67fc1..a17c1c2c9f6c2 100644 --- a/generated/api/v1/administration_usage_service.swagger.json +++ b/generated/api/v1/administration_usage_service.swagger.json @@ -31,7 +31,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -55,7 +55,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -82,7 +82,7 @@ } }, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/alert_service.swagger.json b/generated/api/v1/alert_service.swagger.json index 5cc89649aa177..a19e57a509ac6 100644 --- a/generated/api/v1/alert_service.swagger.json +++ b/generated/api/v1/alert_service.swagger.json @@ -30,7 +30,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -103,7 +103,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -185,7 +185,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -218,7 +218,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -306,7 +306,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -382,7 +382,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -458,7 +458,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -489,7 +489,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -528,7 +528,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -592,17 +592,6 @@ } }, "definitions": { - "AlertDeploymentContainer": { - "type": "object", - "properties": { - "image": { - "$ref": "#/definitions/storageContainerImage" - }, - "name": { - "type": "string" - } - } - }, "AlertEnforcement": { "type": "object", "properties": { @@ -637,6 +626,25 @@ } } }, + "AlertNode": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "clusterId": { + "type": "string", + "description": "This field has to be duplicated in Alert for scope management and search." + }, + "clusterName": { + "type": "string", + "description": "This field has to be duplicated in Alert for scope management and search." 
+ } + } + }, "AlertProcessViolation": { "type": "object", "properties": { @@ -729,6 +737,25 @@ } } }, + "DeploymentContainer": { + "type": "object", + "properties": { + "image": { + "$ref": "#/definitions/storageContainerImage" + }, + "name": { + "type": "string" + } + } + }, + "ExclusionImage": { + "type": "object", + "properties": { + "name": { + "type": "string" + } + } + }, "FileAccessFileMetadata": { "type": "object", "properties": { @@ -932,7 +959,7 @@ } } }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { @@ -999,7 +1026,7 @@ "title": "Represents an alert on a kubernetes resource (configmaps, secrets, etc.)\nAn alert cannot be on more than one entity (deployment, container image, resource, etc.)" }, "node": { - "$ref": "#/definitions/storageAlertNode", + "$ref": "#/definitions/AlertNode", "title": "Represents an alert on a node.\nAn alert cannot be on more than one entity (deployment, container image, resource, etc.)" }, "violations": { @@ -1079,7 +1106,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/AlertDeploymentContainer" + "$ref": "#/definitions/DeploymentContainer" } }, "annotations": { @@ -1093,25 +1120,6 @@ } } }, - "storageAlertNode": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "name": { - "type": "string" - }, - "clusterId": { - "type": "string", - "description": "This field has to be duplicated in Alert for scope management and search." - }, - "clusterName": { - "type": "string", - "description": "This field has to be duplicated in Alert for scope management and search." - } - } - }, "storageAlertResource": { "type": "object", "properties": { @@ -1204,7 +1212,7 @@ "$ref": "#/definitions/storageExclusionDeployment" }, "image": { - "$ref": "#/definitions/storageExclusionImage" + "$ref": "#/definitions/ExclusionImage" }, "expiration": { "type": "string", @@ -1223,14 +1231,6 @@ } } }, - "storageExclusionImage": { - "type": "object", - "properties": { - "name": { - "type": "string" - } - } - }, "storageFileAccess": { "type": "object", "properties": { diff --git a/generated/api/v1/api_token_service.swagger.json b/generated/api/v1/api_token_service.swagger.json index 8e43ea4d141b8..96a411c1f51bb 100644 --- a/generated/api/v1/api_token_service.swagger.json +++ b/generated/api/v1/api_token_service.swagger.json @@ -30,7 +30,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -61,7 +61,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -94,7 +94,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -117,7 +117,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -148,7 +148,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -167,7 +167,7 @@ } }, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/audit.swagger.json b/generated/api/v1/audit.swagger.json index a41465200f384..713fd20b0be90 100644 --- 
a/generated/api/v1/audit.swagger.json +++ b/generated/api/v1/audit.swagger.json @@ -12,7 +12,7 @@ ], "paths": {}, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/auth_service.swagger.json b/generated/api/v1/auth_service.swagger.json index 6f05f7c6f7cd3..c119423342501 100644 --- a/generated/api/v1/auth_service.swagger.json +++ b/generated/api/v1/auth_service.swagger.json @@ -30,7 +30,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -51,7 +51,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -84,7 +84,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -117,7 +117,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -157,7 +157,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -186,7 +186,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -217,7 +217,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -303,7 +303,7 @@ }, "description": "ResourceToAccess represents a collection of permissions. It is wire\ncompatible with the old format of storage.Role and replaces it in\nplaces where only aggregated permissions are required." 
}, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/authprovider_service.swagger.json b/generated/api/v1/authprovider_service.swagger.json index 18588c3526635..2c4d2711688a0 100644 --- a/generated/api/v1/authprovider_service.swagger.json +++ b/generated/api/v1/authprovider_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -63,7 +63,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -95,7 +95,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -127,7 +127,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -155,7 +155,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -189,7 +189,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -225,7 +225,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -263,7 +263,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -285,7 +285,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -426,7 +426,7 @@ }, "description": "ResourceToAccess represents a collection of permissions. It is wire\ncompatible with the old format of storage.Role and replaces it in\nplaces where only aggregated permissions are required." 
}, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/backup_service.swagger.json b/generated/api/v1/backup_service.swagger.json index c051cd55e38d1..d2b08da1642d8 100644 --- a/generated/api/v1/backup_service.swagger.json +++ b/generated/api/v1/backup_service.swagger.json @@ -30,7 +30,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -51,7 +51,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -84,7 +84,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -117,7 +117,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -150,7 +150,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -189,7 +189,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -218,7 +218,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -247,7 +247,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -276,7 +276,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -419,7 +419,7 @@ } } }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/central_health_service.swagger.json b/generated/api/v1/central_health_service.swagger.json index a23b89f448581..8f503d22ee80c 100644 --- a/generated/api/v1/central_health_service.swagger.json +++ b/generated/api/v1/central_health_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -40,7 +40,7 @@ } }, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/cloud_source_service.swagger.json b/generated/api/v1/cloud_source_service.swagger.json index 0947315e463f3..2dcf635c25ed1 100644 --- a/generated/api/v1/cloud_source_service.swagger.json +++ b/generated/api/v1/cloud_source_service.swagger.json @@ -30,7 +30,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -125,7 +125,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -158,7 +158,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -191,7 +191,7 @@ 
"default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -230,7 +230,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -259,7 +259,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -290,7 +290,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -330,6 +330,23 @@ } }, "definitions": { + "CloudSourceCredentials": { + "type": "object", + "properties": { + "secret": { + "type": "string", + "description": "Used for single-valued authentication via long-lived tokens." + }, + "clientId": { + "type": "string", + "description": "Used for client authentication in combination with client_secret." + }, + "clientSecret": { + "type": "string", + "description": "Used for client authentication in combination with client_id." + } + } + }, "CloudSourcesServiceUpdateCloudSourceBody": { "type": "object", "properties": { @@ -343,7 +360,7 @@ "$ref": "#/definitions/v1CloudSourceType" }, "credentials": { - "$ref": "#/definitions/v1CloudSourceCredentials" + "$ref": "#/definitions/CloudSourceCredentials" }, "skipTestIntegration": { "type": "boolean" @@ -363,7 +380,7 @@ } } }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { @@ -427,7 +444,7 @@ "$ref": "#/definitions/v1CloudSourceType" }, "credentials": { - "$ref": "#/definitions/v1CloudSourceCredentials" + "$ref": "#/definitions/CloudSourceCredentials" }, "skipTestIntegration": { "type": "boolean" @@ -441,23 +458,6 @@ }, "description": "CloudSource is an integration which provides a source for discovered\nclusters." }, - "v1CloudSourceCredentials": { - "type": "object", - "properties": { - "secret": { - "type": "string", - "description": "Used for single-valued authentication via long-lived tokens." - }, - "clientId": { - "type": "string", - "description": "Used for client authentication in combination with client_secret." - }, - "clientSecret": { - "type": "string", - "description": "Used for client authentication in combination with client_id." 
- } - } - }, "v1CloudSourceType": { "type": "string", "enum": [ diff --git a/generated/api/v1/cluster_init_service.swagger.json b/generated/api/v1/cluster_init_service.swagger.json index 13f0c54ee0ea7..f82d4953befee 100644 --- a/generated/api/v1/cluster_init_service.swagger.json +++ b/generated/api/v1/cluster_init_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -51,7 +51,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -71,7 +71,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -103,7 +103,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -136,7 +136,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -168,7 +168,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -188,7 +188,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -221,7 +221,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -282,7 +282,7 @@ } } }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/cluster_service.swagger.json b/generated/api/v1/cluster_service.swagger.json index 8f70c3128ee8a..7632780cbde73 100644 --- a/generated/api/v1/cluster_service.swagger.json +++ b/generated/api/v1/cluster_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -51,7 +51,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -79,7 +79,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -112,7 +112,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -134,7 +134,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -162,7 +162,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -190,7 +190,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -389,7 +389,7 @@ "default": "UPGRADE_INITIALIZING", "description": " - UPGRADER_LAUNCHING: 
In-progress states.\n - UPGRADE_COMPLETE: The success state.\nPLEASE NUMBER ALL IN-PROGRESS STATES ABOVE THIS\nAND ALL ERROR STATES BELOW THIS.\n - UPGRADE_INITIALIZATION_ERROR: Error states." }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/common.swagger.json b/generated/api/v1/common.swagger.json index 396e38f455412..97669590fb57b 100644 --- a/generated/api/v1/common.swagger.json +++ b/generated/api/v1/common.swagger.json @@ -12,7 +12,7 @@ ], "paths": {}, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/compliance_management_service.swagger.json b/generated/api/v1/compliance_management_service.swagger.json index b77e015018b8c..e243bbe6ff9f5 100644 --- a/generated/api/v1/compliance_management_service.swagger.json +++ b/generated/api/v1/compliance_management_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -72,7 +72,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -104,7 +104,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -133,7 +133,7 @@ } }, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/compliance_service.swagger.json b/generated/api/v1/compliance_service.swagger.json index eb488ab79cf9a..a185da8b8b6e4 100644 --- a/generated/api/v1/compliance_service.swagger.json +++ b/generated/api/v1/compliance_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -143,7 +143,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -186,7 +186,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -208,7 +208,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -236,7 +236,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -275,6 +275,91 @@ }, "title": "Next available tag: 3" }, + "ComplianceAggregationResult": { + "type": "object", + "properties": { + "aggregationKeys": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/ComplianceAggregationAggregationKey" + } + }, + "unit": { + "$ref": "#/definitions/storageComplianceAggregationScope" + }, + "numPassing": { + "type": "integer", + "format": "int32" + }, + "numFailing": { + "type": "integer", + "format": "int32" + }, + "numSkipped": { + "type": "integer", + "format": "int32" + } + }, + "title": "Next available tag: 5" + }, + "ComplianceDomainCluster": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "name": { + "type": "string" + } + }, + 
"title": "These must mirror the tags _exactly_ in cluster.proto for backwards compatibility" + }, + "ComplianceDomainDeployment": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "type": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "namespaceId": { + "type": "string" + }, + "clusterId": { + "type": "string" + }, + "clusterName": { + "type": "string" + } + }, + "title": "This must mirror the tags _exactly_ in deployment.proto for backwards compatibility" + }, + "ComplianceDomainNode": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "clusterId": { + "type": "string" + }, + "clusterName": { + "type": "string" + } + }, + "title": "These must mirror the tags _exactly_ in node.proto for backwards compatibility" + }, "ComplianceResultValueEvidence": { "type": "object", "properties": { @@ -309,7 +394,7 @@ } } }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { @@ -346,7 +431,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/storageComplianceAggregationResult" + "$ref": "#/definitions/ComplianceAggregationResult" } }, "sources": { @@ -362,34 +447,6 @@ }, "title": "Next available tag: 3" }, - "storageComplianceAggregationResult": { - "type": "object", - "properties": { - "aggregationKeys": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/ComplianceAggregationAggregationKey" - } - }, - "unit": { - "$ref": "#/definitions/storageComplianceAggregationScope" - }, - "numPassing": { - "type": "integer", - "format": "int32" - }, - "numFailing": { - "type": "integer", - "format": "int32" - }, - "numSkipped": { - "type": "integer", - "format": "int32" - } - }, - "title": "Next available tag: 5" - }, "storageComplianceAggregationScope": { "type": "string", "enum": [ @@ -434,80 +491,23 @@ "type": "string" }, "cluster": { - "$ref": "#/definitions/storageComplianceDomainCluster" + "$ref": "#/definitions/ComplianceDomainCluster" }, "nodes": { "type": "object", "additionalProperties": { - "$ref": "#/definitions/storageComplianceDomainNode" + "$ref": "#/definitions/ComplianceDomainNode" } }, "deployments": { "type": "object", "additionalProperties": { - "$ref": "#/definitions/storageComplianceDomainDeployment" + "$ref": "#/definitions/ComplianceDomainDeployment" } } }, "title": "Next available tag: 5" }, - "storageComplianceDomainCluster": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "name": { - "type": "string" - } - }, - "title": "These must mirror the tags _exactly_ in cluster.proto for backwards compatibility" - }, - "storageComplianceDomainDeployment": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "name": { - "type": "string" - }, - "type": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - "namespaceId": { - "type": "string" - }, - "clusterId": { - "type": "string" - }, - "clusterName": { - "type": "string" - } - }, - "title": "This must mirror the tags _exactly_ in deployment.proto for backwards compatibility" - }, - "storageComplianceDomainNode": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "name": { - "type": "string" - }, - "clusterId": { - "type": "string" - }, - "clusterName": { - "type": "string" - } - }, - "title": "These must mirror the tags _exactly_ in node.proto for backwards compatibility" - }, "storageComplianceResultValue": { "type": "object", 
"properties": { diff --git a/generated/api/v1/config_service.swagger.json b/generated/api/v1/config_service.swagger.json index 54b15dba2a781..def9fc0e83052 100644 --- a/generated/api/v1/config_service.swagger.json +++ b/generated/api/v1/config_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -49,7 +49,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -81,7 +81,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -101,7 +101,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -134,7 +134,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -156,7 +156,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -178,7 +178,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -198,7 +198,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -230,7 +230,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -288,6 +288,24 @@ } } }, + "PrometheusMetricsGroup": { + "type": "object", + "properties": { + "gatheringPeriodMinutes": { + "type": "integer", + "format": "int64", + "description": "The gathering period for periodically gathered metrics. If set to zero,\ngathering is disabled." + }, + "descriptors": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/GroupLabels" + }, + "description": "Metric descriptors is a map of metric names to the list of allowed\nlabels." + } + }, + "description": "A group is a collection of metrics that are computed by the same\naggregator. Metrics in a group may use different subsets of a complete list\nof labels supported by the aggregator." + }, "RuleNamespaceRule": { "type": "object", "properties": { @@ -296,7 +314,7 @@ } } }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { @@ -501,35 +519,17 @@ "type": "object", "properties": { "imageVulnerabilities": { - "$ref": "#/definitions/storagePrometheusMetricsGroup" + "$ref": "#/definitions/PrometheusMetricsGroup" }, "policyViolations": { - "$ref": "#/definitions/storagePrometheusMetricsGroup" + "$ref": "#/definitions/PrometheusMetricsGroup" }, "nodeVulnerabilities": { - "$ref": "#/definitions/storagePrometheusMetricsGroup" + "$ref": "#/definitions/PrometheusMetricsGroup" } }, "title": "next available tag: 4" }, - "storagePrometheusMetricsGroup": { - "type": "object", - "properties": { - "gatheringPeriodMinutes": { - "type": "integer", - "format": "int64", - "description": "The gathering period for periodically gathered metrics. If set to zero,\ngathering is disabled." 
- }, - "descriptors": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/GroupLabels" - }, - "description": "Metric descriptors is a map of metric names to the list of allowed\nlabels." - } - }, - "description": "A group is a collection of metrics that are computed by the same\naggregator. Metrics in a group may use different subsets of a complete list\nof labels supported by the aggregator." - }, "storagePublicConfig": { "type": "object", "properties": { diff --git a/generated/api/v1/credential_expiry_service.swagger.json b/generated/api/v1/credential_expiry_service.swagger.json index 8195f86651b3e..f5c32c154bd68 100644 --- a/generated/api/v1/credential_expiry_service.swagger.json +++ b/generated/api/v1/credential_expiry_service.swagger.json @@ -30,7 +30,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -57,7 +57,7 @@ } }, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/cve_service.swagger.json b/generated/api/v1/cve_service.swagger.json index 8851c3e0212e0..53503c19cd0a3 100644 --- a/generated/api/v1/cve_service.swagger.json +++ b/generated/api/v1/cve_service.swagger.json @@ -33,7 +33,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -66,7 +66,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -99,7 +99,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -132,7 +132,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -153,7 +153,7 @@ } }, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/db_service.swagger.json b/generated/api/v1/db_service.swagger.json index 70daa79569467..eff15bc22e485 100644 --- a/generated/api/v1/db_service.swagger.json +++ b/generated/api/v1/db_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -51,7 +51,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -87,7 +87,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -109,7 +109,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -162,7 +162,7 @@ }, "description": "LocalFileInfo provides information about the file on the local machine of the user initiating the restore\nprocess, in order to provide information to other users about ongoing restore processes." 
}, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/debug_service.swagger.json b/generated/api/v1/debug_service.swagger.json index 21ccec829b289..d9370022c94de 100644 --- a/generated/api/v1/debug_service.swagger.json +++ b/generated/api/v1/debug_service.swagger.json @@ -30,7 +30,7 @@ "$ref": "#/definitions/v1AuthorizationTraceResponse" }, "error": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } }, "title": "Stream result of v1AuthorizationTraceResponse" @@ -39,7 +39,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -62,7 +62,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -85,7 +85,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -119,7 +119,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -160,25 +160,58 @@ } } }, - "AuthorizationTraceResponseUserRole": { + "AuthorizationTraceResponseUser": { "type": "object", "properties": { - "name": { + "username": { "type": "string" }, - "permissions": { + "friendlyName": { + "type": "string" + }, + "aggregatedPermissions": { "type": "object", "additionalProperties": { "$ref": "#/definitions/storageAccess" } }, - "accessScopeName": { + "roles": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/UserRole" + } + } + } + }, + "SetBasedLabelSelectorOperator": { + "type": "string", + "enum": [ + "UNKNOWN", + "IN", + "NOT_IN", + "EXISTS", + "NOT_EXISTS" + ], + "default": "UNKNOWN" + }, + "SetBasedLabelSelectorRequirement": { + "type": "object", + "properties": { + "key": { "type": "string" }, - "accessScope": { - "$ref": "#/definitions/SimpleAccessScopeRules" + "op": { + "$ref": "#/definitions/SetBasedLabelSelectorOperator" + }, + "values": { + "type": "array", + "items": { + "type": "string" + } } - } + }, + "title": "Next available tag: 4" }, "SimpleAccessScopeRules": { "type": "object", @@ -258,7 +291,27 @@ } } }, - "googlerpcStatus": { + "UserRole": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "permissions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/storageAccess" + } + }, + "accessScopeName": { + "type": "string" + }, + "accessScope": { + "$ref": "#/definitions/SimpleAccessScopeRules" + } + } + }, + "googleRpcStatus": { "type": "object", "properties": { "code": { @@ -304,41 +357,12 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/storageSetBasedLabelSelectorRequirement" + "$ref": "#/definitions/SetBasedLabelSelectorRequirement" } } }, "description": "SetBasedLabelSelector only allows set-based label requirements.\n\nNext available tag: 3" }, - "storageSetBasedLabelSelectorOperator": { - "type": "string", - "enum": [ - "UNKNOWN", - "IN", - "NOT_IN", - "EXISTS", - "NOT_EXISTS" - ], - "default": "UNKNOWN" - }, - "storageSetBasedLabelSelectorRequirement": { - "type": "object", - "properties": { - "key": { - "type": "string" - }, - "op": { - "$ref": "#/definitions/storageSetBasedLabelSelectorOperator" - }, - "values": { - "type": "array", - "items": 
{ - "type": "string" - } - } - }, - "title": "Next available tag: 4" - }, "v1AuthorizationTraceResponse": { "type": "object", "properties": { @@ -357,7 +381,7 @@ "$ref": "#/definitions/v1AuthorizationTraceResponseResponse" }, "user": { - "$ref": "#/definitions/v1AuthorizationTraceResponseUser" + "$ref": "#/definitions/AuthorizationTraceResponseUser" }, "trace": { "$ref": "#/definitions/AuthorizationTraceResponseTrace" @@ -386,30 +410,6 @@ } } }, - "v1AuthorizationTraceResponseUser": { - "type": "object", - "properties": { - "username": { - "type": "string" - }, - "friendlyName": { - "type": "string" - }, - "aggregatedPermissions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/storageAccess" - } - }, - "roles": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/AuthorizationTraceResponseUserRole" - } - } - } - }, "v1Empty": { "type": "object" }, diff --git a/generated/api/v1/declarative_config_health_service.swagger.json b/generated/api/v1/declarative_config_health_service.swagger.json index a94b35e8a1552..14ef10a3ad2c6 100644 --- a/generated/api/v1/declarative_config_health_service.swagger.json +++ b/generated/api/v1/declarative_config_health_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -40,7 +40,21 @@ } }, "definitions": { - "googlerpcStatus": { + "DeclarativeConfigHealthResourceType": { + "type": "string", + "enum": [ + "CONFIG_MAP", + "ACCESS_SCOPE", + "PERMISSION_SET", + "ROLE", + "AUTH_PROVIDER", + "GROUP", + "NOTIFIER", + "AUTH_MACHINE_TO_MACHINE_CONFIG" + ], + "default": "CONFIG_MAP" + }, + "googleRpcStatus": { "type": "object", "properties": { "code": { @@ -89,7 +103,7 @@ "type": "string" }, "resourceType": { - "$ref": "#/definitions/storageDeclarativeConfigHealthResourceType" + "$ref": "#/definitions/DeclarativeConfigHealthResourceType" }, "lastTimestamp": { "type": "string", @@ -98,20 +112,6 @@ } } }, - "storageDeclarativeConfigHealthResourceType": { - "type": "string", - "enum": [ - "CONFIG_MAP", - "ACCESS_SCOPE", - "PERMISSION_SET", - "ROLE", - "AUTH_PROVIDER", - "GROUP", - "NOTIFIER", - "AUTH_MACHINE_TO_MACHINE_CONFIG" - ], - "default": "CONFIG_MAP" - }, "storageDeclarativeConfigHealthStatus": { "type": "string", "enum": [ diff --git a/generated/api/v1/delegated_registry_config_service.swagger.json b/generated/api/v1/delegated_registry_config_service.swagger.json index 83e3429604bc0..00f59b70a2a22 100644 --- a/generated/api/v1/delegated_registry_config_service.swagger.json +++ b/generated/api/v1/delegated_registry_config_service.swagger.json @@ -30,7 +30,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -51,7 +51,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -85,7 +85,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -119,7 +119,7 @@ "default": "NONE", "title": "- NONE: Scan all images via central services except for images from the OCP integrated registry\n - ALL: Scan all images via the secured clusters\n - SPECIFIC: Scan images that match `registries` or are from the OCP integrated registry via 
the secured clusters\notherwise scan via central services" }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/deployment_service.swagger.json b/generated/api/v1/deployment_service.swagger.json index 30e9b5b29bc7d..c6911170e904f 100644 --- a/generated/api/v1/deployment_service.swagger.json +++ b/generated/api/v1/deployment_service.swagger.json @@ -30,7 +30,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -106,7 +106,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -129,7 +129,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -160,7 +160,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -236,7 +236,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -312,7 +312,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -342,7 +342,7 @@ "$ref": "#/definitions/v1ExportDeploymentResponse" }, "error": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } }, "title": "Stream result of v1ExportDeploymentResponse" @@ -351,7 +351,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -427,6 +427,24 @@ "default": "UNSET", "title": "For any update to EnvVarSource, please also update 'ui/src/messages/common.js'" }, + "LabelSelectorRequirement": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "op": { + "$ref": "#/definitions/storageLabelSelectorOperator" + }, + "values": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "title": "Next available tag: 4" + }, "ListDeploymentsWithProcessInfoResponseDeploymentWithProcessInfo": { "type": "object", "properties": { @@ -506,6 +524,25 @@ } } }, + "RiskResult": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "factors": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/ResultFactor" + } + }, + "score": { + "type": "number", + "format": "float" + } + } + }, "SeccompProfileProfileType": { "type": "string", "enum": [ @@ -552,7 +589,7 @@ ], "default": "NONE" }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { @@ -878,7 +915,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/storageLabelSelectorRequirement" + "$ref": "#/definitions/LabelSelectorRequirement" } } }, @@ -896,24 +933,6 @@ ], "default": "UNKNOWN" }, - "storageLabelSelectorRequirement": { - "type": "object", - "properties": { - "key": { - "type": "string" - }, - "op": { - "$ref": "#/definitions/storageLabelSelectorOperator" - }, - "values": { - "type": "array", - "items": { - "type": "string" - } - } - }, - "title": "Next available tag: 4" - }, "storageListDeployment": { "type": "object", "properties": { @@ 
-1044,30 +1063,11 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/storageRiskResult" + "$ref": "#/definitions/RiskResult" } } } }, - "storageRiskResult": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "factors": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/ResultFactor" - } - }, - "score": { - "type": "number", - "format": "float" - } - } - }, "storageRiskSubject": { "type": "object", "properties": { diff --git a/generated/api/v1/detection_service.swagger.json b/generated/api/v1/detection_service.swagger.json index babea44237b88..af67d97713d71 100644 --- a/generated/api/v1/detection_service.swagger.json +++ b/generated/api/v1/detection_service.swagger.json @@ -30,7 +30,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -63,7 +63,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -96,7 +96,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -150,6 +150,25 @@ ], "default": "UNSET" }, + "AlertNode": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "clusterId": { + "type": "string", + "description": "This field has to be duplicated in Alert for scope management and search." + }, + "clusterName": { + "type": "string", + "description": "This field has to be duplicated in Alert for scope management and search." + } + } + }, "AlertProcessViolation": { "type": "object", "properties": { @@ -165,20 +184,6 @@ } } }, - "AlertResourceResourceType": { - "type": "string", - "enum": [ - "UNKNOWN", - "SECRETS", - "CONFIGMAPS", - "CLUSTER_ROLES", - "CLUSTER_ROLE_BINDINGS", - "NETWORK_POLICIES", - "SECURITY_CONTEXT_CONSTRAINTS", - "EGRESS_FIREWALLS" - ], - "default": "UNKNOWN" - }, "AlertViolation": { "type": "object", "properties": { @@ -262,6 +267,14 @@ "default": "UNSET", "title": "For any update to EnvVarSource, please also update 'ui/src/messages/common.js'" }, + "ExclusionImage": { + "type": "object", + "properties": { + "name": { + "type": "string" + } + } + }, "FileAccessFileMetadata": { "type": "object", "properties": { @@ -308,6 +321,24 @@ } } }, + "LabelSelectorRequirement": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "op": { + "$ref": "#/definitions/storageLabelSelectorOperator" + }, + "values": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "title": "Next available tag: 4" + }, "NetworkFlowInfoEntity": { "type": "object", "properties": { @@ -408,6 +439,20 @@ } } }, + "ResourceResourceType": { + "type": "string", + "enum": [ + "UNKNOWN", + "SECRETS", + "CONFIGMAPS", + "CLUSTER_ROLES", + "CLUSTER_ROLE_BINDINGS", + "NETWORK_POLICIES", + "SECURITY_CONTEXT_CONSTRAINTS", + "EGRESS_FIREWALLS" + ], + "default": "UNKNOWN" + }, "SeccompProfileProfileType": { "type": "string", "enum": [ @@ -480,7 +525,7 @@ ], "default": "NONE" }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { @@ -547,7 +592,7 @@ "title": "Represents an alert on a kubernetes resource (configmaps, secrets, etc.)\nAn alert cannot be on more than one entity (deployment, container image, resource, etc.)" }, "node": { - "$ref": 
"#/definitions/storageAlertNode", + "$ref": "#/definitions/AlertNode", "title": "Represents an alert on a node.\nAn alert cannot be on more than one entity (deployment, container image, resource, etc.)" }, "violations": { @@ -641,30 +686,11 @@ } } }, - "storageAlertNode": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "name": { - "type": "string" - }, - "clusterId": { - "type": "string", - "description": "This field has to be duplicated in Alert for scope management and search." - }, - "clusterName": { - "type": "string", - "description": "This field has to be duplicated in Alert for scope management and search." - } - } - }, "storageAlertResource": { "type": "object", "properties": { "resourceType": { - "$ref": "#/definitions/AlertResourceResourceType" + "$ref": "#/definitions/ResourceResourceType" }, "name": { "type": "string" @@ -981,7 +1007,7 @@ "$ref": "#/definitions/storageExclusionDeployment" }, "image": { - "$ref": "#/definitions/storageExclusionImage" + "$ref": "#/definitions/ExclusionImage" }, "expiration": { "type": "string", @@ -1000,14 +1026,6 @@ } } }, - "storageExclusionImage": { - "type": "object", - "properties": { - "name": { - "type": "string" - } - } - }, "storageFileAccess": { "type": "object", "properties": { @@ -1097,7 +1115,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/storageLabelSelectorRequirement" + "$ref": "#/definitions/LabelSelectorRequirement" } } }, @@ -1115,24 +1133,6 @@ ], "default": "UNKNOWN" }, - "storageLabelSelectorRequirement": { - "type": "object", - "properties": { - "key": { - "type": "string" - }, - "op": { - "$ref": "#/definitions/storageLabelSelectorOperator" - }, - "values": { - "type": "array", - "items": { - "type": "string" - } - } - }, - "title": "Next available tag: 4" - }, "storageLifecycleStage": { "type": "string", "enum": [ diff --git a/generated/api/v1/discovered_cluster_service.swagger.json b/generated/api/v1/discovered_cluster_service.swagger.json index 80f817942ad26..3ae228bd8d480 100644 --- a/generated/api/v1/discovered_cluster_service.swagger.json +++ b/generated/api/v1/discovered_cluster_service.swagger.json @@ -30,7 +30,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -114,7 +114,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -243,7 +243,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -286,7 +286,7 @@ ], "default": "PROVIDER_TYPE_UNSPECIFIED" }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/empty.swagger.json b/generated/api/v1/empty.swagger.json index dfb58f424ab21..c12984dc079b7 100644 --- a/generated/api/v1/empty.swagger.json +++ b/generated/api/v1/empty.swagger.json @@ -12,7 +12,7 @@ ], "paths": {}, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/feature_flag_service.swagger.json b/generated/api/v1/feature_flag_service.swagger.json index f900753390c84..795e4e2532902 100644 --- a/generated/api/v1/feature_flag_service.swagger.json +++ b/generated/api/v1/feature_flag_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An 
unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -40,7 +40,7 @@ } }, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/group_service.swagger.json b/generated/api/v1/group_service.swagger.json index cae2d857b80b6..f625fddcf5e00 100644 --- a/generated/api/v1/group_service.swagger.json +++ b/generated/api/v1/group_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -122,7 +122,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -168,7 +168,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -221,7 +221,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -252,7 +252,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -290,7 +290,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -311,7 +311,7 @@ } }, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/grpc_preference_service.swagger.json b/generated/api/v1/grpc_preference_service.swagger.json index 123147676b687..dc3c236fd6264 100644 --- a/generated/api/v1/grpc_preference_service.swagger.json +++ b/generated/api/v1/grpc_preference_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -40,7 +40,7 @@ } }, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/image_integration_service.swagger.json b/generated/api/v1/image_integration_service.swagger.json index 726c27cd57962..1385bd1518eb2 100644 --- a/generated/api/v1/image_integration_service.swagger.json +++ b/generated/api/v1/image_integration_service.swagger.json @@ -30,7 +30,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -65,7 +65,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -98,7 +98,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -131,7 +131,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -164,7 +164,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -203,7 
+203,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -232,7 +232,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -261,7 +261,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -446,7 +446,7 @@ }, "title": "Robot account is Quay's named tokens that can be granted permissions on multiple repositories under an organization.\nIt's Quay's recommended authentication model when possible (i.e. registry integration)" }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/image_service.swagger.json b/generated/api/v1/image_service.swagger.json index 962d131a39716..9c5fc898f84ab 100644 --- a/generated/api/v1/image_service.swagger.json +++ b/generated/api/v1/image_service.swagger.json @@ -29,7 +29,7 @@ "$ref": "#/definitions/v1ExportImageResponse" }, "error": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } }, "title": "Stream result of v1ExportImageResponse" @@ -38,7 +38,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -76,7 +76,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -150,7 +150,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -232,7 +232,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -255,7 +255,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -288,7 +288,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -331,7 +331,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -407,7 +407,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -428,7 +428,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -458,7 +458,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -522,6 +522,16 @@ ], "default": "UI_NONE" }, + "EmbeddedVulnerabilityScoreVersion": { + "type": "string", + "enum": [ + "V2", + "V3" + ], + "default": "V2", + "description": "- V2: No unset for automatic backwards compatibility", + "title": "ScoreVersion can be deprecated ROX-26066" + }, "EmbeddedVulnerabilityVulnerabilityType": { "type": "string", "enum": [ @@ -544,7 +554,7 
@@ ], "default": "NO_ERROR" }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { @@ -929,7 +939,7 @@ "type": "string" }, "scoreVersion": { - "$ref": "#/definitions/storageEmbeddedVulnerabilityScoreVersion" + "$ref": "#/definitions/EmbeddedVulnerabilityScoreVersion" }, "cvssV2": { "$ref": "#/definitions/storageCVSSV2", @@ -1010,16 +1020,6 @@ }, "title": "Next Tag: 27" }, - "storageEmbeddedVulnerabilityScoreVersion": { - "type": "string", - "enum": [ - "V2", - "V3" - ], - "default": "V2", - "description": "- V2: No unset for automatic backwards compatibility", - "title": "ScoreVersion can be deprecated ROX-26066" - }, "storageImage": { "type": "object", "properties": { diff --git a/generated/api/v1/integration_health_service.swagger.json b/generated/api/v1/integration_health_service.swagger.json index 881b1c07c778b..272471a6b5b69 100644 --- a/generated/api/v1/integration_health_service.swagger.json +++ b/generated/api/v1/integration_health_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -51,7 +51,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -73,7 +73,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -95,7 +95,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -117,7 +117,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -141,7 +141,7 @@ } }, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/metadata_service.swagger.json b/generated/api/v1/metadata_service.swagger.json index 5031383e3ecea..d37cf22dc91a4 100644 --- a/generated/api/v1/metadata_service.swagger.json +++ b/generated/api/v1/metadata_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -51,7 +51,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -73,7 +73,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -95,7 +95,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -119,7 +119,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -168,7 +168,7 @@ ], "default": "NONE" }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/mitre_service.swagger.json b/generated/api/v1/mitre_service.swagger.json index 1182bbf01e63b..79ccd8044ec9b 100644 --- a/generated/api/v1/mitre_service.swagger.json 
+++ b/generated/api/v1/mitre_service.swagger.json @@ -30,7 +30,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -53,7 +53,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -72,7 +72,7 @@ } }, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/namespace_service.swagger.json b/generated/api/v1/namespace_service.swagger.json index da912868e7256..3574ad8fb145a 100644 --- a/generated/api/v1/namespace_service.swagger.json +++ b/generated/api/v1/namespace_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -104,7 +104,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -123,7 +123,7 @@ } }, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/network_baseline_service.swagger.json b/generated/api/v1/network_baseline_service.swagger.json index 83151d85510d8..38d7a0bb1d716 100644 --- a/generated/api/v1/network_baseline_service.swagger.json +++ b/generated/api/v1/network_baseline_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -67,7 +67,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -105,7 +105,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -193,7 +193,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -223,7 +223,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -261,7 +261,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -330,6 +330,27 @@ "NetworkBaselineServiceUnlockNetworkBaselineBody": { "type": "object" }, + "NetworkEntityInfoDeployment": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "cluster": { + "type": "string" + }, + "listenPorts": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/DeploymentListenPort" + } + } + } + }, "NetworkEntityInfoExternalSource": { "type": "object", "properties": { @@ -350,7 +371,7 @@ }, "description": "Update normalizeDupNameExtSrcs(...) in `central/networkgraph/aggregator/aggregator.go` whenever this message is updated." 
}, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { @@ -492,34 +513,13 @@ "type": "string" }, "deployment": { - "$ref": "#/definitions/storageNetworkEntityInfoDeployment" + "$ref": "#/definitions/NetworkEntityInfoDeployment" }, "externalSource": { "$ref": "#/definitions/NetworkEntityInfoExternalSource" } } }, - "storageNetworkEntityInfoDeployment": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - "cluster": { - "type": "string" - }, - "listenPorts": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/DeploymentListenPort" - } - } - } - }, "storageNetworkEntityInfoType": { "type": "string", "enum": [ diff --git a/generated/api/v1/network_graph_service.swagger.json b/generated/api/v1/network_graph_service.swagger.json index 679ec2b0d5fa2..841f6bd97449b 100644 --- a/generated/api/v1/network_graph_service.swagger.json +++ b/generated/api/v1/network_graph_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -90,7 +90,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -124,7 +124,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -162,7 +162,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -250,7 +250,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -344,7 +344,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -364,7 +364,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -396,7 +396,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -424,7 +424,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -463,6 +463,27 @@ } } }, + "NetworkEntityInfoDeployment": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "cluster": { + "type": "string" + }, + "listenPorts": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/DeploymentListenPort" + } + } + } + }, "NetworkEntityInfoExternalSource": { "type": "object", "properties": { @@ -499,7 +520,7 @@ } } }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { @@ -564,34 +585,13 @@ "type": "string" }, "deployment": { - "$ref": "#/definitions/storageNetworkEntityInfoDeployment" + "$ref": "#/definitions/NetworkEntityInfoDeployment" }, "externalSource": { "$ref": "#/definitions/NetworkEntityInfoExternalSource" } } }, - "storageNetworkEntityInfoDeployment": { - "type": "object", - "properties": { - 
"name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - "cluster": { - "type": "string" - }, - "listenPorts": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/DeploymentListenPort" - } - } - } - }, "storageNetworkEntityInfoType": { "type": "string", "enum": [ diff --git a/generated/api/v1/network_policy_service.swagger.json b/generated/api/v1/network_policy_service.swagger.json index ae75bb10bff23..6cae046bb1aa1 100644 --- a/generated/api/v1/network_policy_service.swagger.json +++ b/generated/api/v1/network_policy_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -71,7 +71,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -101,7 +101,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -139,7 +139,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -177,7 +177,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -207,7 +207,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -256,7 +256,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -294,7 +294,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -357,7 +357,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -387,7 +387,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -450,7 +450,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -498,7 +498,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -528,7 +528,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -558,7 +558,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -588,7 +588,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -630,6 +630,56 @@ "default": "UNKNOWN", "description": " - NONE: Do not delete any existing network policies.\n - GENERATED_ONLY: Delete any existing *auto-generated* network policies.\n - ALL: Delete 
all existing network policies in the respective namespace." }, + "LabelSelectorOperator": { + "type": "string", + "enum": [ + "UNKNOWN", + "IN", + "NOT_IN", + "EXISTS", + "NOT_EXISTS" + ], + "default": "UNKNOWN" + }, + "LabelSelectorRequirement": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "op": { + "$ref": "#/definitions/LabelSelectorOperator" + }, + "values": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "title": "Next available tag: 4" + }, + "NetworkEntityInfoDeployment": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "cluster": { + "type": "string" + }, + "listenPorts": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/DeploymentListenPort" + } + } + } + }, "NetworkEntityInfoExternalSource": { "type": "object", "properties": { @@ -669,7 +719,7 @@ } } }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { @@ -740,42 +790,13 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/storageLabelSelectorRequirement" + "$ref": "#/definitions/LabelSelectorRequirement" } } }, "description": "Next available tag: 3", "title": "Label selector components are joined with logical AND, see\n https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/" }, - "storageLabelSelectorOperator": { - "type": "string", - "enum": [ - "UNKNOWN", - "IN", - "NOT_IN", - "EXISTS", - "NOT_EXISTS" - ], - "default": "UNKNOWN" - }, - "storageLabelSelectorRequirement": { - "type": "object", - "properties": { - "key": { - "type": "string" - }, - "op": { - "$ref": "#/definitions/storageLabelSelectorOperator" - }, - "values": { - "type": "array", - "items": { - "type": "string" - } - } - }, - "title": "Next available tag: 4" - }, "storageNetworkBaselineConnectionProperties": { "type": "object", "properties": { @@ -804,34 +825,13 @@ "type": "string" }, "deployment": { - "$ref": "#/definitions/storageNetworkEntityInfoDeployment" + "$ref": "#/definitions/NetworkEntityInfoDeployment" }, "externalSource": { "$ref": "#/definitions/NetworkEntityInfoExternalSource" } } }, - "storageNetworkEntityInfoDeployment": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - "cluster": { - "type": "string" - }, - "listenPorts": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/DeploymentListenPort" - } - } - } - }, "storageNetworkEntityInfoType": { "type": "string", "enum": [ diff --git a/generated/api/v1/node_service.swagger.json b/generated/api/v1/node_service.swagger.json index 80e5a0bab913f..c231c8045878d 100644 --- a/generated/api/v1/node_service.swagger.json +++ b/generated/api/v1/node_service.swagger.json @@ -29,7 +29,7 @@ "$ref": "#/definitions/v1ExportNodeResponse" }, "error": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } }, "title": "Stream result of v1ExportNodeResponse" @@ -38,7 +38,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -75,7 +75,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -105,7 +105,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": 
"#/definitions/googleRpcStatus" } } }, @@ -130,6 +130,20 @@ } }, "definitions": { + "CVEInfoReference": { + "type": "object", + "properties": { + "URI": { + "type": "string" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, "CVSSV2AccessComplexity": { "type": "string", "enum": [ @@ -193,7 +207,7 @@ ], "default": "SCANNER" }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { @@ -274,7 +288,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/storageCVEInfoReference" + "$ref": "#/definitions/CVEInfoReference" } }, "cvssMetrics": { @@ -290,20 +304,6 @@ } } }, - "storageCVEInfoReference": { - "type": "object", - "properties": { - "URI": { - "type": "string" - }, - "tags": { - "type": "array", - "items": { - "type": "string" - } - } - } - }, "storageCVEInfoScoreVersion": { "type": "string", "enum": [ diff --git a/generated/api/v1/notifications.swagger.json b/generated/api/v1/notifications.swagger.json index ff10e5a63107b..bf06cde08ea7f 100644 --- a/generated/api/v1/notifications.swagger.json +++ b/generated/api/v1/notifications.swagger.json @@ -12,7 +12,7 @@ ], "paths": {}, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/notifier_service.swagger.json b/generated/api/v1/notifier_service.swagger.json index 74d23e5c7a315..2f8c1b5efbed0 100644 --- a/generated/api/v1/notifier_service.swagger.json +++ b/generated/api/v1/notifier_service.swagger.json @@ -30,7 +30,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -51,7 +51,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -84,7 +84,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -117,7 +117,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -150,7 +150,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -179,7 +179,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -214,7 +214,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -253,7 +253,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -492,7 +492,7 @@ } } }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/pagination.swagger.json b/generated/api/v1/pagination.swagger.json index 7d320bf7dc476..be9172d3eac84 100644 --- a/generated/api/v1/pagination.swagger.json +++ b/generated/api/v1/pagination.swagger.json @@ -12,7 +12,7 @@ ], "paths": {}, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git 
a/generated/api/v1/ping_service.swagger.json b/generated/api/v1/ping_service.swagger.json index 1b04bed5ed1d6..192bd11d5784b 100644 --- a/generated/api/v1/ping_service.swagger.json +++ b/generated/api/v1/ping_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -40,7 +40,7 @@ } }, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/pod_service.swagger.json b/generated/api/v1/pod_service.swagger.json index 48cea0e72910d..cb2ff3dfc3dd6 100644 --- a/generated/api/v1/pod_service.swagger.json +++ b/generated/api/v1/pod_service.swagger.json @@ -29,7 +29,7 @@ "$ref": "#/definitions/v1ExportPodResponse" }, "error": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } }, "title": "Stream result of v1ExportPodResponse" @@ -38,7 +38,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -76,7 +76,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -152,7 +152,7 @@ } } }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/policy_category_service.swagger.json b/generated/api/v1/policy_category_service.swagger.json index d5cb6e16f1d38..7c631508e36af 100644 --- a/generated/api/v1/policy_category_service.swagger.json +++ b/generated/api/v1/policy_category_service.swagger.json @@ -30,7 +30,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -104,7 +104,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -135,7 +135,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -168,7 +168,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -197,7 +197,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -216,7 +216,7 @@ } }, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/policy_service.swagger.json b/generated/api/v1/policy_service.swagger.json index 628389e01e3ad..97f080b6d1e12 100644 --- a/generated/api/v1/policy_service.swagger.json +++ b/generated/api/v1/policy_service.swagger.json @@ -30,7 +30,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -104,7 +104,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -143,7 +143,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": 
"#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -175,7 +175,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -207,7 +207,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -235,7 +235,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -266,7 +266,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -298,7 +298,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -331,7 +331,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -364,7 +364,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -387,7 +387,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -416,7 +416,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -445,7 +445,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -482,7 +482,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -521,7 +521,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -559,7 +559,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -598,7 +598,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -609,6 +609,39 @@ } }, "definitions": { + "DryRunResponseAlert": { + "type": "object", + "properties": { + "deployment": { + "type": "string" + }, + "violations": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "ExclusionDeployment": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "scope": { + "$ref": "#/definitions/storageScope" + } + } + }, + "ExclusionImage": { + "type": "object", + "properties": { + "name": { + "type": "string" + } + } + }, "PolicyMitreAttackVectors": { "type": "object", "properties": { @@ -771,7 +804,7 @@ }, "title": "Next tag: 28" }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { @@ -841,10 +874,10 @@ "type": "string" }, "deployment": { - "$ref": "#/definitions/storageExclusionDeployment" + "$ref": "#/definitions/ExclusionDeployment" }, "image": { - "$ref": 
"#/definitions/storageExclusionImage" + "$ref": "#/definitions/ExclusionImage" }, "expiration": { "type": "string", @@ -852,25 +885,6 @@ } } }, - "storageExclusionDeployment": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "scope": { - "$ref": "#/definitions/storageScope" - } - } - }, - "storageExclusionImage": { - "type": "object", - "properties": { - "name": { - "type": "string" - } - } - }, "storageExportPoliciesResponse": { "type": "object", "properties": { @@ -1249,21 +1263,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/v1DryRunResponseAlert" - } - } - } - }, - "v1DryRunResponseAlert": { - "type": "object", - "properties": { - "deployment": { - "type": "string" - }, - "violations": { - "type": "array", - "items": { - "type": "string" + "$ref": "#/definitions/DryRunResponseAlert" } } } diff --git a/generated/api/v1/probe_upload_service.swagger.json b/generated/api/v1/probe_upload_service.swagger.json index a37ed6c97b29b..9a6a0c138ca85 100644 --- a/generated/api/v1/probe_upload_service.swagger.json +++ b/generated/api/v1/probe_upload_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -52,7 +52,7 @@ } }, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/process_baseline_service.swagger.json b/generated/api/v1/process_baseline_service.swagger.json index 9231b89cd6399..8ab759940ec00 100644 --- a/generated/api/v1/process_baseline_service.swagger.json +++ b/generated/api/v1/process_baseline_service.swagger.json @@ -30,7 +30,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -65,7 +65,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -98,7 +98,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -131,7 +131,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -164,7 +164,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -214,7 +214,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -235,7 +235,7 @@ } }, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/process_listening_on_port_service.swagger.json b/generated/api/v1/process_listening_on_port_service.swagger.json index e56cd2fe99d4f..7953707c2f48a 100644 --- a/generated/api/v1/process_listening_on_port_service.swagger.json +++ b/generated/api/v1/process_listening_on_port_service.swagger.json @@ -30,7 +30,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -118,7 +118,7 @@ } } }, - "googlerpcStatus": { + "googleRpcStatus": { "type": 
"object", "properties": { "code": { diff --git a/generated/api/v1/process_service.swagger.json b/generated/api/v1/process_service.swagger.json index c85a7378de841..c517bfb0d3de2 100644 --- a/generated/api/v1/process_service.swagger.json +++ b/generated/api/v1/process_service.swagger.json @@ -30,7 +30,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -106,7 +106,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -137,7 +137,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -168,7 +168,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -199,7 +199,7 @@ } } }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/rbac_service.swagger.json b/generated/api/v1/rbac_service.swagger.json index 50ae097b114bf..7135cbec87026 100644 --- a/generated/api/v1/rbac_service.swagger.json +++ b/generated/api/v1/rbac_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -104,7 +104,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -134,7 +134,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -209,7 +209,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -240,7 +240,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -270,7 +270,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -334,7 +334,7 @@ } }, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/report_configuration_service.swagger.json b/generated/api/v1/report_configuration_service.swagger.json index a391aeb99bee1..2e9a8e2a6dea6 100644 --- a/generated/api/v1/report_configuration_service.swagger.json +++ b/generated/api/v1/report_configuration_service.swagger.json @@ -30,7 +30,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -105,7 +105,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -179,7 +179,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -211,7 +211,7 @@ "default": { "description": "An unexpected error response.", 
"schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -240,7 +240,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -269,7 +269,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -364,6 +364,35 @@ } } }, + "SetBasedLabelSelectorOperator": { + "type": "string", + "enum": [ + "UNKNOWN", + "IN", + "NOT_IN", + "EXISTS", + "NOT_EXISTS" + ], + "default": "UNKNOWN" + }, + "SetBasedLabelSelectorRequirement": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "op": { + "$ref": "#/definitions/SetBasedLabelSelectorOperator" + }, + "values": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "title": "Next available tag: 4" + }, "SimpleAccessScopeRules": { "type": "object", "properties": { @@ -426,7 +455,7 @@ ], "default": "DEPLOYED" }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { @@ -596,41 +625,12 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/storageSetBasedLabelSelectorRequirement" + "$ref": "#/definitions/SetBasedLabelSelectorRequirement" } } }, "description": "SetBasedLabelSelector only allows set-based label requirements.\n\nNext available tag: 3" }, - "storageSetBasedLabelSelectorOperator": { - "type": "string", - "enum": [ - "UNKNOWN", - "IN", - "NOT_IN", - "EXISTS", - "NOT_EXISTS" - ], - "default": "UNKNOWN" - }, - "storageSetBasedLabelSelectorRequirement": { - "type": "object", - "properties": { - "key": { - "type": "string" - }, - "op": { - "$ref": "#/definitions/storageSetBasedLabelSelectorOperator" - }, - "values": { - "type": "array", - "items": { - "type": "string" - } - } - }, - "title": "Next available tag: 4" - }, "storageSlimUser": { "type": "object", "properties": { diff --git a/generated/api/v1/report_service.swagger.json b/generated/api/v1/report_service.swagger.json index 5dcb415149872..eb11e40828aa0 100644 --- a/generated/api/v1/report_service.swagger.json +++ b/generated/api/v1/report_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -48,7 +48,7 @@ } }, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/resource_collection_service.swagger.json b/generated/api/v1/resource_collection_service.swagger.json index b03995e23bd33..dfa43cc273c41 100644 --- a/generated/api/v1/resource_collection_service.swagger.json +++ b/generated/api/v1/resource_collection_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -102,7 +102,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -134,7 +134,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -166,7 +166,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": 
"#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -188,7 +188,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -273,7 +273,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -301,7 +301,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -339,7 +339,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -436,7 +436,7 @@ } } }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/role_service.swagger.json b/generated/api/v1/role_service.swagger.json index c286bd0c0e8b0..013d95c2c5e72 100644 --- a/generated/api/v1/role_service.swagger.json +++ b/generated/api/v1/role_service.swagger.json @@ -31,7 +31,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -75,7 +75,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -97,7 +97,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -119,7 +119,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -152,7 +152,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -180,7 +180,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -208,7 +208,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -247,7 +247,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -269,7 +269,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -291,7 +291,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -319,7 +319,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -349,7 +349,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -385,7 +385,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" 
} } }, @@ -426,7 +426,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -507,7 +507,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -547,7 +547,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -569,7 +569,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -602,7 +602,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -630,7 +630,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -658,7 +658,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -703,6 +703,33 @@ } } }, + "EffectiveAccessScopeCluster": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "state": { + "$ref": "#/definitions/storageEffectiveAccessScopeState" + }, + "labels": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "namespaces": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/storageEffectiveAccessScopeNamespace" + } + } + } + }, "RoleServicePutPermissionSetBody": { "type": "object", "properties": { @@ -774,6 +801,35 @@ }, "description": "A role specifies which actions are allowed for which subset of cluster\nobjects. Permissions be can either specified directly via setting\nresource_to_access together with global_access or by referencing a\npermission set by its id in permission_set_name." }, + "SetBasedLabelSelectorOperator": { + "type": "string", + "enum": [ + "UNKNOWN", + "IN", + "NOT_IN", + "EXISTS", + "NOT_EXISTS" + ], + "default": "UNKNOWN" + }, + "SetBasedLabelSelectorRequirement": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "op": { + "$ref": "#/definitions/SetBasedLabelSelectorOperator" + }, + "values": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "title": "Next available tag: 4" + }, "SimpleAccessScopeRules": { "type": "object", "properties": { @@ -819,7 +875,7 @@ } } }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { @@ -865,39 +921,12 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/storageEffectiveAccessScopeCluster" + "$ref": "#/definitions/EffectiveAccessScopeCluster" } } }, "description": "EffectiveAccessScope describes which clusters and namespaces are \"in scope\"\ngiven current state. Basically, if AccessScope is applied to the currently\nknown clusters and namespaces, the result is EffectiveAccessScope.\n\nEffectiveAccessScope represents a tree with nodes marked as included and\nexcluded. If a node is included, all its child nodes are included." 
}, - "storageEffectiveAccessScopeCluster": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "name": { - "type": "string" - }, - "state": { - "$ref": "#/definitions/storageEffectiveAccessScopeState" - }, - "labels": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "namespaces": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/storageEffectiveAccessScopeNamespace" - } - } - } - }, "storageEffectiveAccessScopeNamespace": { "type": "object", "properties": { @@ -995,41 +1024,12 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/storageSetBasedLabelSelectorRequirement" + "$ref": "#/definitions/SetBasedLabelSelectorRequirement" } } }, "description": "SetBasedLabelSelector only allows set-based label requirements.\n\nNext available tag: 3" }, - "storageSetBasedLabelSelectorOperator": { - "type": "string", - "enum": [ - "UNKNOWN", - "IN", - "NOT_IN", - "EXISTS", - "NOT_EXISTS" - ], - "default": "UNKNOWN" - }, - "storageSetBasedLabelSelectorRequirement": { - "type": "object", - "properties": { - "key": { - "type": "string" - }, - "op": { - "$ref": "#/definitions/storageSetBasedLabelSelectorOperator" - }, - "values": { - "type": "array", - "items": { - "type": "string" - } - } - }, - "title": "Next available tag: 4" - }, "storageSimpleAccessScope": { "type": "object", "properties": { diff --git a/generated/api/v1/sbom.swagger.json b/generated/api/v1/sbom.swagger.json index 7010259cff871..7354252383621 100644 --- a/generated/api/v1/sbom.swagger.json +++ b/generated/api/v1/sbom.swagger.json @@ -12,7 +12,7 @@ ], "paths": {}, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/search_service.swagger.json b/generated/api/v1/search_service.swagger.json index ef767c963b9d2..d05104c7d608b 100644 --- a/generated/api/v1/search_service.swagger.json +++ b/generated/api/v1/search_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -147,7 +147,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -265,7 +265,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -389,7 +389,7 @@ } } }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/secret_service.swagger.json b/generated/api/v1/secret_service.swagger.json index 7776c31bc5429..c7797ae9d4169 100644 --- a/generated/api/v1/secret_service.swagger.json +++ b/generated/api/v1/secret_service.swagger.json @@ -30,7 +30,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -106,7 +106,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -137,7 +137,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -212,7 +212,7 @@ } } }, - "googlerpcStatus": { + "googleRpcStatus": { "type": 
"object", "properties": { "code": { diff --git a/generated/api/v1/sensor_upgrade_service.swagger.json b/generated/api/v1/sensor_upgrade_service.swagger.json index c457d9a347969..a324e351aea17 100644 --- a/generated/api/v1/sensor_upgrade_service.swagger.json +++ b/generated/api/v1/sensor_upgrade_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -59,7 +59,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -79,7 +79,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -111,7 +111,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -149,7 +149,7 @@ } } }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/service_account_service.swagger.json b/generated/api/v1/service_account_service.swagger.json index 6b52d09e7d7f7..3ef7af6e0bb10 100644 --- a/generated/api/v1/service_account_service.swagger.json +++ b/generated/api/v1/service_account_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -104,7 +104,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -123,7 +123,7 @@ } }, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/service_identity_service.swagger.json b/generated/api/v1/service_identity_service.swagger.json index ad49087876947..2ff99c15cd7c3 100644 --- a/generated/api/v1/service_identity_service.swagger.json +++ b/generated/api/v1/service_identity_service.swagger.json @@ -30,7 +30,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -52,7 +52,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -73,7 +73,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -94,7 +94,7 @@ } }, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/signal.swagger.json b/generated/api/v1/signal.swagger.json index 14b9e57179b61..20d4dc83d6133 100644 --- a/generated/api/v1/signal.swagger.json +++ b/generated/api/v1/signal.swagger.json @@ -12,7 +12,7 @@ ], "paths": {}, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/signature_integration_service.swagger.json b/generated/api/v1/signature_integration_service.swagger.json index 961e29549b7f8..5a0e5be383376 100644 --- a/generated/api/v1/signature_integration_service.swagger.json +++ 
b/generated/api/v1/signature_integration_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -50,7 +50,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -82,7 +82,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -110,7 +110,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -138,7 +138,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -200,7 +200,7 @@ } } }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/telemetry_service.swagger.json b/generated/api/v1/telemetry_service.swagger.json index 39e82eda58185..57e18860196cb 100644 --- a/generated/api/v1/telemetry_service.swagger.json +++ b/generated/api/v1/telemetry_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -51,7 +51,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -73,7 +73,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -93,7 +93,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -131,7 +131,7 @@ } } }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/traits.swagger.json b/generated/api/v1/traits.swagger.json index 7396a3fb1b22b..e039a2b7b6272 100644 --- a/generated/api/v1/traits.swagger.json +++ b/generated/api/v1/traits.swagger.json @@ -12,7 +12,7 @@ ], "paths": {}, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/user_service.swagger.json b/generated/api/v1/user_service.swagger.json index 0c74202795587..77a011bb1f480 100644 --- a/generated/api/v1/user_service.swagger.json +++ b/generated/api/v1/user_service.swagger.json @@ -29,7 +29,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -51,7 +51,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -81,7 +81,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -92,7 +92,7 @@ } }, "definitions": { - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { diff --git a/generated/api/v1/vuln_mgmt_service.swagger.json 
b/generated/api/v1/vuln_mgmt_service.swagger.json index 3cf96975499a8..0394709bbb261 100644 --- a/generated/api/v1/vuln_mgmt_service.swagger.json +++ b/generated/api/v1/vuln_mgmt_service.swagger.json @@ -31,7 +31,7 @@ "$ref": "#/definitions/v1VulnMgmtExportWorkloadsResponse" }, "error": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } }, "title": "Stream result of v1VulnMgmtExportWorkloadsResponse" @@ -40,7 +40,7 @@ "default": { "description": "An unexpected error response.", "schema": { - "$ref": "#/definitions/googlerpcStatus" + "$ref": "#/definitions/googleRpcStatus" } } }, @@ -125,6 +125,30 @@ } } }, + "EmbeddedImageScanComponentExecutable": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "dependencies": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "EmbeddedVulnerabilityScoreVersion": { + "type": "string", + "enum": [ + "V2", + "V3" + ], + "default": "V2", + "description": "- V2: No unset for automatic backwards compatibility", + "title": "ScoreVersion can be deprecated ROX-26066" + }, "EmbeddedVulnerabilityVulnerabilityType": { "type": "string", "enum": [ @@ -151,6 +175,24 @@ "default": "UNSET", "title": "For any update to EnvVarSource, please also update 'ui/src/messages/common.js'" }, + "LabelSelectorRequirement": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "op": { + "$ref": "#/definitions/storageLabelSelectorOperator" + }, + "values": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "title": "Next available tag: 4" + }, "PortConfigExposureInfo": { "type": "object", "properties": { @@ -250,7 +292,7 @@ ], "default": "NONE" }, - "googlerpcStatus": { + "googleRpcStatus": { "type": "object", "properties": { "code": { @@ -827,7 +869,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/storageEmbeddedImageScanComponentExecutable" + "$ref": "#/definitions/EmbeddedImageScanComponentExecutable" }, "title": "Values are cleared after moving to cache, remove them from the grpc return as well" }, @@ -837,20 +879,6 @@ }, "title": "Next Tag: 14" }, - "storageEmbeddedImageScanComponentExecutable": { - "type": "object", - "properties": { - "path": { - "type": "string" - }, - "dependencies": { - "type": "array", - "items": { - "type": "string" - } - } - } - }, "storageEmbeddedSecret": { "type": "object", "properties": { @@ -885,7 +913,7 @@ "type": "string" }, "scoreVersion": { - "$ref": "#/definitions/storageEmbeddedVulnerabilityScoreVersion" + "$ref": "#/definitions/EmbeddedVulnerabilityScoreVersion" }, "cvssV2": { "$ref": "#/definitions/storageCVSSV2", @@ -966,16 +994,6 @@ }, "title": "Next Tag: 27" }, - "storageEmbeddedVulnerabilityScoreVersion": { - "type": "string", - "enum": [ - "V2", - "V3" - ], - "default": "V2", - "description": "- V2: No unset for automatic backwards compatibility", - "title": "ScoreVersion can be deprecated ROX-26066" - }, "storageImage": { "type": "object", "properties": { @@ -1270,7 +1288,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/storageLabelSelectorRequirement" + "$ref": "#/definitions/LabelSelectorRequirement" } } }, @@ -1288,24 +1306,6 @@ ], "default": "UNKNOWN" }, - "storageLabelSelectorRequirement": { - "type": "object", - "properties": { - "key": { - "type": "string" - }, - "op": { - "$ref": "#/definitions/storageLabelSelectorOperator" - }, - "values": { - "type": "array", - "items": { - "type": "string" - } - } - }, - "title": "Next available tag: 4" - }, 
"storageLicense": { "type": "object", "properties": { diff --git a/go.mod b/go.mod index b4c5f0eafc2ff..3bfbdfc330942 100644 --- a/go.mod +++ b/go.mod @@ -68,7 +68,7 @@ require ( github.com/graph-gophers/graphql-go v1.5.0 github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-retryablehttp v0.7.8 github.com/hashicorp/go-version v1.8.0 @@ -152,7 +152,7 @@ require ( golang.stackrox.io/grpc-http1 v0.5.1 google.golang.org/api v0.267.0 google.golang.org/genproto v0.0.0-20260128011058-8636f8732409 - google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20 + google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 google.golang.org/grpc v1.79.1 google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6 google.golang.org/protobuf v1.36.11 @@ -504,7 +504,7 @@ require ( go.yaml.in/yaml/v2 v2.4.3 // indirect golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 // indirect gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.1 // indirect diff --git a/go.sum b/go.sum index 94e079032a47d..6d72dd91c2041 100644 --- a/go.sum +++ b/go.sum @@ -885,8 +885,8 @@ github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3/go.mod h1:NbCUVmiS4foBGBH github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 h1:JYghRBlGCZyCF2wNUJ8W0cwaQdtpcssJ4CgC406g+WU= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99/go.mod h1:3bDW6wMZJB7tiONtC/1Xpicra6Wp5GgbTbQWCbI5fkc= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4 h1:kEISI/Gx67NzH3nJxAmY/dGac80kKZgZt134u7Y/k1s= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4/go.mod h1:6Nz966r3vQYCqIzWsuEl9d7cf7mRhtDmm++sOxlnfxI= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz+PMpZ14Jynv3O2Zs= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0/go.mod h1:JfhWUomR1baixubs02l85lZYYOm7LV6om4ceouMv45c= github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -2252,10 +2252,10 @@ google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20260128011058-8636f8732409 h1:VQZ/yAbAtjkHgH80teYd2em3xtIkkHd7ZhqfH2N9CsM= google.golang.org/genproto v0.0.0-20260128011058-8636f8732409/go.mod h1:rxKD3IEILWEu3P44seeNOAwZN4SaoKaQ/2eTg4mM6EM= -google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20 h1:7ei4lp52gK1uSejlA8AZl5AJjeLUOHBQscRQZUgAcu0= -google.golang.org/genproto/googleapis/api v0.0.0-20260203192932-546029d2fa20/go.mod h1:ZdbssH/1SOVnjnDlXzxDHK2MCidiqXtbYccJNzNYPEE= -google.golang.org/genproto/googleapis/rpc 
v0.0.0-20260203192932-546029d2fa20 h1:Jr5R2J6F6qWyzINc+4AM8t5pfUz6beZpHp678GNrMbE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= +google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 h1:JLQynH/LBHfCTSbDWl+py8C+Rg/k1OVH3xfcaiANuF0= +google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:kSJwQxqmFXeo79zOmbrALdflXQeAYcUbgS7PbpMknCY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 h1:mWPCjDEyshlQYzBpMNHaEof6UX1PmHcaUODUywQ0uac= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= From c90f37ded124c7c270fccc889e015b7b147e782e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 Feb 2026 18:40:55 -0700 Subject: [PATCH 223/232] chore(deps): bump github.com/kudobuilder/kuttl from 0.24.1-0.20251223085747-6c1c96eb4abd to 0.25.0 in /operator/tools/kuttl (#19085) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- operator/tools/kuttl/go.mod | 82 +++++++------- operator/tools/kuttl/go.sum | 217 +++++++++++++++++------------------- 2 files changed, 148 insertions(+), 151 deletions(-) diff --git a/operator/tools/kuttl/go.mod b/operator/tools/kuttl/go.mod index dc32532faeb0e..1106f50992ed3 100644 --- a/operator/tools/kuttl/go.mod +++ b/operator/tools/kuttl/go.mod @@ -1,16 +1,19 @@ module github.com/stackrox/rox/operator/tools/kuttl -go 1.25 +go 1.25.0 -require github.com/kudobuilder/kuttl v0.24.1-0.20251223085747-6c1c96eb4abd +require github.com/kudobuilder/kuttl v0.25.0 require ( al.essio.dev/pkg/shellescape v1.5.1 // indirect - cel.dev/expr v0.24.0 // indirect + cel.dev/expr v0.25.1 // indirect + dario.cat/mergo v1.0.1 // indirect github.com/BurntSushi/toml v1.4.0 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.4.0 // indirect + github.com/Masterminds/sprig/v3 v3.3.0 // indirect github.com/Microsoft/go-winio v0.5.1 // indirect - github.com/antlr4-go/antlr/v4 v4.13.0 // indirect + github.com/antlr4-go/antlr/v4 v4.13.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/containerd/errdefs v1.0.0 // indirect @@ -33,75 +36,78 @@ require ( github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.23.0 // indirect - github.com/gogo/protobuf v1.3.2 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/cel-go v0.26.1 // indirect + github.com/google/cel-go v0.27.0 // indirect github.com/google/gnostic-models v0.7.0 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect + github.com/huandu/xstrings v1.5.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-isatty v0.0.20 // indirect + 
github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/sys/sequential v0.6.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/onsi/ginkgo/v2 v2.22.1 // indirect - github.com/onsi/gomega v1.36.2 // indirect + github.com/onsi/gomega v1.38.3 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.22.0 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.62.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/client_golang v1.23.2 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.66.1 // indirect + github.com/prometheus/procfs v0.16.1 // indirect + github.com/shopspring/decimal v1.4.0 // indirect + github.com/spf13/cast v1.7.0 // indirect github.com/spf13/cobra v1.10.2 // indirect github.com/spf13/pflag v1.0.10 // indirect - github.com/stoewer/go-strcase v1.3.0 // indirect github.com/thoas/go-funk v0.9.3 // indirect github.com/x448/float16 v0.8.4 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect - go.opentelemetry.io/otel v1.35.0 // indirect - go.opentelemetry.io/otel/metric v1.35.0 // indirect - go.opentelemetry.io/otel/trace v1.35.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect + go.opentelemetry.io/otel v1.36.0 // indirect + go.opentelemetry.io/otel/metric v1.36.0 // indirect + go.opentelemetry.io/otel/trace v1.36.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/net v0.43.0 // indirect - golang.org/x/oauth2 v0.27.0 // indirect - golang.org/x/sync v0.16.0 // indirect - golang.org/x/sys v0.35.0 // indirect - golang.org/x/term v0.34.0 // indirect - golang.org/x/text v0.28.0 // indirect + golang.org/x/crypto v0.46.0 // indirect + golang.org/x/exp v0.0.0-20240823005443-9b4947da3948 // indirect + golang.org/x/net v0.48.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sync v0.19.0 // indirect + golang.org/x/sys v0.39.0 // indirect + golang.org/x/term v0.38.0 // indirect + golang.org/x/text v0.32.0 // indirect golang.org/x/time v0.9.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect - google.golang.org/protobuf v1.36.7 // indirect - gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a // indirect + google.golang.org/protobuf v1.36.10 // indirect + gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect 
gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.34.3 // indirect - k8s.io/apiextensions-apiserver v0.34.3 // indirect - k8s.io/apimachinery v0.34.3 // indirect - k8s.io/client-go v0.34.3 // indirect + k8s.io/api v0.35.1 // indirect + k8s.io/apiextensions-apiserver v0.35.1 // indirect + k8s.io/apimachinery v0.35.1 // indirect + k8s.io/client-go v0.35.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect - k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect - sigs.k8s.io/controller-runtime v0.22.4 // indirect - sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect - sigs.k8s.io/kind v0.30.0 // indirect + k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect + k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect + sigs.k8s.io/controller-runtime v0.23.1 // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect + sigs.k8s.io/kind v0.31.0 // indirect sigs.k8s.io/randfill v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 // indirect sigs.k8s.io/yaml v1.6.0 // indirect ) diff --git a/operator/tools/kuttl/go.sum b/operator/tools/kuttl/go.sum index 2ca956aeda30f..3f1c4cc48abfd 100644 --- a/operator/tools/kuttl/go.sum +++ b/operator/tools/kuttl/go.sum @@ -1,17 +1,23 @@ al.essio.dev/pkg/shellescape v1.5.1 h1:86HrALUujYS/h+GtqoB26SBEdkWfmMI6FubjXlsXyho= al.essio.dev/pkg/shellescape v1.5.1/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= -cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= -cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= +cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4= +cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= +github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= -github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= +github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= +github.com/antlr4-go/antlr/v4 v4.13.1/go.mod 
h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= @@ -50,6 +56,8 @@ github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjT github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= @@ -71,12 +79,10 @@ github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+Gr github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/cel-go v0.26.1 h1:iPbVVEdkhTX++hpe3lzSk7D3G3QSYqLGoHOcEio+UXQ= -github.com/google/cel-go v0.26.1/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM= +github.com/google/cel-go v0.27.0 h1:e7ih85+4qVrBuqQWTW4FKSqZYokVuc3HnhH5keboFTo= +github.com/google/cel-go v0.27.0/go.mod h1:tTJ11FWqnhw5KKpnWpvW9CJC3Y9GK4EIS0WXnBbebzw= github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= @@ -84,22 +90,22 @@ github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= -github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid 
v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -109,14 +115,18 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kudobuilder/kuttl v0.24.1-0.20251223085747-6c1c96eb4abd h1:2KHSb/Y3Z6pcYo2TQENAW0lLW9iJ7OI7WtJhh2pREbw= -github.com/kudobuilder/kuttl v0.24.1-0.20251223085747-6c1c96eb4abd/go.mod h1:h4+K9Lm4mdFisj2f7lMMeiHh7+0R8t3ccRqpSxjxps0= +github.com/kudobuilder/kuttl v0.25.0 h1:xP16UKI4HjXgk4sIeqw1TeMziOsnat8WNAl7NmCDGvw= +github.com/kudobuilder/kuttl v0.25.0/go.mod h1:nK7F7YFZb3FLR6baNCT55lOeBLb/LV/DoGAXsCe3t2s= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= @@ -135,10 +145,10 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= 
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.22.1 h1:QW7tbJAUDyVDVOM5dFa7qaybo+CRfR7bemlQUN6Z8aM= -github.com/onsi/ginkgo/v2 v2.22.1/go.mod h1:S6aTpoRsSq2cZOd+pssHAlKW/Q/jZt6cPrPlnj4a1xM= -github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= -github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= +github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= +github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= +github.com/onsi/gomega v1.38.3 h1:eTX+W6dobAYfFeGC2PV6RwXRu/MyT+cQguijutvkpSM= +github.com/onsi/gomega v1.38.3/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= @@ -150,27 +160,29 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= -github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= -github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= +github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday/v2 v2.1.0/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= -github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -188,24 +200,24 @@ github.com/thoas/go-funk v0.9.3 h1:7+nAEx3kn5ZJcnDm2Bh23N2yOtweO14bi//dvRtgLpw= github.com/thoas/go-funk v0.9.3/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= -go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= -go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= -go.opentelemetry.io/otel/metric v1.35.0 
h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= -go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= -go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= -go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= -go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= -go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= +go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -214,71 +226,50 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= -go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= -golang.org/x/net v0.43.0/go.mod 
h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= -golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= -golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= -golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= +golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/exp v0.0.0-20240823005443-9b4947da3948 h1:kx6Ds3MlpiUHKj7syVnbp57++8WpuKPcR5yjLBjvLEA= +golang.org/x/exp v0.0.0-20240823005443-9b4947da3948/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= +golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= -golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= -golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= -golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= +golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= golang.org/x/time v0.9.0/go.mod 
h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= -golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= +golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= -google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA= -google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= -google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A= -google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1:v2PbRU4K3llS09c7zodFpNePeamkAwG3mPrAery9VeE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8= +google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= -gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= +gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod 
h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -289,29 +280,29 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.3.0 h1:MfDY1b1/0xN1CyMlQDac0ziEy9zJQd9CXBRRDHw2jJo= gotest.tools/v3 v3.3.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A= -k8s.io/api v0.34.3 h1:D12sTP257/jSH2vHV2EDYrb16bS7ULlHpdNdNhEw2S4= -k8s.io/api v0.34.3/go.mod h1:PyVQBF886Q5RSQZOim7DybQjAbVs8g7gwJNhGtY5MBk= -k8s.io/apiextensions-apiserver v0.34.3 h1:p10fGlkDY09eWKOTeUSioxwLukJnm+KuDZdrW71y40g= -k8s.io/apiextensions-apiserver v0.34.3/go.mod h1:aujxvqGFRdb/cmXYfcRTeppN7S2XV/t7WMEc64zB5A0= -k8s.io/apimachinery v0.34.3 h1:/TB+SFEiQvN9HPldtlWOTp0hWbJ+fjU+wkxysf/aQnE= -k8s.io/apimachinery v0.34.3/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= -k8s.io/client-go v0.34.3 h1:wtYtpzy/OPNYf7WyNBTj3iUA0XaBHVqhv4Iv3tbrF5A= -k8s.io/client-go v0.34.3/go.mod h1:OxxeYagaP9Kdf78UrKLa3YZixMCfP6bgPwPwNBQBzpM= +k8s.io/api v0.35.1 h1:0PO/1FhlK/EQNVK5+txc4FuhQibV25VLSdLMmGpDE/Q= +k8s.io/api v0.35.1/go.mod h1:28uR9xlXWml9eT0uaGo6y71xK86JBELShLy4wR1XtxM= +k8s.io/apiextensions-apiserver v0.35.1 h1:p5vvALkknlOcAqARwjS20kJffgzHqwyQRM8vHLwgU7w= +k8s.io/apiextensions-apiserver v0.35.1/go.mod h1:2CN4fe1GZ3HMe4wBr25qXyJnJyZaquy4nNlNmb3R7AQ= +k8s.io/apimachinery v0.35.1 h1:yxO6gV555P1YV0SANtnTjXYfiivaTPvCTKX6w6qdDsU= +k8s.io/apimachinery v0.35.1/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= +k8s.io/client-go v0.35.1 h1:+eSfZHwuo/I19PaSxqumjqZ9l5XiTEKbIaJ+j1wLcLM= +k8s.io/client-go v0.35.1/go.mod h1:1p1KxDt3a0ruRfc/pG4qT/3oHmUj1AhSHEcxNSGg+OA= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= -k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= -k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= -k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= -sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= -sigs.k8s.io/kind v0.30.0 h1:2Xi1KFEfSMm0XDcvKnUt15ZfgRPCT0OnCBbpgh8DztY= -sigs.k8s.io/kind v0.30.0/go.mod h1:FSqriGaoTPruiXWfRnUXNykF8r2t+fHtK0P0m1AbGF8= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.23.1 h1:TjJSM80Nf43Mg21+RCy3J70aj/W6KyvDtOlpKf+PupE= +sigs.k8s.io/controller-runtime v0.23.1/go.mod 
h1:B6COOxKptp+YaUT5q4l6LqUJTRpizbgf9KSRNdQGns0= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/kind v0.31.0 h1:UcT4nzm+YM7YEbqiAKECk+b6dsvc/HRZZu9U0FolL1g= +sigs.k8s.io/kind v0.31.0/go.mod h1:FSqriGaoTPruiXWfRnUXNykF8r2t+fHtK0P0m1AbGF8= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 h1:2WOzJpHUBVrrkDjU4KBT8n5LDcj824eX0I5UKcgeRUs= +sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= From 74c8e059e36b20f31be68383ec2b2b4e6293920a Mon Sep 17 00:00:00 2001 From: David House <105243888+davdhacs@users.noreply.github.com> Date: Thu, 19 Feb 2026 00:29:37 -0700 Subject: [PATCH 224/232] fix(ci): save all Go caches on master (#19069) Co-authored-by: Claude Opus 4.6 (1M context) --- .github/workflows/scanner-db-integration-tests.yaml | 2 -- .github/workflows/style.yaml | 4 ---- .github/workflows/unit-tests.yaml | 10 ---------- 3 files changed, 16 deletions(-) diff --git a/.github/workflows/scanner-db-integration-tests.yaml b/.github/workflows/scanner-db-integration-tests.yaml index 50a07649c9206..26e0f1d4969f4 100644 --- a/.github/workflows/scanner-db-integration-tests.yaml +++ b/.github/workflows/scanner-db-integration-tests.yaml @@ -35,8 +35,6 @@ jobs: - name: Cache Go dependencies uses: ./.github/actions/cache-go-dependencies - with: - save: false # ~62 MB, test-dominated, no critical path speedup - name: Is Postgres ready run: pg_isready -h 127.0.0.1 diff --git a/.github/workflows/style.yaml b/.github/workflows/style.yaml index df9e2ca2152e9..6835256aafb1b 100644 --- a/.github/workflows/style.yaml +++ b/.github/workflows/style.yaml @@ -41,8 +41,6 @@ jobs: - name: Cache Go dependencies uses: ./.github/actions/cache-go-dependencies - with: - save: false # ~752 MB, style-dominated, no critical path speedup - name: Check Generated run: scripts/ci/jobs/check-generated.sh @@ -111,8 +109,6 @@ jobs: - name: Cache Go dependencies uses: ./.github/actions/cache-go-dependencies - with: - save: false # ~2,183 MB, style-dominated, no critical path speedup - name: Cache UI dependencies uses: ./.github/actions/cache-ui-dependencies diff --git a/.github/workflows/unit-tests.yaml b/.github/workflows/unit-tests.yaml index 60bb610c9fc5b..325ad94ba83a2 100644 --- a/.github/workflows/unit-tests.yaml +++ b/.github/workflows/unit-tests.yaml @@ -42,8 +42,6 @@ jobs: - name: Cache Go dependencies uses: ./.github/actions/cache-go-dependencies - with: - save: false # ~2,000 MB, test-dominated, no critical path speedup - name: Go Unit Tests run: ${{ matrix.gotags }} make go-unit-tests @@ -124,8 +122,6 @@ jobs: - name: Cache Go dependencies uses: ./.github/actions/cache-go-dependencies - with: - save: false # ~913 MB, test-dominated, no critical path speedup - name: Is Postgres ready run: pg_isready -h 127.0.0.1 @@ -186,8 +182,6 @@ jobs: - name: Cache Go 
dependencies uses: ./.github/actions/cache-go-dependencies - with: - save: false # ~610 MB, test-dominated, no critical path speedup - name: Is Postgres ready run: pg_isready -h 127.0.0.1 @@ -309,8 +303,6 @@ jobs: - name: Cache Go dependencies uses: ./.github/actions/cache-go-dependencies - with: - save: false # ~1,163 MB, test-dominated, no critical path speedup - uses: ./.github/actions/handle-tagged-build @@ -411,8 +403,6 @@ jobs: - name: Cache Go dependencies uses: ./.github/actions/cache-go-dependencies - with: - save: false # ~626 MB, test-dominated, no critical path speedup - name: Login to Quay.io uses: docker/login-action@v3 From 2d25d53aac438dc83aa827890706866c695537fa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Feb 2026 01:29:19 -0700 Subject: [PATCH 225/232] chore(deps): bump the aws-sdk-go-v2 group with 3 updates (#19104) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 3bfbdfc330942..888e3d4ccfb97 100644 --- a/go.mod +++ b/go.mod @@ -26,10 +26,10 @@ require ( github.com/adhocore/gronx v1.19.6 github.com/andygrunwald/go-jira v1.17.0 github.com/aws/aws-sdk-go-v2 v1.41.1 - github.com/aws/aws-sdk-go-v2/config v1.32.8 - github.com/aws/aws-sdk-go-v2/credentials v1.19.8 + github.com/aws/aws-sdk-go-v2/config v1.32.9 + github.com/aws/aws-sdk-go-v2/credentials v1.19.9 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.1 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.2 github.com/aws/aws-sdk-go-v2/service/ecr v1.55.2 github.com/aws/aws-sdk-go-v2/service/s3 v1.96.0 github.com/aws/aws-sdk-go-v2/service/securityhub v1.67.4 @@ -242,7 +242,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 // indirect github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.30.10 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.14 // indirect github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.11.0 // indirect github.com/aymerick/douceur v0.2.0 // indirect diff --git a/go.sum b/go.sum index 6d72dd91c2041..3867b44f43d41 100644 --- a/go.sum +++ b/go.sum @@ -289,14 +289,14 @@ github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6ce github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4= -github.com/aws/aws-sdk-go-v2/config v1.32.8 h1:iu+64gwDKEoKnyTQskSku72dAwggKI5sV6rNvgSMpMs= -github.com/aws/aws-sdk-go-v2/config v1.32.8/go.mod h1:MI2XvA+qDi3i9AJxX1E2fu730syEBzp/jnXrjxuHwgI= -github.com/aws/aws-sdk-go-v2/credentials v1.19.8 h1:Jp2JYH1lRT3KhX4mshHPvVYsR5qqRec3hGvEarNYoR0= -github.com/aws/aws-sdk-go-v2/credentials v1.19.8/go.mod h1:fZG9tuvyVfxknv1rKibIz3DobRaFw1Poe8IKtXB3XYY= +github.com/aws/aws-sdk-go-v2/config v1.32.9 h1:ktda/mtAydeObvJXlHzyGpK1xcsLaP16zfUPDGoW90A= +github.com/aws/aws-sdk-go-v2/config v1.32.9/go.mod 
h1:U+fCQ+9QKsLW786BCfEjYRj34VVTbPdsLP3CHSYXMOI= +github.com/aws/aws-sdk-go-v2/credentials v1.19.9 h1:sWvTKsyrMlJGEuj/WgrwilpoJ6Xa1+KhIpGdzw7mMU8= +github.com/aws/aws-sdk-go-v2/credentials v1.19.9/go.mod h1:+J44MBhmfVY/lETFiKI+klz0Vym2aCmIjqgClMmW82w= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.1 h1:IbWiN670htmBioc+Zj32vSpJgQ2+OYSlvTvfQ1nCORQ= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.1/go.mod h1:tw/B596EUhBWDFGdDGuLC21fVU4A3s4/5Efy8S39W18= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.2 h1:1i1SUOTLk0TbMh7+eJYxgv1r1f47BfR69LL6yaELoI0= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.2/go.mod h1:bo7DhmS/OyVeAJTC768nEk92YKWskqJ4gn0gB5e59qQ= github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U= github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik= @@ -325,8 +325,8 @@ github.com/aws/aws-sdk-go-v2/service/securityhub v1.67.4 h1:6zZdm6bqf975SA+6+lWl github.com/aws/aws-sdk-go-v2/service/securityhub v1.67.4/go.mod h1:3wnS16Wip5w0uh9kVFBhuMFmdkrMBr8Fc96kAY5h13o= github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y= github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M= -github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk= -github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.10 h1:+VTRawC4iVY58pS/lzpo0lnoa/SYNGF4/B/3/U5ro8Y= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.10/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.14 h1:0jbJeuEHlwKJ9PfXtpSFc4MF+WIWORdhN1n30ITZGFM= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.14/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo= github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ= From 12944c9da5ed1f758f33951593b3952be28a6dbb Mon Sep 17 00:00:00 2001 From: Aleksandr Kurlov Date: Thu, 19 Feb 2026 10:44:39 +0100 Subject: [PATCH 226/232] ROX-32966: Set konflux retest command alias (#18917) --- .github/workflows/konflux-auto-retest.yml | 1 + .tekton/central-db-build.yaml | 1 + .tekton/create-custom-snapshot.yaml | 1 + .tekton/main-build.yaml | 1 + .tekton/operator-build.yaml | 1 + .tekton/operator-bundle-build.yaml | 1 + .tekton/retag-collector.yaml | 1 + .tekton/retag-fact.yaml | 1 + .tekton/retag-scanner-db-slim.yaml | 1 + .tekton/retag-scanner-db.yaml | 1 + .tekton/retag-scanner-slim.yaml | 1 + .tekton/retag-scanner.yaml | 1 + .tekton/roxctl-build.yaml | 1 + .tekton/scanner-v4-build.yaml | 1 + .tekton/scanner-v4-db-build.yaml | 1 + 15 files changed, 15 insertions(+) diff --git a/.github/workflows/konflux-auto-retest.yml b/.github/workflows/konflux-auto-retest.yml index f0ecb4cfc89ea..817d918456c47 100644 --- a/.github/workflows/konflux-auto-retest.yml +++ b/.github/workflows/konflux-auto-retest.yml @@ -15,3 +15,4 @@ jobs: with: max_retries: 3 check_name_suffix: '-on-push' + retest_command: '/konflux-retest' diff --git a/.tekton/central-db-build.yaml 
b/.tekton/central-db-build.yaml index 9076f0e1063c6..2123bba19fff3 100644 --- a/.tekton/central-db-build.yaml +++ b/.tekton/central-db-build.yaml @@ -8,6 +8,7 @@ metadata: build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}' build.appstudio.redhat.com/target_branch: '{{target_branch}}' pipelinesascode.tekton.dev/max-keep-runs: "500" + pipelinesascode.tekton.dev/on-comment: "/konflux-retest central-db-on-push" # TODO(ROX-21073): re-enable for all PR branches pipelinesascode.tekton.dev/on-cel-expression: | ( diff --git a/.tekton/create-custom-snapshot.yaml b/.tekton/create-custom-snapshot.yaml index c90b1dacfaa5a..4ae4ad7ffcd01 100644 --- a/.tekton/create-custom-snapshot.yaml +++ b/.tekton/create-custom-snapshot.yaml @@ -8,6 +8,7 @@ metadata: build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}' build.appstudio.redhat.com/target_branch: '{{target_branch}}' pipelinesascode.tekton.dev/max-keep-runs: "500" + pipelinesascode.tekton.dev/on-comment: "/konflux-retest create-custom-snapshot" # TODO(ROX-21073): re-enable for all PR branches pipelinesascode.tekton.dev/on-cel-expression: | ( diff --git a/.tekton/main-build.yaml b/.tekton/main-build.yaml index a59403e0cbc35..354140c1574b6 100644 --- a/.tekton/main-build.yaml +++ b/.tekton/main-build.yaml @@ -8,6 +8,7 @@ metadata: build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}' build.appstudio.redhat.com/target_branch: '{{target_branch}}' pipelinesascode.tekton.dev/max-keep-runs: "500" + pipelinesascode.tekton.dev/on-comment: "/konflux-retest main-on-push" # TODO(ROX-21073): re-enable for all PR branches pipelinesascode.tekton.dev/on-cel-expression: | ( diff --git a/.tekton/operator-build.yaml b/.tekton/operator-build.yaml index c7f3b2df2c97e..379f393bf135a 100644 --- a/.tekton/operator-build.yaml +++ b/.tekton/operator-build.yaml @@ -8,6 +8,7 @@ metadata: build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}' build.appstudio.redhat.com/target_branch: '{{target_branch}}' pipelinesascode.tekton.dev/max-keep-runs: "500" + pipelinesascode.tekton.dev/on-comment: "/konflux-retest operator-on-push" # TODO(ROX-21073): re-enable for all PR branches pipelinesascode.tekton.dev/on-cel-expression: | ( diff --git a/.tekton/operator-bundle-build.yaml b/.tekton/operator-bundle-build.yaml index e729253421329..50fe934add13e 100644 --- a/.tekton/operator-bundle-build.yaml +++ b/.tekton/operator-bundle-build.yaml @@ -8,6 +8,7 @@ metadata: build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}' build.appstudio.redhat.com/target_branch: '{{target_branch}}' pipelinesascode.tekton.dev/max-keep-runs: "500" + pipelinesascode.tekton.dev/on-comment: "/konflux-retest operator-bundle-on-push" # TODO(ROX-21073): re-enable for all PR branches pipelinesascode.tekton.dev/on-cel-expression: | ( diff --git a/.tekton/retag-collector.yaml b/.tekton/retag-collector.yaml index 402dfb62469e1..d9c526eba9dc5 100644 --- a/.tekton/retag-collector.yaml +++ b/.tekton/retag-collector.yaml @@ -8,6 +8,7 @@ metadata: build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}' build.appstudio.redhat.com/target_branch: '{{target_branch}}' pipelinesascode.tekton.dev/max-keep-runs: "500" + pipelinesascode.tekton.dev/on-comment: "/konflux-retest retag-collector" # TODO(ROX-21073): re-enable for all PR branches pipelinesascode.tekton.dev/on-cel-expression: | ( diff --git a/.tekton/retag-fact.yaml b/.tekton/retag-fact.yaml index 6b5e23daae706..e41677be00dd6 100644 --- a/.tekton/retag-fact.yaml 
+++ b/.tekton/retag-fact.yaml @@ -8,6 +8,7 @@ metadata: build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}' build.appstudio.redhat.com/target_branch: '{{target_branch}}' pipelinesascode.tekton.dev/max-keep-runs: "500" + pipelinesascode.tekton.dev/on-comment: "/konflux-retest retag-fact" # TODO(ROX-21073): re-enable for all PR branches pipelinesascode.tekton.dev/on-cel-expression: | ( diff --git a/.tekton/retag-scanner-db-slim.yaml b/.tekton/retag-scanner-db-slim.yaml index 581a18e658190..b9554d16be051 100644 --- a/.tekton/retag-scanner-db-slim.yaml +++ b/.tekton/retag-scanner-db-slim.yaml @@ -8,6 +8,7 @@ metadata: build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}' build.appstudio.redhat.com/target_branch: '{{target_branch}}' pipelinesascode.tekton.dev/max-keep-runs: "500" + pipelinesascode.tekton.dev/on-comment: "/konflux-retest retag-scanner-db-slim" # TODO(ROX-21073): re-enable for all PR branches pipelinesascode.tekton.dev/on-cel-expression: | ( diff --git a/.tekton/retag-scanner-db.yaml b/.tekton/retag-scanner-db.yaml index 500aa96403280..36865cf08e4b4 100644 --- a/.tekton/retag-scanner-db.yaml +++ b/.tekton/retag-scanner-db.yaml @@ -8,6 +8,7 @@ metadata: build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}' build.appstudio.redhat.com/target_branch: '{{target_branch}}' pipelinesascode.tekton.dev/max-keep-runs: "500" + pipelinesascode.tekton.dev/on-comment: "/konflux-retest retag-scanner-db" # TODO(ROX-21073): re-enable for all PR branches pipelinesascode.tekton.dev/on-cel-expression: | ( diff --git a/.tekton/retag-scanner-slim.yaml b/.tekton/retag-scanner-slim.yaml index 9a0347886450c..dcac863485fa7 100644 --- a/.tekton/retag-scanner-slim.yaml +++ b/.tekton/retag-scanner-slim.yaml @@ -8,6 +8,7 @@ metadata: build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}' build.appstudio.redhat.com/target_branch: '{{target_branch}}' pipelinesascode.tekton.dev/max-keep-runs: "500" + pipelinesascode.tekton.dev/on-comment: "/konflux-retest retag-scanner-slim" # TODO(ROX-21073): re-enable for all PR branches pipelinesascode.tekton.dev/on-cel-expression: | ( diff --git a/.tekton/retag-scanner.yaml b/.tekton/retag-scanner.yaml index 1ef222b253e72..8875548f55572 100644 --- a/.tekton/retag-scanner.yaml +++ b/.tekton/retag-scanner.yaml @@ -8,6 +8,7 @@ metadata: build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}' build.appstudio.redhat.com/target_branch: '{{target_branch}}' pipelinesascode.tekton.dev/max-keep-runs: "500" + pipelinesascode.tekton.dev/on-comment: "/konflux-retest retag-scanner" # TODO(ROX-21073): re-enable for all PR branches pipelinesascode.tekton.dev/on-cel-expression: | ( diff --git a/.tekton/roxctl-build.yaml b/.tekton/roxctl-build.yaml index a5386f4c3a007..00df6042eadf1 100644 --- a/.tekton/roxctl-build.yaml +++ b/.tekton/roxctl-build.yaml @@ -8,6 +8,7 @@ metadata: build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}' build.appstudio.redhat.com/target_branch: '{{target_branch}}' pipelinesascode.tekton.dev/max-keep-runs: "500" + pipelinesascode.tekton.dev/on-comment: "/konflux-retest roxctl-on-push" # TODO(ROX-21073): re-enable for all PR branches pipelinesascode.tekton.dev/on-cel-expression: | ( diff --git a/.tekton/scanner-v4-build.yaml b/.tekton/scanner-v4-build.yaml index c6c890a261eb1..4eec4bfeca0b9 100644 --- a/.tekton/scanner-v4-build.yaml +++ b/.tekton/scanner-v4-build.yaml @@ -8,6 +8,7 @@ metadata: build.appstudio.redhat.com/pull_request_number: 
'{{pull_request_number}}' build.appstudio.redhat.com/target_branch: '{{target_branch}}' pipelinesascode.tekton.dev/max-keep-runs: "500" + pipelinesascode.tekton.dev/on-comment: "/konflux-retest scanner-v4-on-push" # TODO(ROX-21073): re-enable for all PR branches pipelinesascode.tekton.dev/on-cel-expression: | ( diff --git a/.tekton/scanner-v4-db-build.yaml b/.tekton/scanner-v4-db-build.yaml index 5b74dc9b763eb..a20eca95dd84b 100644 --- a/.tekton/scanner-v4-db-build.yaml +++ b/.tekton/scanner-v4-db-build.yaml @@ -8,6 +8,7 @@ metadata: build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}' build.appstudio.redhat.com/target_branch: '{{target_branch}}' pipelinesascode.tekton.dev/max-keep-runs: "500" + pipelinesascode.tekton.dev/on-comment: "/konflux-retest scanner-v4-db-on-push" # TODO(ROX-21073): re-enable for all PR branches pipelinesascode.tekton.dev/on-cel-expression: | ( From 7dff17d951ffa2ace20d425a6c59e59acff6bf2e Mon Sep 17 00:00:00 2001 From: David Shrewsberry <99685630+dashrews78@users.noreply.github.com> Date: Thu, 19 Feb 2026 06:01:24 -0500 Subject: [PATCH 227/232] ROX-32965: throttle reprocessing of deployments (#18919) --- .../evaluator/evaluator_integration_test.go | 5 - .../service/pipeline/reprocessing/pipeline.go | 83 ++++++++ .../pipeline/reprocessing/pipeline_test.go | 178 ++++++++++++++++++ pkg/env/reprocessing_interval.go | 7 + 4 files changed, 268 insertions(+), 5 deletions(-) create mode 100644 central/sensor/service/pipeline/reprocessing/pipeline_test.go diff --git a/central/processbaseline/evaluator/evaluator_integration_test.go b/central/processbaseline/evaluator/evaluator_integration_test.go index b206509330c25..276e7d66aaee9 100644 --- a/central/processbaseline/evaluator/evaluator_integration_test.go +++ b/central/processbaseline/evaluator/evaluator_integration_test.go @@ -13,7 +13,6 @@ import ( "github.com/stackrox/rox/central/processindicator/views" "github.com/stackrox/rox/generated/storage" "github.com/stackrox/rox/pkg/fixtures" - "github.com/stackrox/rox/pkg/logging" "github.com/stackrox/rox/pkg/postgres" "github.com/stackrox/rox/pkg/postgres/pgtest" "github.com/stackrox/rox/pkg/protoconv" @@ -24,8 +23,6 @@ import ( "github.com/stretchr/testify/suite" ) -var log = logging.LoggerForModule() - func TestProcessBaselineEvaluatorIntegration(t *testing.T) { suite.Run(t, new(ProcessBaselineEvaluatorIntegrationTestSuite)) } @@ -195,8 +192,6 @@ func (suite *ProcessBaselineEvaluatorIntegrationTestSuite) TestLockedProcessBase suite.NoError(err) suite.NotNil(persistedResult) suite.Len(persistedResult.GetBaselineStatuses(), 2) - log.Infof("SHREWS -- %v", persistedResult) - for _, status := range persistedResult.GetBaselineStatuses() { // We only locked the first container if status.GetContainerName() == containerName { diff --git a/central/sensor/service/pipeline/reprocessing/pipeline.go b/central/sensor/service/pipeline/reprocessing/pipeline.go index c7e274ade9b2c..fce2986ddd93f 100644 --- a/central/sensor/service/pipeline/reprocessing/pipeline.go +++ b/central/sensor/service/pipeline/reprocessing/pipeline.go @@ -3,6 +3,8 @@ package reprocessing import ( "context" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" "github.com/stackrox/rox/central/deployment/datastore" "github.com/stackrox/rox/central/deployment/utils" "github.com/stackrox/rox/central/detection/lifecycle" @@ -14,15 +16,43 @@ import ( "github.com/stackrox/rox/central/sensor/service/pipeline/reconciliation" "github.com/stackrox/rox/generated/internalapi/central" 
"github.com/stackrox/rox/pkg/centralsensor" + "github.com/stackrox/rox/pkg/env" "github.com/stackrox/rox/pkg/features" + "github.com/stackrox/rox/pkg/logging" "github.com/stackrox/rox/pkg/metrics" "github.com/stackrox/rox/pkg/search" + "golang.org/x/sync/semaphore" ) var ( + log = logging.LoggerForModule() + _ pipeline.Fragment = (*pipelineImpl)(nil) + + riskSemaphoreQueueSize = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: metrics.PrometheusNamespace, + Subsystem: "central", + Name: "deployment_risk_semaphore_queue_size", + Help: "Number of deployment risk reprocessing operations waiting for a semaphore slot.", + }) + riskSemaphoreHoldingSize = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: metrics.PrometheusNamespace, + Subsystem: "central", + Name: "deployment_risk_semaphore_holding_size", + Help: "Number of deployment risk reprocessing operations currently holding a semaphore slot.", + }) + riskSemaphoreTimeouts = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: metrics.PrometheusNamespace, + Subsystem: "central", + Name: "deployment_risk_semaphore_timeouts_total", + Help: "Total number of deployment risk reprocessing operations that timed out waiting for a semaphore slot.", + }) ) +func init() { + prometheus.MustRegister(riskSemaphoreQueueSize, riskSemaphoreHoldingSize, riskSemaphoreTimeouts) +} + // GetPipeline returns an instantiation of this particular pipeline func GetPipeline() pipeline.Fragment { return NewPipeline(datastore.Singleton(), lifecycle.SingletonManager(), riskManager.Singleton(), reprocessor.Singleton()) @@ -30,11 +60,13 @@ func GetPipeline() pipeline.Fragment { // NewPipeline returns a new instance of Pipeline. func NewPipeline(deployments datastore.DataStore, manager lifecycle.Manager, riskManager riskManager.Manager, riskReprocessor reprocessor.Loop) pipeline.Fragment { + maxConcurrency := int64(env.DeploymentRiskMaxConcurrency.IntegerSetting()) return &pipelineImpl{ riskManager: riskManager, riskReprocessor: riskReprocessor, manager: manager, deployments: deployments, + riskSemaphore: semaphore.NewWeighted(maxConcurrency), } } @@ -43,6 +75,7 @@ type pipelineImpl struct { riskManager riskManager.Manager riskReprocessor reprocessor.Loop manager lifecycle.Manager + riskSemaphore *semaphore.Weighted } func (s *pipelineImpl) Capabilities() []centralsensor.CentralCapability { @@ -68,6 +101,16 @@ func (s *pipelineImpl) Match(msg *central.MsgFromSensor) bool { func (s *pipelineImpl) Run(ctx context.Context, _ string, msg *central.MsgFromSensor, _ common.MessageInjector) error { defer countMetrics.IncrementResourceProcessedCounter(pipeline.ActionToOperation(msg.GetEvent().GetAction()), metrics.DeploymentReprocess) + // Throttle concurrent risk reprocessing to prevent DB connection pool exhaustion. + // + // A timeout is applied to prevent indefinite blocking if risk operations are stuck. + // On timeout, the operation is dropped -- it will be retried on the next reprocessing + // cycle. + if err := s.acquireRiskSemaphore(ctx); err != nil { + return err + } + defer s.releaseRiskSemaphore() + reprocessMsg := msg.GetEvent().GetReprocessDeployment() deployment, exists, err := s.deployments.GetDeployment(ctx, reprocessMsg.GetDeploymentId()) @@ -84,4 +127,44 @@ func (s *pipelineImpl) Run(ctx context.Context, _ string, msg *central.MsgFromSe return nil } +// acquireRiskSemaphore acquires the risk reprocessing semaphore with an optional timeout. +// This follows the same pattern as the image scan semaphore in central/image/service. 
+func (s *pipelineImpl) acquireRiskSemaphore(ctx context.Context) error { + waitTime := env.DeploymentRiskSemaphoreWaitTime.DurationSetting() + + acquireCtx := ctx + if waitTime > 0 { + var cancel context.CancelFunc + acquireCtx, cancel = context.WithTimeout(ctx, waitTime) + defer cancel() + } + + riskSemaphoreQueueSize.Inc() + defer riskSemaphoreQueueSize.Dec() + + if err := s.riskSemaphore.Acquire(acquireCtx, 1); err != nil { + if ctx.Err() != nil { + // Parent context was cancelled (sensor disconnected). This is expected. + log.Debugf("Unable to acquire context to reprocess deployment risk: %v", err) + } else if errors.Is(err, context.DeadlineExceeded) { + // Semaphore wait timed out... + riskSemaphoreTimeouts.Inc() + log.Warnf("Timed out waiting to reprocess deployment risk (waited %v, queue is full): %v", + waitTime, err) + } else { + // unexpected error + log.Errorf("Unexpected error acquiring risk semaphore: %v", err) + } + return err + } + + riskSemaphoreHoldingSize.Inc() + return nil +} + +func (s *pipelineImpl) releaseRiskSemaphore() { + s.riskSemaphore.Release(1) + riskSemaphoreHoldingSize.Dec() +} + func (s *pipelineImpl) OnFinish(_ string) {} diff --git a/central/sensor/service/pipeline/reprocessing/pipeline_test.go b/central/sensor/service/pipeline/reprocessing/pipeline_test.go new file mode 100644 index 0000000000000..3e27b71f656e0 --- /dev/null +++ b/central/sensor/service/pipeline/reprocessing/pipeline_test.go @@ -0,0 +1,178 @@ +package reprocessing + +import ( + "context" + "sync/atomic" + "testing" + "time" + + deploymentMocks "github.com/stackrox/rox/central/deployment/datastore/mocks" + lifecycleMocks "github.com/stackrox/rox/central/detection/lifecycle/mocks" + reprocessorMocks "github.com/stackrox/rox/central/reprocessor/mocks" + riskManagerMocks "github.com/stackrox/rox/central/risk/manager/mocks" + "github.com/stackrox/rox/generated/internalapi/central" + "github.com/stackrox/rox/generated/storage" + "github.com/stackrox/rox/pkg/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + "golang.org/x/sync/semaphore" +) + +func TestRunCallsReprocessDeploymentRisk(t *testing.T) { + ctrl := gomock.NewController(t) + deployments := deploymentMocks.NewMockDataStore(ctrl) + manager := lifecycleMocks.NewMockManager(ctrl) + riskMgr := riskManagerMocks.NewMockManager(ctrl) + reprocessor := reprocessorMocks.NewMockLoop(ctrl) + + p := &pipelineImpl{ + deployments: deployments, + riskManager: riskMgr, + riskReprocessor: reprocessor, + manager: manager, + riskSemaphore: semaphore.NewWeighted(15), + } + + depID := uuid.NewV4().String() + dep := &storage.Deployment{Id: depID, Name: "test"} + deployments.EXPECT().GetDeployment(gomock.Any(), depID).Return(dep, true, nil) + riskMgr.EXPECT().ReprocessDeploymentRisk(dep) + + msg := &central.MsgFromSensor{ + Msg: &central.MsgFromSensor_Event{ + Event: &central.SensorEvent{ + Resource: &central.SensorEvent_ReprocessDeployment{ + ReprocessDeployment: &central.ReprocessDeploymentRisk{ + DeploymentId: depID, + }, + }, + }, + }, + } + + err := p.Run(context.Background(), "cluster-1", msg, nil) + assert.NoError(t, err) +} + +func TestRunThrottlesConcurrency(t *testing.T) { + ctrl := gomock.NewController(t) + deployments := deploymentMocks.NewMockDataStore(ctrl) + manager := lifecycleMocks.NewMockManager(ctrl) + riskMgr := riskManagerMocks.NewMockManager(ctrl) + reprocessor := reprocessorMocks.NewMockLoop(ctrl) + + const maxConcurrency = 3 + p := &pipelineImpl{ + deployments: deployments, + riskManager: 
riskMgr, + riskReprocessor: reprocessor, + manager: manager, + riskSemaphore: semaphore.NewWeighted(maxConcurrency), + } + + // Track peak concurrency inside ReprocessDeploymentRisk. + var currentConcurrency atomic.Int32 + var peakConcurrency atomic.Int32 + var completed atomic.Int32 + + const numDeployments = 20 + + // Set up mocks: each ReprocessDeploymentRisk call sleeps briefly to simulate DB work. + for i := 0; i < numDeployments; i++ { + depID := uuid.NewV4().String() + dep := &storage.Deployment{Id: depID, Name: "test"} + deployments.EXPECT().GetDeployment(gomock.Any(), depID).Return(dep, true, nil) + riskMgr.EXPECT().ReprocessDeploymentRisk(dep).Do(func(_ *storage.Deployment) { + cur := currentConcurrency.Add(1) + // Update peak + for { + peak := peakConcurrency.Load() + if cur <= peak { + break + } + if peakConcurrency.CompareAndSwap(peak, cur) { + break + } + } + time.Sleep(50 * time.Millisecond) + currentConcurrency.Add(-1) + completed.Add(1) + }) + + msg := &central.MsgFromSensor{ + Msg: &central.MsgFromSensor_Event{ + Event: &central.SensorEvent{ + Resource: &central.SensorEvent_ReprocessDeployment{ + ReprocessDeployment: &central.ReprocessDeploymentRisk{ + DeploymentId: depID, + }, + }, + }, + }, + } + + go func() { + err := p.Run(context.Background(), "cluster-1", msg, nil) + assert.NoError(t, err) + }() + } + + // Wait for all goroutines to finish. + require.Eventually(t, func() bool { + return completed.Load() == numDeployments + }, 10*time.Second, 10*time.Millisecond) + + // The peak concurrency should not exceed our limit. + assert.LessOrEqual(t, peakConcurrency.Load(), int32(maxConcurrency), + "peak concurrency (%d) should not exceed the semaphore limit (%d)", + peakConcurrency.Load(), maxConcurrency) + t.Logf("Peak concurrency: %d (limit: %d)", peakConcurrency.Load(), maxConcurrency) +} + +func TestRunRespectsContextCancellation(t *testing.T) { + ctrl := gomock.NewController(t) + deployments := deploymentMocks.NewMockDataStore(ctrl) + manager := lifecycleMocks.NewMockManager(ctrl) + riskMgr := riskManagerMocks.NewMockManager(ctrl) + reprocessor := reprocessorMocks.NewMockLoop(ctrl) + + // Semaphore with capacity 1, pre-acquired so the next Acquire blocks. + sem := semaphore.NewWeighted(1) + require.NoError(t, sem.Acquire(context.Background(), 1)) + + p := &pipelineImpl{ + deployments: deployments, + riskManager: riskMgr, + riskReprocessor: reprocessor, + manager: manager, + riskSemaphore: sem, + } + + // Note: no GetDeployment expectation because the semaphore is acquired first. + // With a cancelled context and a full semaphore, Acquire returns immediately + // with an error before any DB call is made. + + depID := uuid.NewV4().String() + msg := &central.MsgFromSensor{ + Msg: &central.MsgFromSensor_Event{ + Event: &central.SensorEvent{ + Resource: &central.SensorEvent_ReprocessDeployment{ + ReprocessDeployment: &central.ReprocessDeploymentRisk{ + DeploymentId: depID, + }, + }, + }, + }, + } + + // Cancel context so the semaphore Acquire fails. + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + err := p.Run(ctx, "cluster-1", msg, nil) + assert.Error(t, err, "Run should return an error when context is cancelled") + + // Release the pre-acquired semaphore to clean up. + sem.Release(1) +} diff --git a/pkg/env/reprocessing_interval.go b/pkg/env/reprocessing_interval.go index 35d547f046a0d..196e06f4f3992 100644 --- a/pkg/env/reprocessing_interval.go +++ b/pkg/env/reprocessing_interval.go @@ -21,4 +21,11 @@ var ( // messages to Sensors at the end of Central image reprocessing. 
When set to 0, messages are sent as fast // as possible ReprocessDeploymentsMsgDelay = registerDurationSetting("ROX_REPROCESS_DEPLOYMENTS_MSG_DELAY", 0, WithDurationZeroAllowed()) + // DeploymentRiskMaxConcurrency limits how many deployments can have their risk reprocessed + // concurrently across all clusters. + DeploymentRiskMaxConcurrency = RegisterIntegerSetting("ROX_DEPLOYMENT_RISK_MAX_CONCURRENCY", 15).WithMinimum(1).WithMaximum(30) + // DeploymentRiskSemaphoreWaitTime is the maximum time a worker will wait to acquire + // the risk reprocessing semaphore. Setting to zero disables the timeout (workers + // block indefinitely until a slot is available or the sensor disconnects). + DeploymentRiskSemaphoreWaitTime = registerDurationSetting("ROX_DEPLOYMENT_RISK_SEMAPHORE_WAIT_TIME", 2*time.Minute, WithDurationZeroAllowed()) ) From 18b8c866364e92decab328e5b329e5b9212b9e31 Mon Sep 17 00:00:00 2001 From: David Shrewsberry <99685630+dashrews78@users.noreply.github.com> Date: Thu, 19 Feb 2026 09:14:15 -0500 Subject: [PATCH 228/232] chore: revert cache stuff (#19108) --- .github/workflows/scanner-db-integration-tests.yaml | 2 ++ .github/workflows/style.yaml | 4 ++++ .github/workflows/unit-tests.yaml | 10 ++++++++++ 3 files changed, 16 insertions(+) diff --git a/.github/workflows/scanner-db-integration-tests.yaml b/.github/workflows/scanner-db-integration-tests.yaml index 26e0f1d4969f4..50a07649c9206 100644 --- a/.github/workflows/scanner-db-integration-tests.yaml +++ b/.github/workflows/scanner-db-integration-tests.yaml @@ -35,6 +35,8 @@ jobs: - name: Cache Go dependencies uses: ./.github/actions/cache-go-dependencies + with: + save: false # ~62 MB, test-dominated, no critical path speedup - name: Is Postgres ready run: pg_isready -h 127.0.0.1 diff --git a/.github/workflows/style.yaml b/.github/workflows/style.yaml index 6835256aafb1b..df9e2ca2152e9 100644 --- a/.github/workflows/style.yaml +++ b/.github/workflows/style.yaml @@ -41,6 +41,8 @@ jobs: - name: Cache Go dependencies uses: ./.github/actions/cache-go-dependencies + with: + save: false # ~752 MB, style-dominated, no critical path speedup - name: Check Generated run: scripts/ci/jobs/check-generated.sh @@ -109,6 +111,8 @@ jobs: - name: Cache Go dependencies uses: ./.github/actions/cache-go-dependencies + with: + save: false # ~2,183 MB, style-dominated, no critical path speedup - name: Cache UI dependencies uses: ./.github/actions/cache-ui-dependencies diff --git a/.github/workflows/unit-tests.yaml b/.github/workflows/unit-tests.yaml index 325ad94ba83a2..60bb610c9fc5b 100644 --- a/.github/workflows/unit-tests.yaml +++ b/.github/workflows/unit-tests.yaml @@ -42,6 +42,8 @@ jobs: - name: Cache Go dependencies uses: ./.github/actions/cache-go-dependencies + with: + save: false # ~2,000 MB, test-dominated, no critical path speedup - name: Go Unit Tests run: ${{ matrix.gotags }} make go-unit-tests @@ -122,6 +124,8 @@ jobs: - name: Cache Go dependencies uses: ./.github/actions/cache-go-dependencies + with: + save: false # ~913 MB, test-dominated, no critical path speedup - name: Is Postgres ready run: pg_isready -h 127.0.0.1 @@ -182,6 +186,8 @@ jobs: - name: Cache Go dependencies uses: ./.github/actions/cache-go-dependencies + with: + save: false # ~610 MB, test-dominated, no critical path speedup - name: Is Postgres ready run: pg_isready -h 127.0.0.1 @@ -303,6 +309,8 @@ jobs: - name: Cache Go dependencies uses: ./.github/actions/cache-go-dependencies + with: + save: false # ~1,163 MB, test-dominated, no critical path speedup - uses: 
./.github/actions/handle-tagged-build @@ -403,6 +411,8 @@ jobs: - name: Cache Go dependencies uses: ./.github/actions/cache-go-dependencies + with: + save: false # ~626 MB, test-dominated, no critical path speedup - name: Login to Quay.io uses: docker/login-action@v3 From 1e9f883dd7ce794b97f94fcf1d5c64f346de2ef6 Mon Sep 17 00:00:00 2001 From: Marcin Owsiany Date: Thu, 19 Feb 2026 17:04:38 +0100 Subject: [PATCH 229/232] ROX-33247: deprecate init bundles APIs (#19106) Co-authored-by: StackRox PR Fixxxer --- generated/api/v1/cluster_init_service.pb.go | 44 +++++++++++-------- .../api/v1/cluster_init_service.swagger.json | 4 +- .../api/v1/cluster_init_service_grpc.pb.go | 15 +++++++ proto/api/v1/cluster_init_service.proto | 12 +++++ 4 files changed, 55 insertions(+), 20 deletions(-) diff --git a/generated/api/v1/cluster_init_service.pb.go b/generated/api/v1/cluster_init_service.pb.go index 495aa78b6fd85..7e47037839767 100644 --- a/generated/api/v1/cluster_init_service.pb.go +++ b/generated/api/v1/cluster_init_service.pb.go @@ -25,6 +25,7 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// Deprecated: Marked as deprecated in api/v1/cluster_init_service.proto. type InitBundleMeta struct { state protoimpl.MessageState `protogen:"open.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` @@ -209,6 +210,7 @@ func (x *CRSMeta) GetRegistrationsCompleted() []string { return nil } +// Deprecated: Marked as deprecated in api/v1/cluster_init_service.proto. type InitBundleGenResponse struct { state protoimpl.MessageState `protogen:"open.v1"` Meta *InitBundleMeta `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta,omitempty"` @@ -365,6 +367,7 @@ func (x *GetCAConfigResponse) GetHelmValuesBundle() []byte { return nil } +// Deprecated: Marked as deprecated in api/v1/cluster_init_service.proto. type InitBundleMetasResponse struct { state protoimpl.MessageState `protogen:"open.v1"` Items []*InitBundleMeta `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` @@ -453,6 +456,7 @@ func (x *CRSMetasResponse) GetItems() []*CRSMeta { return nil } +// Deprecated: Marked as deprecated in api/v1/cluster_init_service.proto. type InitBundleGenRequest struct { state protoimpl.MessageState `protogen:"open.v1"` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -610,6 +614,7 @@ func (x *CRSGenRequestExtended) GetMaxRegistrations() uint64 { return 0 } +// Deprecated: Marked as deprecated in api/v1/cluster_init_service.proto. type InitBundleRevokeRequest struct { state protoimpl.MessageState `protogen:"open.v1"` Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` @@ -706,6 +711,7 @@ func (x *CRSRevokeRequest) GetIds() []string { return nil } +// Deprecated: Marked as deprecated in api/v1/cluster_init_service.proto. 
type InitBundleRevokeResponse struct { state protoimpl.MessageState `protogen:"open.v1"` InitBundleRevocationErrors []*InitBundleRevokeResponse_InitBundleRevocationError `protobuf:"bytes,2,rep,name=init_bundle_revocation_errors,json=initBundleRevocationErrors,proto3" json:"init_bundle_revocation_errors,omitempty"` @@ -978,7 +984,7 @@ var File_api_v1_cluster_init_service_proto protoreflect.FileDescriptor const file_api_v1_cluster_init_service_proto_rawDesc = "" + "\n" + - "!api/v1/cluster_init_service.proto\x12\x02v1\x1a\x12api/v1/empty.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x12storage/user.proto\"\xe0\x02\n" + + "!api/v1/cluster_init_service.proto\x12\x02v1\x1a\x12api/v1/empty.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x12storage/user.proto\"\xe4\x02\n" + "\x0eInitBundleMeta\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n" + "\x04name\x18\x02 \x01(\tR\x04name\x12O\n" + @@ -991,7 +997,7 @@ const file_api_v1_cluster_init_service_proto_rawDesc = "" + "expires_at\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampR\texpiresAt\x1a5\n" + "\x0fImpactedCluster\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\x12\x0e\n" + - "\x02id\x18\x02 \x01(\tR\x02id\"\xf0\x02\n" + + "\x02id\x18\x02 \x01(\tR\x02id:\x02\x18\x01\"\xf0\x02\n" + "\aCRSMeta\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n" + "\x04name\x18\x02 \x01(\tR\x04name\x129\n" + @@ -1003,22 +1009,22 @@ const file_api_v1_cluster_init_service_proto_rawDesc = "" + "expires_at\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampR\texpiresAt\x12+\n" + "\x11max_registrations\x18\a \x01(\x04R\x10maxRegistrations\x127\n" + "\x17registrations_initiated\x18\b \x03(\tR\x16registrationsInitiated\x127\n" + - "\x17registrations_completed\x18\t \x03(\tR\x16registrationsCompleted\"\x94\x01\n" + + "\x17registrations_completed\x18\t \x03(\tR\x16registrationsCompleted\"\x98\x01\n" + "\x15InitBundleGenResponse\x12&\n" + "\x04meta\x18\x01 \x01(\v2\x12.v1.InitBundleMetaR\x04meta\x12,\n" + "\x12helm_values_bundle\x18\x02 \x01(\fR\x10helmValuesBundle\x12%\n" + - "\x0ekubectl_bundle\x18\x03 \x01(\fR\rkubectlBundle\"C\n" + + "\x0ekubectl_bundle\x18\x03 \x01(\fR\rkubectlBundle:\x02\x18\x01\"C\n" + "\x0eCRSGenResponse\x12\x1f\n" + "\x04meta\x18\x01 \x01(\v2\v.v1.CRSMetaR\x04meta\x12\x10\n" + "\x03crs\x18\x02 \x01(\fR\x03crs\"C\n" + "\x13GetCAConfigResponse\x12,\n" + - "\x12helm_values_bundle\x18\x01 \x01(\fR\x10helmValuesBundle\"C\n" + + "\x12helm_values_bundle\x18\x01 \x01(\fR\x10helmValuesBundle\"G\n" + "\x17InitBundleMetasResponse\x12(\n" + - "\x05items\x18\x01 \x03(\v2\x12.v1.InitBundleMetaR\x05items\"5\n" + + "\x05items\x18\x01 \x03(\v2\x12.v1.InitBundleMetaR\x05items:\x02\x18\x01\"5\n" + "\x10CRSMetasResponse\x12!\n" + - "\x05items\x18\x01 \x03(\v2\v.v1.CRSMetaR\x05items\"*\n" + + "\x05items\x18\x01 \x03(\v2\v.v1.CRSMetaR\x05items\".\n" + "\x14InitBundleGenRequest\x12\x12\n" + - "\x04name\x18\x01 \x01(\tR\x04name\"#\n" + + "\x04name\x18\x01 \x01(\tR\x04name:\x02\x18\x01\"#\n" + "\rCRSGenRequest\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\"\xcd\x01\n" + "\x15CRSGenRequestExtended\x12\x12\n" + @@ -1026,33 +1032,33 @@ const file_api_v1_cluster_init_service_proto_rawDesc = "" + "\vvalid_until\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\n" + "validUntil\x126\n" + "\tvalid_for\x18\x03 \x01(\v2\x19.google.protobuf.DurationR\bvalidFor\x12+\n" + - "\x11max_registrations\x18\x05 
\x01(\x04R\x10maxRegistrations\"n\n" + + "\x11max_registrations\x18\x05 \x01(\x04R\x10maxRegistrations\"r\n" + "\x17InitBundleRevokeRequest\x12\x10\n" + "\x03ids\x18\x01 \x03(\tR\x03ids\x12A\n" + - "\x1dconfirm_impacted_clusters_ids\x18\x02 \x03(\tR\x1aconfirmImpactedClustersIds\"$\n" + + "\x1dconfirm_impacted_clusters_ids\x18\x02 \x03(\tR\x1aconfirmImpactedClustersIds:\x02\x18\x01\"$\n" + "\x10CRSRevokeRequest\x12\x10\n" + - "\x03ids\x18\x01 \x03(\tR\x03ids\"\xe1\x02\n" + + "\x03ids\x18\x01 \x03(\tR\x03ids\"\xe5\x02\n" + "\x18InitBundleRevokeResponse\x12y\n" + "\x1dinit_bundle_revocation_errors\x18\x02 \x03(\v26.v1.InitBundleRevokeResponse.InitBundleRevocationErrorR\x1ainitBundleRevocationErrors\x125\n" + "\x17init_bundle_revoked_ids\x18\x03 \x03(\tR\x14initBundleRevokedIds\x1a\x92\x01\n" + "\x19InitBundleRevocationError\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12\x14\n" + "\x05error\x18\x02 \x01(\tR\x05error\x12O\n" + - "\x11impacted_clusters\x18\x03 \x03(\v2\".v1.InitBundleMeta.ImpactedClusterR\x10impactedClusters\"\xce\x01\n" + + "\x11impacted_clusters\x18\x03 \x03(\v2\".v1.InitBundleMeta.ImpactedClusterR\x10impactedClusters:\x02\x18\x01\"\xce\x01\n" + "\x11CRSRevokeResponse\x12\\\n" + "\x15crs_revocation_errors\x18\x02 \x03(\v2(.v1.CRSRevokeResponse.CRSRevocationErrorR\x13crsRevocationErrors\x12\x1f\n" + "\vrevoked_ids\x18\x03 \x03(\tR\n" + "revokedIds\x1a:\n" + "\x12CRSRevocationError\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12\x14\n" + - "\x05error\x18\x02 \x01(\tR\x05error2\xb4\x06\n" + - "\x12ClusterInitService\x12~\n" + - "\x10RevokeInitBundle\x12\x1b.v1.InitBundleRevokeRequest\x1a\x1c.v1.InitBundleRevokeResponse\"/\x82\xd3\xe4\x93\x02):\x01*2$/v1/cluster-init/init-bundles/revoke\x12`\n" + + "\x05error\x18\x02 \x01(\tR\x05error2\xbe\x06\n" + + "\x12ClusterInitService\x12\x81\x01\n" + + "\x10RevokeInitBundle\x12\x1b.v1.InitBundleRevokeRequest\x1a\x1c.v1.InitBundleRevokeResponse\"2\x82\xd3\xe4\x93\x02):\x01*2$/v1/cluster-init/init-bundles/revoke\x88\x02\x01\x12`\n" + "\tRevokeCRS\x12\x14.v1.CRSRevokeRequest\x1a\x15.v1.CRSRevokeResponse\"&\x82\xd3\xe4\x93\x02 :\x01*2\x1b/v1/cluster-init/crs/revoke\x12U\n" + - "\vGetCAConfig\x12\t.v1.Empty\x1a\x17.v1.GetCAConfigResponse\"\"\x82\xd3\xe4\x93\x02\x1c\x12\x1a/v1/cluster-init/ca-config\x12_\n" + - "\x0eGetInitBundles\x12\t.v1.Empty\x1a\x1b.v1.InitBundleMetasResponse\"%\x82\xd3\xe4\x93\x02\x1f\x12\x1d/v1/cluster-init/init-bundles\x12H\n" + - "\aGetCRSs\x12\t.v1.Empty\x1a\x14.v1.CRSMetasResponse\"\x1c\x82\xd3\xe4\x93\x02\x16\x12\x14/v1/cluster-init/crs\x12s\n" + - "\x12GenerateInitBundle\x12\x18.v1.InitBundleGenRequest\x1a\x19.v1.InitBundleGenResponse\"(\x82\xd3\xe4\x93\x02\":\x01*\"\x1d/v1/cluster-init/init-bundles\x12U\n" + + "\vGetCAConfig\x12\t.v1.Empty\x1a\x17.v1.GetCAConfigResponse\"\"\x82\xd3\xe4\x93\x02\x1c\x12\x1a/v1/cluster-init/ca-config\x12b\n" + + "\x0eGetInitBundles\x12\t.v1.Empty\x1a\x1b.v1.InitBundleMetasResponse\"(\x82\xd3\xe4\x93\x02\x1f\x12\x1d/v1/cluster-init/init-bundles\x88\x02\x01\x12H\n" + + "\aGetCRSs\x12\t.v1.Empty\x1a\x14.v1.CRSMetasResponse\"\x1c\x82\xd3\xe4\x93\x02\x16\x12\x14/v1/cluster-init/crs\x12v\n" + + "\x12GenerateInitBundle\x12\x18.v1.InitBundleGenRequest\x1a\x19.v1.InitBundleGenResponse\"+\x82\xd3\xe4\x93\x02\":\x01*\"\x1d/v1/cluster-init/init-bundles\x88\x02\x01\x12U\n" + "\vGenerateCRS\x12\x11.v1.CRSGenRequest\x1a\x12.v1.CRSGenResponse\"\x1f\x82\xd3\xe4\x93\x02\x19:\x01*\"\x14/v1/cluster-init/crs\x12n\n" + 
"\x13GenerateCRSExtended\x12\x19.v1.CRSGenRequestExtended\x1a\x12.v1.CRSGenResponse\"(\x82\xd3\xe4\x93\x02\":\x01*\"\x1d/v1/cluster-init/crs-extendedB'\n" + "\x18io.stackrox.proto.api.v1Z\v./api/v1;v1X\x01b\x06proto3" diff --git a/generated/api/v1/cluster_init_service.swagger.json b/generated/api/v1/cluster_init_service.swagger.json index f82d4953befee..6b2f55b85b710 100644 --- a/generated/api/v1/cluster_init_service.swagger.json +++ b/generated/api/v1/cluster_init_service.swagger.json @@ -157,6 +157,7 @@ }, "/v1/cluster-init/init-bundles": { "get": { + "summary": "Init bundles are deprecated in favor of Cluster Registration Tokens (CRS).", "operationId": "ClusterInitService_GetInitBundles", "responses": { "200": { @@ -177,6 +178,7 @@ ] }, "post": { + "summary": "Init bundles are deprecated in favor of Cluster Registration Tokens (CRS).", "operationId": "ClusterInitService_GenerateInitBundle", "responses": { "200": { @@ -209,7 +211,7 @@ }, "/v1/cluster-init/init-bundles/revoke": { "patch": { - "summary": "RevokeInitBundle deletes cluster init bundle. If this operation impacts any cluster\nthen its ID should be included in request.\nIf confirm_impacted_clusters_ids does not match with current impacted clusters\nthen request will fail with error that includes all impacted clusters.", + "summary": "RevokeInitBundle deletes cluster init bundle. If this operation impacts any cluster\nthen its ID should be included in request.\nIf confirm_impacted_clusters_ids does not match with current impacted clusters\nthen request will fail with error that includes all impacted clusters.\nInit bundles are deprecated in favor of Cluster Registration Tokens (CRS).", "operationId": "ClusterInitService_RevokeInitBundle", "responses": { "200": { diff --git a/generated/api/v1/cluster_init_service_grpc.pb.go b/generated/api/v1/cluster_init_service_grpc.pb.go index 6f621d2697bb9..1bb9d0def14ec 100644 --- a/generated/api/v1/cluster_init_service_grpc.pb.go +++ b/generated/api/v1/cluster_init_service_grpc.pb.go @@ -35,16 +35,22 @@ const ( // // ClusterInitService manages cluster init bundles and CRSs. type ClusterInitServiceClient interface { + // Deprecated: Do not use. // RevokeInitBundle deletes cluster init bundle. If this operation impacts any cluster // then its ID should be included in request. // If confirm_impacted_clusters_ids does not match with current impacted clusters // then request will fail with error that includes all impacted clusters. + // Init bundles are deprecated in favor of Cluster Registration Tokens (CRS). RevokeInitBundle(ctx context.Context, in *InitBundleRevokeRequest, opts ...grpc.CallOption) (*InitBundleRevokeResponse, error) // RevokeCRSBundle deletes cluster registration secrets. RevokeCRS(ctx context.Context, in *CRSRevokeRequest, opts ...grpc.CallOption) (*CRSRevokeResponse, error) GetCAConfig(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*GetCAConfigResponse, error) + // Deprecated: Do not use. + // Init bundles are deprecated in favor of Cluster Registration Tokens (CRS). GetInitBundles(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*InitBundleMetasResponse, error) GetCRSs(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*CRSMetasResponse, error) + // Deprecated: Do not use. + // Init bundles are deprecated in favor of Cluster Registration Tokens (CRS). 
GenerateInitBundle(ctx context.Context, in *InitBundleGenRequest, opts ...grpc.CallOption) (*InitBundleGenResponse, error) GenerateCRS(ctx context.Context, in *CRSGenRequest, opts ...grpc.CallOption) (*CRSGenResponse, error) GenerateCRSExtended(ctx context.Context, in *CRSGenRequestExtended, opts ...grpc.CallOption) (*CRSGenResponse, error) @@ -58,6 +64,7 @@ func NewClusterInitServiceClient(cc grpc.ClientConnInterface) ClusterInitService return &clusterInitServiceClient{cc} } +// Deprecated: Do not use. func (c *clusterInitServiceClient) RevokeInitBundle(ctx context.Context, in *InitBundleRevokeRequest, opts ...grpc.CallOption) (*InitBundleRevokeResponse, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(InitBundleRevokeResponse) @@ -88,6 +95,7 @@ func (c *clusterInitServiceClient) GetCAConfig(ctx context.Context, in *Empty, o return out, nil } +// Deprecated: Do not use. func (c *clusterInitServiceClient) GetInitBundles(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*InitBundleMetasResponse, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(InitBundleMetasResponse) @@ -108,6 +116,7 @@ func (c *clusterInitServiceClient) GetCRSs(ctx context.Context, in *Empty, opts return out, nil } +// Deprecated: Do not use. func (c *clusterInitServiceClient) GenerateInitBundle(ctx context.Context, in *InitBundleGenRequest, opts ...grpc.CallOption) (*InitBundleGenResponse, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(InitBundleGenResponse) @@ -144,16 +153,22 @@ func (c *clusterInitServiceClient) GenerateCRSExtended(ctx context.Context, in * // // ClusterInitService manages cluster init bundles and CRSs. type ClusterInitServiceServer interface { + // Deprecated: Do not use. // RevokeInitBundle deletes cluster init bundle. If this operation impacts any cluster // then its ID should be included in request. // If confirm_impacted_clusters_ids does not match with current impacted clusters // then request will fail with error that includes all impacted clusters. + // Init bundles are deprecated in favor of Cluster Registration Tokens (CRS). RevokeInitBundle(context.Context, *InitBundleRevokeRequest) (*InitBundleRevokeResponse, error) // RevokeCRSBundle deletes cluster registration secrets. RevokeCRS(context.Context, *CRSRevokeRequest) (*CRSRevokeResponse, error) GetCAConfig(context.Context, *Empty) (*GetCAConfigResponse, error) + // Deprecated: Do not use. + // Init bundles are deprecated in favor of Cluster Registration Tokens (CRS). GetInitBundles(context.Context, *Empty) (*InitBundleMetasResponse, error) GetCRSs(context.Context, *Empty) (*CRSMetasResponse, error) + // Deprecated: Do not use. + // Init bundles are deprecated in favor of Cluster Registration Tokens (CRS). 
GenerateInitBundle(context.Context, *InitBundleGenRequest) (*InitBundleGenResponse, error) GenerateCRS(context.Context, *CRSGenRequest) (*CRSGenResponse, error) GenerateCRSExtended(context.Context, *CRSGenRequestExtended) (*CRSGenResponse, error) diff --git a/proto/api/v1/cluster_init_service.proto b/proto/api/v1/cluster_init_service.proto index c90dacf510f59..8717bc6796d74 100644 --- a/proto/api/v1/cluster_init_service.proto +++ b/proto/api/v1/cluster_init_service.proto @@ -12,6 +12,7 @@ option go_package = "./api/v1;v1"; option java_package = "io.stackrox.proto.api.v1"; message InitBundleMeta { + option deprecated = true; message ImpactedCluster { string name = 1; string id = 2; @@ -37,6 +38,7 @@ message CRSMeta { } message InitBundleGenResponse { + option deprecated = true; InitBundleMeta meta = 1; bytes helm_values_bundle = 2; bytes kubectl_bundle = 3; @@ -52,6 +54,7 @@ message GetCAConfigResponse { } message InitBundleMetasResponse { + option deprecated = true; repeated InitBundleMeta items = 1; } @@ -60,6 +63,7 @@ message CRSMetasResponse { } message InitBundleGenRequest { + option deprecated = true; string name = 1; } @@ -76,6 +80,7 @@ message CRSGenRequestExtended { } message InitBundleRevokeRequest { + option deprecated = true; repeated string ids = 1; repeated string confirm_impacted_clusters_ids = 2; } @@ -85,6 +90,7 @@ message CRSRevokeRequest { } message InitBundleRevokeResponse { + option deprecated = true; message InitBundleRevocationError { string id = 1; string error = 2; @@ -111,7 +117,9 @@ service ClusterInitService { // then its ID should be included in request. // If confirm_impacted_clusters_ids does not match with current impacted clusters // then request will fail with error that includes all impacted clusters. + // Init bundles are deprecated in favor of Cluster Registration Tokens (CRS). rpc RevokeInitBundle(InitBundleRevokeRequest) returns (InitBundleRevokeResponse) { + option deprecated = true; option (google.api.http) = { patch: "/v1/cluster-init/init-bundles/revoke" body: "*" @@ -130,7 +138,9 @@ service ClusterInitService { option (google.api.http) = {get: "/v1/cluster-init/ca-config"}; } + // Init bundles are deprecated in favor of Cluster Registration Tokens (CRS). rpc GetInitBundles(Empty) returns (InitBundleMetasResponse) { + option deprecated = true; option (google.api.http) = {get: "/v1/cluster-init/init-bundles"}; } @@ -138,7 +148,9 @@ service ClusterInitService { option (google.api.http) = {get: "/v1/cluster-init/crs"}; } + // Init bundles are deprecated in favor of Cluster Registration Tokens (CRS). 
rpc GenerateInitBundle(InitBundleGenRequest) returns (InitBundleGenResponse) { + option deprecated = true; option (google.api.http) = { post: "/v1/cluster-init/init-bundles" body: "*" From 9f1184eaa4840bda173e14d26a20e9bc90c3c66a Mon Sep 17 00:00:00 2001 From: Tomasz Janiszewski Date: Thu, 19 Feb 2026 16:14:37 +0000 Subject: [PATCH 230/232] fix(CI): capture GHA job failures in JUnit format (#19049) Co-authored-by: Claude Sonnet 4.5 Co-authored-by: sourcery-ai[bot] <58596630+sourcery-ai[bot]@users.noreply.github.com> --- .github/actions/junit2jira/action.yaml | 16 +++ scripts/ci/bats/lib_junit.bats | 191 +++++++++++++++++++++++++ scripts/ci/lib.sh | 82 ++++++++++- 3 files changed, 288 insertions(+), 1 deletion(-) diff --git a/.github/actions/junit2jira/action.yaml b/.github/actions/junit2jira/action.yaml index 96b39cbcd6dcb..82e6eebe9f9f8 100644 --- a/.github/actions/junit2jira/action.yaml +++ b/.github/actions/junit2jira/action.yaml @@ -40,6 +40,22 @@ runs: curl --retry 5 --retry-connrefused --silent --show-error --fail --location --output junit2jira "$LOCATION" chmod +x junit2jira fi + - name: Capture job failure as JUnit if no test failures exist + shell: bash + if: always() + env: + STEPS_JSON: ${{ toJSON(steps) }} + run: | + set -u + source scripts/ci/lib.sh + export ARTIFACT_DIR="${{ inputs.directory }}" + + capture_job_failure_as_junit \ + "${{ inputs.directory }}" \ + "${{ github.job }}" \ + "${{ job.status }}" \ + "$STEPS_JSON" \ + "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" - name: Check files id: run shell: bash diff --git a/scripts/ci/bats/lib_junit.bats b/scripts/ci/bats/lib_junit.bats index 8dfc83cfdb70a..88b68842c9ad2 100644 --- a/scripts/ci/bats/lib_junit.bats +++ b/scripts/ci/bats/lib_junit.bats @@ -145,3 +145,194 @@ _EO_DETAILS_ run cat "${junit_dir}/junit-UNITTest.xml" assert_output --partial 'name="A <unit> &test"' } + +@test "junit_contains_failure detects failures from save_junit_failure" { + run save_junit_failure "UNITTest" "A unit test" "failure details" + assert_success + run junit_contains_failure "${ARTIFACT_DIR}" + assert_success +} + +@test "junit_contains_failure returns false for empty directory" { + run junit_contains_failure "${ARTIFACT_DIR}" + assert_failure +} + +@test "junit_contains_failure returns false for non-existent directory" { + run junit_contains_failure "${ARTIFACT_DIR}/does-not-exist" + assert_failure +} + +@test "junit_contains_failure returns false for success-only junit" { + run save_junit_success "UNITTest" "A unit test" + assert_success + run junit_contains_failure "${ARTIFACT_DIR}" + assert_failure +} + +@test "junit_contains_failure detects failures with attributes" { + mkdir -p "${junit_dir}" + echo 'test' > "${junit_dir}/test.xml" + run junit_contains_failure "${ARTIFACT_DIR}" + assert_success +} + +@test "junit_contains_failure detects failures without attributes" { + mkdir -p "${junit_dir}" + echo 'test' > "${junit_dir}/test.xml" + run junit_contains_failure "${ARTIFACT_DIR}" + assert_success +} + +@test "capture_job_failure_as_junit: job succeeds - no failure record" { + run capture_job_failure_as_junit \ + "${ARTIFACT_DIR}" \ + "test-job" \ + "success" \ + '{}' \ + "https://github.com/test/repo/actions/runs/123" + assert_success + assert_output --partial "no failure record needed" + + # Verify no JUnit files were created (directory may not exist if no files created) + if [[ -d "${junit_dir}" ]]; then + run find "${junit_dir}" -name "*.xml" + assert_output "" + fi +} + +@test 
"capture_job_failure_as_junit: job fails with existing JUnit failures - skips" { + # Create existing failure + save_junit_failure "ExistingTest" "test" "existing failure" + + run capture_job_failure_as_junit \ + "${ARTIFACT_DIR}" \ + "test-job" \ + "failure" \ + '{}' \ + "https://github.com/test/repo/actions/runs/123" + assert_success + assert_output --partial "JUnit test failures already exist" + + # Verify only the original failure exists, no test-job file created + assert [ -f "${junit_dir}/junit-ExistingTest.xml" ] + assert [ ! -f "${junit_dir}/junit-test-job.xml" ] + run grep -c 'classname="ExistingTest"' "${junit_dir}/junit-ExistingTest.xml" + assert_output 1 +} + +@test "capture_job_failure_as_junit: job fails with failed step - creates specific failure" { + steps_json='{"my-step":{"outcome":"failure","conclusion":"failure"}}' + + run capture_job_failure_as_junit \ + "${ARTIFACT_DIR}" \ + "test-job" \ + "failure" \ + "$steps_json" \ + "https://github.com/test/repo/actions/runs/123" + assert_success + assert_output --partial "Created JUnit failure record for step: my-step" + + # Verify JUnit file exists + assert [ -f "${junit_dir}/junit-test-job.xml" ] + + # Create expected XML for comparison + expected="${BATS_TEST_TMPDIR}/expected.xml" + cat > "$expected" <<'EOF' + + + + + +EOF + + # Compare actual with expected + run diff "${junit_dir}/junit-test-job.xml" "$expected" + assert_success + + # Verify junit_contains_failure detects it + run junit_contains_failure "${ARTIFACT_DIR}" + assert_success +} + +@test "capture_job_failure_as_junit: job fails without failed step - creates generic failure" { + steps_json='{"my-step":{"outcome":"success","conclusion":"success"}}' + + run capture_job_failure_as_junit \ + "${ARTIFACT_DIR}" \ + "test-job" \ + "failure" \ + "$steps_json" \ + "https://github.com/test/repo/actions/runs/123" + assert_success + assert_output --partial "Created generic JUnit failure record for job: test-job" + + # Verify JUnit file exists + assert [ -f "${junit_dir}/junit-test-job.xml" ] + + # Create expected XML for comparison + expected="${BATS_TEST_TMPDIR}/expected-generic.xml" + cat > "$expected" <<'EOF' + + + + + +EOF + + # Compare actual with expected + run diff "${junit_dir}/junit-test-job.xml" "$expected" + assert_success + + # Verify junit_contains_failure detects it + run junit_contains_failure "${ARTIFACT_DIR}" + assert_success +} + +@test "capture_job_failure_as_junit: empty steps JSON - creates generic failure" { + run capture_job_failure_as_junit \ + "${ARTIFACT_DIR}" \ + "test-job" \ + "failure" \ + '{}' \ + "https://github.com/test/repo/actions/runs/123" + assert_success + assert_output --partial "Created generic JUnit failure record for job: test-job" + + # Verify the file was created and has correct structure + assert [ -f "${junit_dir}/junit-test-job.xml" ] + run cat "${junit_dir}/junit-test-job.xml" + assert_output --partial 'name="error"' + assert_output --partial 'failures="1"' +} + +@test "capture_job_failure_as_junit: multiple failed steps - uses first one" { + steps_json='{"step-one":{"outcome":"failure","conclusion":"failure"},"step-two":{"outcome":"failure","conclusion":"failure"}}' + + run capture_job_failure_as_junit \ + "${ARTIFACT_DIR}" \ + "test-job" \ + "failure" \ + "$steps_json" \ + "https://github.com/test/repo/actions/runs/123" + assert_success + assert_output --partial "Created JUnit failure record for step: step-one" + + # Verify only step-one is recorded (not step-two) + run cat "${junit_dir}/junit-test-job.xml" + assert_output 
--partial 'name="step-one"' + refute_output --partial 'name="step-two"' +} + +@test "capture_job_failure_as_junit: missing arguments - dies" { + run capture_job_failure_as_junit \ + "${ARTIFACT_DIR}" \ + "test-job" + assert_failure + assert_output --partial "missing args" +} diff --git a/scripts/ci/lib.sh b/scripts/ci/lib.sh index 8bdbf9f376d60..3708bb4b9352c 100755 --- a/scripts/ci/lib.sh +++ b/scripts/ci/lib.sh @@ -2196,7 +2196,8 @@ junit_contains_failure() { # and "return" does not mix with piping to "while read", so we use a "for" over find. # shellcheck disable=SC2044 for f in $(find "$dir" -type f -iname '*.xml'); do - if grep -q ' and formats + if grep -q ']' "$f"; then return 0 fi done @@ -2335,6 +2336,85 @@ _EO_SKIPPED_ echo "" >> "${junit_file}" } +# capture_job_failure_as_junit() - generates a JUnit failure record when a job +# fails without producing JUnit test failures. This captures infrastructure +# failures (e.g., docker login, setup steps) that occur before tests run. +# +# Usage: capture_job_failure_as_junit +# +# Arguments: +# directory - Directory where JUnit XML files are stored +# job_name - Name of the current GitHub Actions job +# job_status - Status of the job (success, failure, cancelled) +# steps_json - JSON output from toJSON(steps) context +# workflow_run_url - URL to the workflow run for debugging +# +# Returns: +# 0 if no action was needed or if failure record was created successfully +# 1 if there was an error +capture_job_failure_as_junit() { + if [[ "$#" -ne 5 ]]; then + die "missing args. usage: capture_job_failure_as_junit " + fi + + local directory="$1" + local job_name="$2" + local job_status="$3" + local steps_json="$4" + local workflow_run_url="$5" + + # Only process failures + if [[ "$job_status" != "failure" ]]; then + info "Job status: ${job_status} - no failure record needed" + return 0 + fi + + # Check if JUnit test failures already exist + if junit_contains_failure "$directory"; then + info "JUnit test failures already exist - skipping failure record" + return 0 + fi + + info "Job failed but no JUnit test failures found - looking for failed step" + + # Try to find a specific failed step from steps context (only includes steps with id) + local failed_step + failed_step=$(echo "$steps_json" | jq -r 'to_entries[] | select(.value.outcome == "failure") | .key' | head -1) + + if [[ -n "$failed_step" ]]; then + # Found a specific failed step - use its details + local step_outcome step_conclusion + step_outcome=$(echo "$steps_json" | jq -r ".[\"$failed_step\"].outcome") + step_conclusion=$(echo "$steps_json" | jq -r ".[\"$failed_step\"].conclusion") + + local failure_details + failure_details=$(cat < Date: Thu, 19 Feb 2026 13:20:53 -0500 Subject: [PATCH 231/232] ROX-33207: Fix ImageV2 SearchListImages query (#19084) --- central/imagev2/datastore/store/postgres/store.go | 2 +- central/imagev2/views/list_image_view.go | 2 +- generated/storage/image_v2.pb.go | 2 +- pkg/search/options.go | 1 + proto/storage/image_v2.proto | 2 +- 5 files changed, 5 insertions(+), 4 deletions(-) diff --git a/central/imagev2/datastore/store/postgres/store.go b/central/imagev2/datastore/store/postgres/store.go index e5d7b044a9b8f..f40c4155f66de 100644 --- a/central/imagev2/datastore/store/postgres/store.go +++ b/central/imagev2/datastore/store/postgres/store.go @@ -985,7 +985,7 @@ func (s *storeImpl) GetListImagesView(ctx context.Context, q *v1.Query) ([]*view selects := []*v1.QuerySelect{ search.NewQuerySelect(search.ImageSHA).Proto(), 
search.NewQuerySelect(search.ImageName).Proto(), - search.NewQuerySelect(search.ComponentCount).Proto(), + search.NewQuerySelect(search.ImageComponentCount).Proto(), search.NewQuerySelect(search.ImageCVECount).Proto(), search.NewQuerySelect(search.FixableCVECount).Proto(), search.NewQuerySelect(search.ImageCreatedTime).Proto(), diff --git a/central/imagev2/views/list_image_view.go b/central/imagev2/views/list_image_view.go index 82c328fa8bf91..2be005fd8e474 100644 --- a/central/imagev2/views/list_image_view.go +++ b/central/imagev2/views/list_image_view.go @@ -12,7 +12,7 @@ import ( type ListImageV2View struct { Digest string `db:"image_sha"` Name string `db:"image"` - ComponentCount int32 `db:"component_count"` + ComponentCount int32 `db:"image_component_count"` CVECount int32 `db:"image_cve_count"` FixableCVECount int32 `db:"fixable_cve_count"` Created *time.Time `db:"image_created_time"` diff --git a/generated/storage/image_v2.pb.go b/generated/storage/image_v2.pb.go index 6c304fe896c3f..91c8fec274ec4 100644 --- a/generated/storage/image_v2.pb.go +++ b/generated/storage/image_v2.pb.go @@ -343,7 +343,7 @@ func (x *ListImageV2) GetPriority() int64 { type ImageV2_ScanStats struct { state protoimpl.MessageState `protogen:"open.v1"` // Caching component count to avoid re-calculating it by joining on the component table. - ComponentCount int32 `protobuf:"varint,1,opt,name=component_count,json=componentCount,proto3" json:"component_count,omitempty" search:"Component Count,hidden"` // @gotags: search:"Component Count,hidden" + ComponentCount int32 `protobuf:"varint,1,opt,name=component_count,json=componentCount,proto3" json:"component_count,omitempty" search:"Image Component Count,hidden"` // @gotags: search:"Image Component Count,hidden" // Caching cve count to avoid re-calculating it by joining on the cve table. CveCount int32 `protobuf:"varint,2,opt,name=cve_count,json=cveCount,proto3" json:"cve_count,omitempty" search:"Image CVE Count,hidden"` // @gotags: search:"Image CVE Count,hidden" // Caching fixable cve count to avoid re-calculating it by joining on the cve table. diff --git a/pkg/search/options.go b/pkg/search/options.go index fb4d931804b2c..1c2111f837397 100644 --- a/pkg/search/options.go +++ b/pkg/search/options.go @@ -97,6 +97,7 @@ var ( ImageUser = newFieldLabel("Image User") ImageCommand = newFieldLabel("Image Command") ImageCVECount = newFieldLabel("Image CVE Count") + ImageComponentCount = newFieldLabel("Image Component Count") ImageEntrypoint = newFieldLabel("Image Entrypoint") ImageLabel = newFieldLabel("Image Label") ImageVolumes = newFieldLabel("Image Volumes") diff --git a/proto/storage/image_v2.proto b/proto/storage/image_v2.proto index c3120d96e5185..65f4beca37dfd 100644 --- a/proto/storage/image_v2.proto +++ b/proto/storage/image_v2.proto @@ -22,7 +22,7 @@ message ImageV2 { message ScanStats { // Caching component count to avoid re-calculating it by joining on the component table. - int32 component_count = 1; // @gotags: search:"Component Count,hidden" + int32 component_count = 1; // @gotags: search:"Image Component Count,hidden" // Caching cve count to avoid re-calculating it by joining on the cve table. 
int32 cve_count = 2; // @gotags: search:"Image CVE Count,hidden" From d3feadfecfc08b6f76ee8ca3616f4abbc932b0df Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Feb 2026 12:06:56 -0700 Subject: [PATCH 232/232] chore(deps): bump org.spockframework:spock-bom from 2.3-groovy-4.0 to 2.4-groovy-5.0 in /qa-tests-backend (#18199) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- qa-tests-backend/gradle/libs.versions.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/qa-tests-backend/gradle/libs.versions.toml b/qa-tests-backend/gradle/libs.versions.toml index f14494dd3e5a8..541dbb1f82864 100644 --- a/qa-tests-backend/gradle/libs.versions.toml +++ b/qa-tests-backend/gradle/libs.versions.toml @@ -17,7 +17,7 @@ grpc-auth = { module = "io.grpc:grpc-auth", version.ref = "grpc" } netty-tcnative-boringssl-static = { module = "io.netty:netty-tcnative-boringssl-static", version.ref = "netty-tcnative" } groovy-bom = { module = "org.apache.groovy:groovy-bom", version = "groovy" } groovy = { module = "org.apache.groovy:groovy", version.ref = "groovy" } -spock-bom = { module = "org.spockframework:spock-bom", version = "2.4-M7-groovy-5.0" } +spock-bom = { module = "org.spockframework:spock-bom", version = "2.4-groovy-5.0" } spock-core = { module = "org.spockframework:spock-core" } spock-junit4 = { module = "org.spockframework:spock-junit4" } rest-assured = { module = "io.rest-assured:rest-assured", version = "6.0.0" }
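
For reference, a minimal sketch of the bounded-concurrency pattern exercised by the deployment-risk reprocessing tests above, using golang.org/x/sync/semaphore. The helper name and the way the new ROX_DEPLOYMENT_RISK_MAX_CONCURRENCY and ROX_DEPLOYMENT_RISK_SEMAPHORE_WAIT_TIME settings are passed in are assumptions for illustration only; the real pipeline wiring may differ.

package sketch // illustration only, not part of the patch series

import (
	"context"
	"time"

	"golang.org/x/sync/semaphore"
)

// withRiskSlot is a hypothetical helper: it takes one slot from sem before running
// work and releases it afterwards. maxWait mirrors ROX_DEPLOYMENT_RISK_SEMAPHORE_WAIT_TIME;
// a zero maxWait means "block until ctx is done", matching the setting's documented behaviour.
func withRiskSlot(ctx context.Context, sem *semaphore.Weighted, maxWait time.Duration, work func() error) error {
	acquireCtx := ctx
	if maxWait > 0 {
		var cancel context.CancelFunc
		acquireCtx, cancel = context.WithTimeout(ctx, maxWait)
		defer cancel()
	}
	// Acquire returns immediately with the context error if acquireCtx is already
	// cancelled, which is the behaviour TestRunRespectsContextCancellation checks.
	if err := sem.Acquire(acquireCtx, 1); err != nil {
		return err
	}
	defer sem.Release(1)
	return work()
}

// Example wiring (values assumed): sem := semaphore.NewWeighted(15) shared across workers,
// then withRiskSlot(ctx, sem, 2*time.Minute, func() error { return reprocess(dep) }).

Using Acquire with a context deadline rather than TryAcquire in a retry loop queues waiters fairly and surfaces a distinct error when the wait time elapses versus when the caller's context is cancelled.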
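
On the init-bundle deprecation above, a hedged sketch of what switching a caller from the deprecated GenerateInitBundle RPC to its CRS replacement could look like, based only on the generated client interface and message types shown in that patch. The import path, connection handling, and the generated GetCrs getter are assumptions; check the real generated package before relying on them.

package sketch // illustration only

import (
	"context"

	v1 "github.com/stackrox/rox/generated/api/v1" // path assumed from the generated/api/v1 layout in the diff
	"google.golang.org/grpc"
)

// generateCRS calls the CRS RPC that the deprecation comments name as the
// replacement for GenerateInitBundle and returns the raw registration secret.
func generateCRS(ctx context.Context, conn grpc.ClientConnInterface, name string) ([]byte, error) {
	client := v1.NewClusterInitServiceClient(conn)
	resp, err := client.GenerateCRS(ctx, &v1.CRSGenRequest{Name: name})
	if err != nil {
		return nil, err
	}
	return resp.GetCrs(), nil
}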