Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2024-01-09 18:07:32 +00:00
parent 1f753bca26
commit cd22685717
92 changed files with 1479 additions and 276 deletions

View File

@ -40,11 +40,12 @@ default:
OMNIBUS_GITLAB_CACHE_EDITION: "GITLAB_RUBY3_2"
.default-branch-pipeline-failure-variables: &default-branch-pipeline-failure-variables
CREATE_RAILS_TEST_FAILURE_ISSUES: "true"
CREATE_RAILS_SLOW_TEST_ISSUES: "true"
CREATE_RAILS_TEST_FAILURE_ISSUES: "true"
.default-merge-request-slow-tests-variables: &default-merge-request-slow-tests-variables
.default-merge-request-variables: &default-merge-request-variables
ADD_SLOW_TEST_NOTE_TO_MERGE_REQUEST: "true"
CREATE_RAILS_FLAKY_TEST_ISSUES: "true"
.if-merge-request-security-canonical-sync: &if-merge-request-security-canonical-sync
if: '$CI_MERGE_REQUEST_SOURCE_PROJECT_PATH == "gitlab-org/security/gitlab" && $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME == $CI_DEFAULT_BRANCH && $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH'
@ -104,8 +105,7 @@ workflow:
# For (detached) merge request pipelines.
- if: '$CI_MERGE_REQUEST_IID'
variables:
<<: *default-ruby-variables
<<: *default-merge-request-slow-tests-variables
<<: [*default-ruby-variables, *default-merge-request-variables]
PIPELINE_NAME: 'Ruby $RUBY_VERSION $CI_MERGE_REQUEST_EVENT_TYPE MR pipeline'
NO_SOURCEMAPS: 'true'
# For the scheduled pipelines, we set specific variables.
@ -122,7 +122,7 @@ workflow:
variables:
<<: *next-ruby-variables
PIPELINE_NAME: 'Scheduled Ruby $RUBY_VERSION $CI_COMMIT_BRANCH branch pipeline'
# This work around https://gitlab.com/gitlab-org/gitlab/-/issues/332411 whichs prevents usage of dependency proxy
# This work around https://gitlab.com/gitlab-org/gitlab/-/issues/332411 which prevents usage of dependency proxy
# when pipeline is triggered by a project access token.
- if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $GITLAB_USER_LOGIN =~ /project_\d+_bot\d*/'
variables:
@ -183,7 +183,7 @@ variables:
CI_FETCH_REPO_GIT_STRATEGY: "none"
DEBIAN_VERSION: "bullseye"
UBI_VERSION: "8.6"
CHROME_VERSION: "113"
CHROME_VERSION: "119"
DOCKER_VERSION: "24.0.5"
RUBYGEMS_VERSION: "3.4"
GO_VERSION: "1.20"

View File

@ -80,7 +80,7 @@ gemnasium-python-dependency_scanning:
extends: .default-retry
stage: test
image:
name: ${REGISTRY_HOST}/${REGISTRY_GROUP}/security-products/package-hunter-cli:v3.0.0@sha256:e281525b3be870d6618b6bad2685733dcb9908e4eb21f0e5b4fe4bb6f6083f91
name: ${REGISTRY_HOST}/${REGISTRY_GROUP}/security-products/package-hunter-cli:v3.0.1@sha256:ffa4af2810fed6922ba9d19badc4636043f54f70db19aebb8253e83142e5da16
entrypoint: [""]
variables:
HTR_user: '$PACKAGE_HUNTER_USER'

View File

@ -87,7 +87,6 @@ start-review-app-pipeline:
# https://gitlab.com/gitlab-org/gitlab/-/issues/387183
inherit:
variables:
- CHROME_VERSION
- REGISTRY_GROUP
- REGISTRY_HOST
- REVIEW_APPS_DOMAIN
@ -96,6 +95,8 @@ start-review-app-pipeline:
- REVIEW_APPS_IMAGE
- RUBY_VERSION
- DEBIAN_VERSION
- DOCKER_VERSION
- CHROME_VERSION
# These variables are set in the pipeline schedules.
# They need to be explicitly passed on to the child pipeline.

View File

@ -68,7 +68,6 @@ RSpec/BeforeAllRoleAssignment:
- 'ee/spec/features/boards/sidebar_spec.rb'
- 'ee/spec/features/boards/swimlanes/epics_swimlanes_sidebar_spec.rb'
- 'ee/spec/features/burnup_charts_spec.rb'
- 'ee/spec/features/ci/ci_catalog_spec.rb'
- 'ee/spec/features/dashboards/todos_spec.rb'
- 'ee/spec/features/epic_boards/epic_boards_sidebar_spec.rb'
- 'ee/spec/features/epic_boards/epic_boards_spec.rb'
@ -265,7 +264,6 @@ RSpec/BeforeAllRoleAssignment:
- 'ee/spec/graphql/types/vulnerability_response_type_spec.rb'
- 'ee/spec/graphql/types/vulnerability_scanner_type_spec.rb'
- 'ee/spec/graphql/types/vulnerability_type_spec.rb'
- 'ee/spec/helpers/ee/ci/catalog/resources_helper_spec.rb'
- 'ee/spec/helpers/ee/ci/pipeline_editor_helper_spec.rb'
- 'ee/spec/helpers/ee/environments_helper_spec.rb'
- 'ee/spec/helpers/ee/groups_helper_spec.rb'
@ -552,7 +550,6 @@ RSpec/BeforeAllRoleAssignment:
- 'ee/spec/requests/jwt_controller_spec.rb'
- 'ee/spec/requests/lfs_locks_api_spec.rb'
- 'ee/spec/requests/projects/analytics/cycle_analytics/stages_controller_spec.rb'
- 'ee/spec/requests/projects/ci/catalog/resources_controller_spec.rb'
- 'ee/spec/requests/projects/dependencies_controller_spec.rb'
- 'ee/spec/requests/projects/issues_controller_spec.rb'
- 'ee/spec/requests/projects/on_demand_scans_controller_spec.rb'

View File

@ -193,7 +193,6 @@ RSpec/NamedSubject:
- 'ee/spec/helpers/compliance_management/compliance_framework/group_settings_helper_spec.rb'
- 'ee/spec/helpers/ee/auth_helper_spec.rb'
- 'ee/spec/helpers/ee/branches_helper_spec.rb'
- 'ee/spec/helpers/ee/ci/catalog/resources_helper_spec.rb'
- 'ee/spec/helpers/ee/ci/runners_helper_spec.rb'
- 'ee/spec/helpers/ee/emails_helper_spec.rb'
- 'ee/spec/helpers/ee/environments_helper_spec.rb'

View File

@ -1986,7 +1986,6 @@ Style/InlineDisableAnnotation:
- 'ee/spec/features/trials/saas/creation_with_one_existing_namespace_flow_spec.rb'
- 'ee/spec/finders/audit_event_finder_spec.rb'
- 'ee/spec/finders/ee/group_members_finder_spec.rb'
- 'ee/spec/frontend/fixtures/ci_catalog_resources.rb'
- 'ee/spec/helpers/analytics/analytics_dashboards_helper_spec.rb'
- 'ee/spec/helpers/ee/dashboard_helper_spec.rb'
- 'ee/spec/helpers/ee/releases_helper_spec.rb'

View File

@ -1,9 +1,12 @@
<script>
import { GlTabs, GlTab, GlLoadingIcon, GlBadge, GlTable, GlPagination } from '@gitlab/ui';
import { __, s__ } from '~/locale';
import { getAge } from '~/kubernetes_dashboard/helpers/k8s_integration_helper';
import {
getAge,
generateServicePortsString,
} from '~/kubernetes_dashboard/helpers/k8s_integration_helper';
import { SERVICES_TABLE_FIELDS } from '~/kubernetes_dashboard/constants';
import k8sServicesQuery from '../graphql/queries/k8s_services.query.graphql';
import { generateServicePortsString } from '../helpers/k8s_integration_helper';
import { SERVICES_LIMIT_PER_PAGE } from '../constants';
import KubernetesSummary from './kubernetes_summary.vue';
@ -82,6 +85,14 @@ export default {
? null
: nextPage;
},
servicesFields() {
return SERVICES_TABLE_FIELDS.map((field) => {
return {
...field,
thClass: tableHeadingClasses,
};
});
},
},
i18n: {
servicesTitle: s__('Environment|Services'),
@ -94,43 +105,6 @@ export default {
ports: s__('Environment|Ports'),
age: s__('Environment|Age'),
},
servicesFields: [
{
key: 'name',
label: __('Name'),
thClass: tableHeadingClasses,
},
{
key: 'namespace',
label: __('Namespace'),
thClass: tableHeadingClasses,
},
{
key: 'type',
label: __('Type'),
thClass: tableHeadingClasses,
},
{
key: 'clusterIP',
label: s__('Environment|Cluster IP'),
thClass: tableHeadingClasses,
},
{
key: 'externalIP',
label: s__('Environment|External IP'),
thClass: tableHeadingClasses,
},
{
key: 'ports',
label: s__('Environment|Ports'),
thClass: tableHeadingClasses,
},
{
key: 'age',
label: s__('Environment|Age'),
thClass: tableHeadingClasses,
},
],
SERVICES_LIMIT_PER_PAGE,
};
</script>
@ -154,7 +128,7 @@ export default {
<gl-table
v-else
:fields="$options.servicesFields"
:fields="servicesFields"
:items="servicesItems"
:per-page="$options.SERVICES_LIMIT_PER_PAGE"
:current-page="currentPage"

View File

@ -13,17 +13,6 @@ import {
} from '~/kubernetes_dashboard/constants';
import { CLUSTER_AGENT_ERROR_MESSAGES } from '../constants';
export function generateServicePortsString(ports) {
if (!ports?.length) return '';
return ports
.map((port) => {
const nodePort = port.nodePort ? `:${port.nodePort}` : '';
return `${port.port}${nodePort}/${port.protocol}`;
})
.join(', ');
}
export function getDeploymentsStatuses(items) {
const failed = [];
const ready = [];

View File

@ -14,8 +14,7 @@ export default {
item: {
type: Object,
required: true,
validator: (item) =>
['name', 'kind', 'labels', 'annotations', 'status'].every((key) => item[key]),
validator: (item) => ['name', 'kind', 'labels', 'annotations'].every((key) => item[key]),
},
},
computed: {
@ -63,7 +62,7 @@ export default {
</gl-badge>
</div>
</workload-details-item>
<workload-details-item :label="$options.i18n.status">
<workload-details-item v-if="item.status" :label="$options.i18n.status">
<gl-badge :variant="$options.WORKLOAD_STATUS_BADGE_VARIANTS[item.status]">{{
item.status
}}</gl-badge></workload-details-item

View File

@ -33,6 +33,11 @@ export default {
type: Array,
required: true,
},
fields: {
type: Array,
required: false,
default: null,
},
},
data() {
return {
@ -59,7 +64,7 @@ export default {
</gl-alert>
<div v-else>
<workload-stats :stats="stats" />
<workload-table :items="items" @select-item="onItemSelect" />
<workload-table :items="items" :fields="fields" @select-item="onItemSelect" />
<gl-drawer
:open="showDetailsDrawer"

View File

@ -1,4 +1,4 @@
import { s__ } from '~/locale';
import { __, s__ } from '~/locale';
export const STATUS_RUNNING = 'Running';
export const STATUS_PENDING = 'Pending';
@ -53,3 +53,34 @@ export const DEFAULT_WORKLOAD_TABLE_FIELDS = [
export const STATUS_TRUE = 'True';
export const STATUS_FALSE = 'False';
export const SERVICES_TABLE_FIELDS = [
{
key: 'name',
label: __('Name'),
},
{
key: 'namespace',
label: __('Namespace'),
},
{
key: 'type',
label: __('Type'),
},
{
key: 'clusterIP',
label: s__('Environment|Cluster IP'),
},
{
key: 'externalIP',
label: s__('Environment|External IP'),
},
{
key: 'ports',
label: s__('Environment|Ports'),
},
{
key: 'age',
label: s__('Environment|Age'),
},
];

View File

@ -8,6 +8,7 @@ import k8sReplicaSetsQuery from './queries/k8s_dashboard_replica_sets.query.grap
import k8sDaemonSetsQuery from './queries/k8s_dashboard_daemon_sets.query.graphql';
import k8sJobsQuery from './queries/k8s_dashboard_jobs.query.graphql';
import k8sCronJobsQuery from './queries/k8s_dashboard_cron_jobs.query.graphql';
import k8sServicesQuery from './queries/k8s_dashboard_services.query.graphql';
import { resolvers } from './resolvers';
export const apolloProvider = () => {
@ -110,6 +111,19 @@ export const apolloProvider = () => {
},
});
cache.writeQuery({
query: k8sServicesQuery,
data: {
metadata,
spec: {
type: null,
clusterIP: null,
externalIP: null,
ports: null,
},
},
});
return new VueApollo({
defaultClient,
});

View File

@ -62,6 +62,24 @@ export const mapJobItem = (item) => {
};
};
// Normalizes a raw Kubernetes Service object for the dashboard UI:
// guarantees `labels`/`annotations` are always objects and substitutes
// a '-' placeholder for missing cluster/external IPs so the services
// table always has printable cell values.
export const mapServicesItems = (item) => {
  const rawSpec = item.spec;

  const metadata = {
    ...item.metadata,
    annotations: item.metadata?.annotations || {},
    labels: item.metadata?.labels || {},
  };

  return {
    metadata,
    spec: {
      type: rawSpec.type,
      clusterIP: rawSpec.clusterIP || '-',
      externalIP: rawSpec.externalIP || '-',
      ports: rawSpec.ports,
    },
  };
};
export const mapCronJobItem = (item) => {
const metadata = {
...item.metadata,

View File

@ -0,0 +1,17 @@
# Client-side (@client) query: resolved locally by the Apollo resolvers,
# which call the Kubernetes Core V1 API using the given agent configuration.
# Returns Service resources with the metadata and spec fields rendered by
# the dashboard services table.
query getK8sDashboardServices($configuration: LocalConfiguration) {
k8sServices(configuration: $configuration) @client {
metadata {
name
namespace
creationTimestamp
labels
annotations
}
spec {
type
clusterIP
externalIP
ports
}
}
}

View File

@ -1,4 +1,4 @@
import { Configuration, AppsV1Api, BatchV1Api } from '@gitlab/cluster-client';
import { Configuration, CoreV1Api, AppsV1Api, BatchV1Api } from '@gitlab/cluster-client';
import {
getK8sPods,
@ -9,6 +9,7 @@ import {
watchWorkloadItems,
mapJobItem,
mapCronJobItem,
mapServicesItems,
} from '../helpers/resolver_helpers';
import k8sDashboardPodsQuery from '../queries/k8s_dashboard_pods.query.graphql';
import k8sDashboardDeploymentsQuery from '../queries/k8s_dashboard_deployments.query.graphql';
@ -17,6 +18,7 @@ import k8sDashboardReplicaSetsQuery from '../queries/k8s_dashboard_replica_sets.
import k8sDaemonSetsQuery from '../queries/k8s_dashboard_daemon_sets.query.graphql';
import k8sJobsQuery from '../queries/k8s_dashboard_jobs.query.graphql';
import k8sCronJobsQuery from '../queries/k8s_dashboard_cron_jobs.query.graphql';
import k8sServicesQuery from '../queries/k8s_dashboard_services.query.graphql';
export default {
k8sPods(_, { configuration }, { client }) {
@ -244,4 +246,40 @@ export default {
}
});
},
k8sServices(_, { configuration, namespace = '' }, { client }) {
const config = new Configuration(configuration);
const coreV1Api = new CoreV1Api(config);
const servicesApi = namespace
? coreV1Api.listCoreV1NamespacedService({ namespace })
: coreV1Api.listCoreV1ServiceForAllNamespaces();
return servicesApi
.then((res) => {
const watchPath = buildWatchPath({
resource: 'services',
namespace,
});
watchWorkloadItems({
client,
query: k8sServicesQuery,
configuration,
namespace,
watchPath,
queryField: 'k8sServices',
mapFn: mapServicesItems,
});
const data = res?.items || [];
return data.map(mapServicesItems);
})
.catch(async (err) => {
try {
await handleClusterError(err);
} catch (error) {
throw new Error(error.message);
}
});
},
};

View File

@ -77,3 +77,14 @@ export function calculateCronJobStatus(item) {
}
return STATUS_READY;
}
// Builds a human-readable, comma-separated summary of a Kubernetes
// service's ports. Each entry is rendered as "<port>[:<nodePort>]/<protocol>"
// (the nodePort segment is included only when present). Returns an empty
// string when `ports` is null, undefined, or empty.
export function generateServicePortsString(ports) {
  if (!ports?.length) return '';

  const formatted = [];
  for (const { port, nodePort, protocol } of ports) {
    const nodePortSuffix = nodePort ? `:${nodePort}` : '';
    formatted.push(`${port}${nodePortSuffix}/${protocol}`);
  }
  return formatted.join(', ');
}

View File

@ -0,0 +1,69 @@
<script>
import { s__ } from '~/locale';
import { getAge, generateServicePortsString } from '../helpers/k8s_integration_helper';
import { SERVICES_TABLE_FIELDS } from '../constants';
import WorkloadLayout from '../components/workload_layout.vue';
import k8sServicesQuery from '../graphql/queries/k8s_dashboard_services.query.graphql';
// Dashboard page that lists Kubernetes Service resources via the
// client-side `k8sServices` Apollo query and renders them in the shared
// WorkloadLayout table.
export default {
components: {
WorkloadLayout,
},
// Cluster connection settings provided by the parent app.
inject: ['configuration'],
apollo: {
k8sServices: {
query: k8sServicesQuery,
variables() {
return {
configuration: this.configuration,
};
},
// Flattens each Service into the row shape expected by the table;
// falls back to an empty list when the query returns no data.
update(data) {
return (
data?.k8sServices?.map((service) => {
return {
name: service.metadata?.name,
namespace: service.metadata?.namespace,
type: service.spec?.type,
clusterIP: service.spec?.clusterIP,
externalIP: service.spec?.externalIP,
// e.g. "80:31000/TCP, 443/TCP"
ports: generateServicePortsString(service?.spec?.ports),
age: getAge(service.metadata?.creationTimestamp),
labels: service.metadata?.labels,
annotations: service.metadata?.annotations,
kind: s__('KubernetesDashboard|Service'),
};
}) || []
);
},
// Surface query failures through the layout's error alert.
error(err) {
this.errorMessage = err?.message;
},
},
},
data() {
return {
k8sServices: [],
errorMessage: '',
};
},
computed: {
loading() {
return this.$apollo.queries.k8sServices.loading;
},
// Services have no status breakdown, so no stats cards are shown.
servicesStats() {
return [];
},
},
SERVICES_TABLE_FIELDS,
};
</script>
<template>
<workload-layout
:loading="loading"
:error-message="errorMessage"
:stats="servicesStats"
:items="k8sServices"
:fields="$options.SERVICES_TABLE_FIELDS"
/>
</template>

View File

@ -5,6 +5,7 @@ export const REPLICA_SETS_ROUTE_NAME = 'replicaSets';
export const DAEMON_SETS_ROUTE_NAME = 'daemonSets';
export const JOBS_ROUTE_NAME = 'jobs';
export const CRON_JOBS_ROUTE_NAME = 'cronJobs';
export const SERVICES_ROUTE_NAME = 'services';
export const PODS_ROUTE_PATH = '/pods';
export const DEPLOYMENTS_ROUTE_PATH = '/deployments';
@ -13,3 +14,4 @@ export const REPLICA_SETS_ROUTE_PATH = '/replicasets';
export const DAEMON_SETS_ROUTE_PATH = '/daemonsets';
export const JOBS_ROUTE_PATH = '/jobs';
export const CRON_JOBS_ROUTE_PATH = '/cronjobs';
export const SERVICES_ROUTE_PATH = '/services';

View File

@ -6,6 +6,7 @@ import ReplicaSetsPage from '../pages/replica_sets_page.vue';
import DaemonSetsPage from '../pages/daemon_sets_page.vue';
import JobsPage from '../pages/jobs_page.vue';
import CronJobsPage from '../pages/cron_jobs_page.vue';
import ServicesPage from '../pages/services_page.vue';
import {
PODS_ROUTE_NAME,
@ -22,6 +23,8 @@ import {
JOBS_ROUTE_PATH,
CRON_JOBS_ROUTE_NAME,
CRON_JOBS_ROUTE_PATH,
SERVICES_ROUTE_NAME,
SERVICES_ROUTE_PATH,
} from './constants';
export default [
@ -81,4 +84,12 @@ export default [
title: s__('KubernetesDashboard|CronJobs'),
},
},
{
name: SERVICES_ROUTE_NAME,
path: SERVICES_ROUTE_PATH,
component: ServicesPage,
meta: {
title: s__('KubernetesDashboard|Services'),
},
},
];

View File

@ -30,6 +30,11 @@ export default {
mounted() {
const updateRemainingTime = () => {
const remainingMilliseconds = calculateRemainingMilliseconds(this.endDateString);
if (remainingMilliseconds < 1) {
this.$emit('timer-expired');
}
this.remainingTime = formatTime(remainingMilliseconds);
};

View File

@ -27,7 +27,7 @@ module Types
field :merge_pipelines_enabled,
GraphQL::Types::Boolean,
null: true,
description: 'Whether merge pipelines are enabled.',
description: 'Whether merged results pipelines are enabled.',
method: :merge_pipelines_enabled?
field :project,
Types::ProjectType,

View File

@ -288,6 +288,14 @@ class Member < ApplicationRecord
refresh_member_authorized_projects
end
after_create if: :update_organization_user? do
Organizations::OrganizationUser.upsert(
{ organization_id: source.organization_id, user_id: user_id, access_level: :default },
unique_by: [:organization_id, :user_id],
on_duplicate: :skip # Do not change access_level, could make :owner :default
)
end
attribute :notification_level, default: -> { NotificationSetting.levels[:global] }
class << self
@ -657,6 +665,12 @@ class Member < ApplicationRecord
user&.project_bot?
end
# Whether accepting/creating this membership should also upsert an
# Organizations::OrganizationUser record (see the after_create hook).
# True only when the feature flag is enabled for the root ancestor, the
# member is not a pending invite, and the source belongs to an organization.
def update_organization_user?
return false unless Feature.enabled?(:update_organization_users, source.root_ancestor, type: :gitlab_com_derisk)
!invite? && source.organization.present?
end
def log_invitation_token_cleanup
return true unless Gitlab.com? && invite? && invite_accepted_at?

View File

@ -4,6 +4,11 @@ module Users
class PhoneNumberValidation < ApplicationRecord
include IgnorableColumns
# SMS send attempts subsequent to the first one will have wait times of 1
# min, 3 min, 5 min after each one respectively. Wait time between the fifth
# attempt and so on will be 10 minutes.
SMS_SEND_WAIT_TIMES = [1.minute, 3.minutes, 5.minutes, 10.minutes].freeze
self.primary_key = :user_id
self.table_name = 'user_phone_number_validations'
@ -62,5 +67,18 @@ module Users
def validated?
validated_at.present?
end
# Earliest time the next verification SMS may be sent, derived from
# SMS_SEND_WAIT_TIMES and the number of previous sends. Returns nil when
# sending is allowed immediately: feature flag disabled for the user,
# no previous send, or no recorded sent-at timestamp.
def sms_send_allowed_after
return unless Feature.enabled?(:sms_send_wait_time, user)
# first send is allowed anytime
return if sms_send_count < 1
return unless sms_sent_at
# sends beyond the configured list all use the last (longest) wait time
max_wait_time = SMS_SEND_WAIT_TIMES.last
wait_time = SMS_SEND_WAIT_TIMES.fetch(sms_send_count - 1, max_wait_time)
sms_sent_at + wait_time
end
end
end

View File

@ -3,6 +3,7 @@
module Organizations
class OrganizationPolicy < BasePolicy
condition(:organization_user) { @subject.user?(@user) }
condition(:organization_owner) { @subject.owner?(@user) }
desc 'Organization is public'
condition(:public_organization, scope: :subject, score: 0) { true }
@ -18,11 +19,14 @@ module Organizations
enable :read_organization_user
end
rule { organization_user }.policy do
rule { organization_owner }.policy do
enable :admin_organization
enable :create_group
end
rule { organization_user }.policy do
enable :read_organization
enable :read_organization_user
enable :create_group
end
end
end

View File

@ -345,6 +345,15 @@
:weight: 1
:idempotent: false
:tags: []
- :name: cronjob:click_house_event_authors_consistency_cron
:worker_name: ClickHouse::EventAuthorsConsistencyCronWorker
:feature_category: :value_stream_management
:has_external_dependencies: true
:urgency: :low
:resource_boundary: :unknown
:weight: 1
:idempotent: true
:tags: []
- :name: cronjob:click_house_events_sync
:worker_name: ClickHouse::EventsSyncWorker
:feature_category: :value_stream_management

View File

@ -0,0 +1,121 @@
# frozen_string_literal: true
module ClickHouse
# rubocop: disable CodeReuse/ActiveRecord -- Building worker-specific ActiveRecord and ClickHouse queries
# Cron worker that keeps ClickHouse event-author data consistent with
# PostgreSQL: it iterates author ids stored in the ClickHouse
# `event_authors` table, finds ids whose user no longer exists in the
# PostgreSQL `users` table, and deletes those authors' rows from both the
# `events` and `event_authors` ClickHouse tables. Progress is persisted in
# a SyncCursor so an interrupted run resumes where it stopped.
class EventAuthorsConsistencyCronWorker
include ApplicationWorker
include ClickHouseWorker
include Gitlab::ExclusiveLeaseHelpers
include Gitlab::Utils::StrongMemoize
idempotent!
queue_namespace :cronjob
data_consistency :delayed
worker_has_external_dependencies! # the worker interacts with a ClickHouse database
feature_category :value_stream_management
# Exclusive-lease TTL; prevents overlapping runs of this worker.
MAX_TTL = 5.minutes.to_i
# Soft runtime budget, checked between PostgreSQL batches.
MAX_RUNTIME = 150.seconds
# Upper bound on author deletions collected in a single run.
MAX_AUTHOR_DELETIONS = 2000
# Batch size when iterating author ids in ClickHouse.
CLICK_HOUSE_BATCH_SIZE = 100_000
# Slice size for the PostgreSQL missing-user existence check.
POSTGRESQL_BATCH_SIZE = 2500
def perform
return unless enabled?
runtime_limiter = Analytics::CycleAnalytics::RuntimeLimiter.new(MAX_RUNTIME)
in_lock(self.class.to_s, ttl: MAX_TTL, retries: 0) do
author_records_to_delete = []
last_processed_id = 0
iterator.each_batch(column: :author_id, of: CLICK_HOUSE_BATCH_SIZE) do |scope|
query = scope.select(Arel.sql('DISTINCT author_id')).to_sql
ids_from_click_house = connection.select(query).pluck('author_id').sort
ids_from_click_house.each_slice(POSTGRESQL_BATCH_SIZE) do |ids|
# Collect ids with no matching PostgreSQL user; remember how far we got.
author_records_to_delete.concat(missing_user_ids(ids))
last_processed_id = ids.last
to_be_deleted_size = author_records_to_delete.size
if to_be_deleted_size >= MAX_AUTHOR_DELETIONS
metadata.merge!(status: :deletion_limit_reached, deletions: to_be_deleted_size)
break
end
if runtime_limiter.over_time?
metadata.merge!(status: :over_time, deletions: to_be_deleted_size)
break
end
end
break if limit_was_reached?
end
delete_records_from_click_house(author_records_to_delete)
# Reset the cursor when the whole table was scanned so the next run
# starts over from the beginning.
last_processed_id = 0 if table_fully_processed?
ClickHouse::SyncCursor.update_cursor_for(:event_authors_consistency_check, last_processed_id)
log_extra_metadata_on_done(:result, metadata)
end
end
private
# Run summary reported via log_extra_metadata_on_done.
def metadata
@metadata ||= { status: :processed, deletions: 0 }
end
# True when the run stopped early (deletion cap or runtime budget).
def limit_was_reached?
metadata[:status] == :deletion_limit_reached || metadata[:status] == :over_time
end
def table_fully_processed?
metadata[:status] == :processed
end
def enabled?
ClickHouse::Client.database_configured?(:main) && Feature.enabled?(:event_sync_worker_for_click_house)
end
# Cursor from the previous run; nil means start from the beginning.
def previous_author_id
value = ClickHouse::SyncCursor.cursor_for(:event_authors_consistency_check)
value == 0 ? nil : value
end
strong_memoize_attr :previous_author_id
def iterator
builder = ClickHouse::QueryBuilder.new('event_authors')
ClickHouse::Iterator.new(query_builder: builder, connection: connection, min_value: previous_author_id)
end
def connection
@connection ||= ClickHouse::Connection.new(:main)
end
# Given candidate author ids, returns the subset with no row in the
# PostgreSQL `users` table (checked via a VALUES list anti-join).
def missing_user_ids(ids)
value_list = Arel::Nodes::ValuesList.new(ids.map { |id| [id] })
User
.from("(#{value_list.to_sql}) AS user_ids(id)")
.where('NOT EXISTS (SELECT 1 FROM users WHERE id = user_ids.id)')
.pluck(:id)
end
# Removes the given author ids from both ClickHouse tables using
# parameterized lightweight DELETE statements.
def delete_records_from_click_house(ids)
query = ClickHouse::Client::Query.new(
raw_query: "DELETE FROM events WHERE author_id IN ({author_ids:Array(UInt64)})",
placeholders: { author_ids: ids.to_json }
)
connection.execute(query)
query = ClickHouse::Client::Query.new(
raw_query: "DELETE FROM event_authors WHERE author_id IN ({author_ids:Array(UInt64)})",
placeholders: { author_ids: ids.to_json }
)
connection.execute(query)
end
end
# rubocop: enable CodeReuse/ActiveRecord
end

View File

@ -0,0 +1,8 @@
---
name: sms_send_wait_time
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/137850
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/432975
milestone: '16.8'
type: development
group: group::anti-abuse
default_enabled: false

View File

@ -0,0 +1,9 @@
---
name: update_organization_users
feature_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/419366
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/139188
rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/435868
milestone: '16.8'
group: group::tenant scale
type: gitlab_com_derisk
default_enabled: false

View File

@ -0,0 +1,9 @@
---
name: ai_duo_chat_switch
feature_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/434802
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/140352
rollout_issue_url: https://gitlab.com/gitlab-com/gl-infra/production/-/issues/17301
milestone: '16.8'
group: group::ai framework
type: ops
default_enabled: true

View File

@ -900,6 +900,9 @@ Gitlab.ee do
Settings.cron_jobs['click_house_events_sync_worker'] ||= {}
Settings.cron_jobs['click_house_events_sync_worker']['cron'] ||= "*/3 * * * *"
Settings.cron_jobs['click_house_events_sync_worker']['job_class'] = 'ClickHouse::EventsSyncWorker'
Settings.cron_jobs['click_house_event_authors_consistency_cron_worker'] ||= {}
Settings.cron_jobs['click_house_event_authors_consistency_cron_worker']['cron'] ||= "*/30 * * * *"
Settings.cron_jobs['click_house_event_authors_consistency_cron_worker']['job_class'] = 'ClickHouse::EventAuthorsConsistencyCronWorker'
Settings.cron_jobs['vertex_ai_refresh_access_token_worker'] ||= {}
Settings.cron_jobs['vertex_ai_refresh_access_token_worker']['cron'] ||= '*/50 * * * *'
Settings.cron_jobs['vertex_ai_refresh_access_token_worker']['job_class'] = 'Llm::VertexAiAccessTokenRefreshWorker'

View File

@ -28,4 +28,12 @@ if RUBY_PLATFORM.include?('darwin')
time_zone_name = CFTimeZone.CFTimeZoneGetName(default_time_zone)
CFTimeZone.CFRelease(time_zone_name)
CFTimeZone.CFRelease(default_time_zone)
# With curl v8.2.0, the thread unsafe macOS API call to
# SCDynamicStoreCopyProxies has been moved to the global init function
# (https://github.com/curl/curl/issues/11252). The Elasticsearch
# gem uses Typhoeus, which uses Ethon to wrap libcurl.
# Init curl to ensure Spring works
# (https://github.com/elastic/elasticsearch-ruby/issues/2244).
Ethon::Curl.init
end

View File

@ -0,0 +1,16 @@
# frozen_string_literal: true
# Adds columns used to rate-limit verification SMS sends:
# - sms_sent_at: timestamp of the most recent send
# - sms_send_count: number of sends so far (drives the wait-time backoff)
class AddSmsSentAtAndSmsSendCountToPhoneNumberValidations < Gitlab::Database::Migration[2.2]
milestone '16.8'
enable_lock_retries!
def up
add_column :user_phone_number_validations, :sms_sent_at, :datetime_with_timezone, null: true
add_column :user_phone_number_validations, :sms_send_count, :smallint, default: 0, null: false
end
def down
remove_column :user_phone_number_validations, :sms_sent_at, if_exists: true
remove_column :user_phone_number_validations, :sms_send_count, if_exists: true
end
end

View File

@ -0,0 +1,10 @@
# frozen_string_literal: true
# Adds the admin_terraform_state custom-ability flag to member_roles,
# defaulting to false for all existing and new rows.
class AddAdminTerraformStateToMemberRoles < Gitlab::Database::Migration[2.2]
milestone '16.8'
enable_lock_retries!
def change
add_column :member_roles, :admin_terraform_state, :boolean, default: false, null: false
end
end

View File

@ -0,0 +1,16 @@
# frozen_string_literal: true
# Schedules (asynchronously) a unique index on the id_convert_to_bigint
# column of system_note_metadata, as part of the int4 -> int8 primary key
# conversion. The index is only prepared here; it is created out-of-band.
class CreateIndexOnIdConvertToBigintForSystemNoteMetadataAsync < Gitlab::Database::Migration[2.2]
milestone '16.8'
TABLE_NAME = :system_note_metadata
INDEX_NAME = 'index_system_note_metadata_pkey_on_id_convert_to_bigint'
def up
prepare_async_index TABLE_NAME, :id_convert_to_bigint, unique: true, name: INDEX_NAME
end
def down
unprepare_async_index TABLE_NAME, :id_convert_to_bigint, unique: true, name: INDEX_NAME
end
end

View File

@ -0,0 +1 @@
bfa32c41d867fa4de24ac0a81d1f99f14e868b2c5bd453f799e1a3b3eebd1d51

View File

@ -0,0 +1 @@
d0cb92dc098f069e02d457f7c497dc24f544f6a27a8426dcd3446ad16bd9cc44

View File

@ -0,0 +1 @@
b40f751b4b06dd94de38e3fa260e07e56359828ca1ae1799ca4d65bd873fa8af

View File

@ -18948,6 +18948,7 @@ CREATE TABLE member_roles (
archive_project boolean DEFAULT false NOT NULL,
manage_group_access_tokens boolean DEFAULT false NOT NULL,
remove_project boolean DEFAULT false NOT NULL,
admin_terraform_state boolean DEFAULT false NOT NULL,
CONSTRAINT check_4364846f58 CHECK ((char_length(description) <= 255)),
CONSTRAINT check_9907916995 CHECK ((char_length(name) <= 255))
);
@ -24978,6 +24979,8 @@ CREATE TABLE user_phone_number_validations (
country text NOT NULL,
phone_number text NOT NULL,
telesign_reference_xid text,
sms_sent_at timestamp with time zone,
sms_send_count smallint DEFAULT 0 NOT NULL,
CONSTRAINT check_193736da9f CHECK ((char_length(country) <= 3)),
CONSTRAINT check_d2f31fc815 CHECK ((char_length(phone_number) <= 12)),
CONSTRAINT check_d7af4d3eb5 CHECK ((char_length(telesign_reference_xid) <= 255))

View File

@ -470,3 +470,12 @@ The content of each log file is listed in chronological order. To minimize perfo
### Audit Events **(PREMIUM SELF)**
The **Audit Events** page lists changes made within the GitLab server. With this information you can control, analyze, and track every change.
### Statistics
The **Instance overview** section of the Dashboard lists the current statistics of the GitLab instance. This information is retrieved using the [Application statistics API](../api/statistics.md#get-current-application-statistics).
NOTE:
These statistics show exact counts for values less than 10,000. For values of 10,000 and higher, these statistics show approximate data
when [TablesampleCountStrategy](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/database/count/tablesample_count_strategy.rb?ref_type=heads#L16) and [ReltuplesCountStrategy](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/database/count/reltuples_count_strategy.rb?ref_type=heads) strategies are used for calculations.

View File

@ -6261,7 +6261,7 @@ Input type: `ProjectCiCdSettingsUpdateInput`
| <a id="mutationprojectcicdsettingsupdateinboundjobtokenscopeenabled"></a>`inboundJobTokenScopeEnabled` | [`Boolean`](#boolean) | Indicates CI/CD job tokens generated in other projects have restricted access to this project. |
| <a id="mutationprojectcicdsettingsupdatejobtokenscopeenabled"></a>`jobTokenScopeEnabled` **{warning-solid}** | [`Boolean`](#boolean) | **Deprecated:** Outbound job token scope is being removed. This field can now only be set to false. Deprecated in 16.0. |
| <a id="mutationprojectcicdsettingsupdatekeeplatestartifact"></a>`keepLatestArtifact` | [`Boolean`](#boolean) | Indicates if the latest artifact should be kept for the project. |
| <a id="mutationprojectcicdsettingsupdatemergepipelinesenabled"></a>`mergePipelinesEnabled` | [`Boolean`](#boolean) | Indicates if merge pipelines are enabled for the project. |
| <a id="mutationprojectcicdsettingsupdatemergepipelinesenabled"></a>`mergePipelinesEnabled` | [`Boolean`](#boolean) | Indicates if merged results pipelines are enabled for the project. |
| <a id="mutationprojectcicdsettingsupdatemergetrainsenabled"></a>`mergeTrainsEnabled` | [`Boolean`](#boolean) | Indicates if merge trains are enabled for the project. |
| <a id="mutationprojectcicdsettingsupdatemergetrainsskiptrainallowed"></a>`mergeTrainsSkipTrainAllowed` | [`Boolean`](#boolean) | Indicates whether an option is allowed to merge without refreshing the merge train. Ignored unless the `merge_trains_skip_train` feature flag is also enabled. |
@ -6800,7 +6800,7 @@ Input type: `RunnersExportUsageInput`
| ---- | ---- | ----------- |
| <a id="mutationrunnersexportusageclientmutationid"></a>`clientMutationId` | [`String`](#string) | A unique identifier for the client performing the mutation. |
| <a id="mutationrunnersexportusagefromdate"></a>`fromDate` | [`ISO8601Date`](#iso8601date) | UTC start date of the period to report on. Defaults to the start of last full month. |
| <a id="mutationrunnersexportusagemaxprojectcount"></a>`maxProjectCount` | [`Int`](#int) | Maximum number of projects to return. All other runner usage will be attributed to a '<Other projects>' entry. Defaults to 1000 projects. |
| <a id="mutationrunnersexportusagemaxprojectcount"></a>`maxProjectCount` | [`Int`](#int) | Maximum number of projects to return. All other runner usage will be attributed to an `<Other projects>` entry. Defaults to 1000 projects. |
| <a id="mutationrunnersexportusagetodate"></a>`toDate` | [`ISO8601Date`](#iso8601date) | UTC end date of the period to report on. " \ "Defaults to the end of the month specified by `fromDate`. |
| <a id="mutationrunnersexportusagetype"></a>`type` | [`CiRunnerType`](#cirunnertype) | Scope of the runners to include in the report. |
@ -25854,7 +25854,7 @@ four standard [pagination arguments](#connection-pagination-arguments):
| <a id="projectcicdsettinginboundjobtokenscopeenabled"></a>`inboundJobTokenScopeEnabled` | [`Boolean`](#boolean) | Indicates CI/CD job tokens generated in other projects have restricted access to this project. |
| <a id="projectcicdsettingjobtokenscopeenabled"></a>`jobTokenScopeEnabled` | [`Boolean`](#boolean) | Indicates CI/CD job tokens generated in this project have restricted access to other projects. |
| <a id="projectcicdsettingkeeplatestartifact"></a>`keepLatestArtifact` | [`Boolean`](#boolean) | Whether to keep the latest builds artifacts. |
| <a id="projectcicdsettingmergepipelinesenabled"></a>`mergePipelinesEnabled` | [`Boolean`](#boolean) | Whether merge pipelines are enabled. |
| <a id="projectcicdsettingmergepipelinesenabled"></a>`mergePipelinesEnabled` | [`Boolean`](#boolean) | Whether merged results pipelines are enabled. |
| <a id="projectcicdsettingmergetrainsenabled"></a>`mergeTrainsEnabled` | [`Boolean`](#boolean) | Whether merge trains are enabled. |
| <a id="projectcicdsettingmergetrainsskiptrainallowed"></a>`mergeTrainsSkipTrainAllowed` | [`Boolean!`](#boolean) | Whether merge immediately is allowed for merge trains. |
| <a id="projectcicdsettingproject"></a>`project` | [`Project`](#project) | Project the CI/CD settings belong to. |
@ -30859,6 +30859,7 @@ Member role permission.
| ----- | ----------- |
| <a id="memberrolepermissionadmin_group_member"></a>`ADMIN_GROUP_MEMBER` | Allows to admin group members. |
| <a id="memberrolepermissionadmin_merge_request"></a>`ADMIN_MERGE_REQUEST` | Allows to approve merge requests. |
| <a id="memberrolepermissionadmin_terraform_state"></a>`ADMIN_TERRAFORM_STATE` | Allows to admin terraform state. |
| <a id="memberrolepermissionadmin_vulnerability"></a>`ADMIN_VULNERABILITY` | Allows admin access to the vulnerability reports. |
| <a id="memberrolepermissionarchive_project"></a>`ARCHIVE_PROJECT` | Allows to archive projects. |
| <a id="memberrolepermissionmanage_group_access_tokens"></a>`MANAGE_GROUP_ACCESS_TOKENS` | Allows manage access to the group access tokens. |

View File

@ -19,6 +19,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
> - [Archive project introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/134998) in GitLab 16.7.
> - [Delete project introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/139696) in GitLab 16.8.
> - [Manage group access tokens introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/140115) in GitLab 16.8.
> - [Admin terraform state introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/140759) in GitLab 16.8.
FLAG:
On self-managed GitLab, by default these features are not available. To make them available, an administrator can [enable the feature flags](../administration/feature_flags.md) named `admin_group_member` and `manage_project_access_tokens`.
@ -46,6 +47,7 @@ If successful, returns [`200`](rest/index.md#status-codes) and the following res
| `[].group_id` | integer | The ID of the group that the member role belongs to. |
| `[].base_access_level` | integer | Base access level for member role. Valid values are 10 (Guest), 20 (Reporter), 30 (Developer), 40 (Maintainer), or 50 (Owner).|
| `[].admin_merge_request` | boolean | Permission to admin project merge requests and enables the ability to `download_code`. |
| `[].admin_terraform_state` | boolean | Permission to admin project terraform state. |
| `[].admin_vulnerability` | boolean | Permission to admin project vulnerabilities. |
| `[].read_code` | boolean | Permission to read project code. |
| `[].read_dependency` | boolean | Permission to read project dependencies. |
@ -73,6 +75,7 @@ Example response:
"group_id": 84,
"base_access_level": 10,
"admin_merge_request": false,
"admin_terraform_state": false,
"admin_vulnerability": false,
"read_code": true,
"read_dependency": false,
@ -88,8 +91,9 @@ Example response:
"description": "Custom guest that read and admin security entities",
"group_id": 84,
"base_access_level": 10,
"admin_merge_request": false,
"admin_vulnerability": true,
"admin_merge_request": false,
"admin_terraform_state": false,
"read_code": false,
"read_dependency": true,
"read_vulnerability": true,
@ -120,6 +124,7 @@ To add a member role to a group, the group must be at root-level (have no parent
| `description` | string | no | The description of the member role. |
| `base_access_level` | integer | yes | Base access level for configured role. Valid values are 10 (Guest), 20 (Reporter), 30 (Developer), 40 (Maintainer), or 50 (Owner).|
| `admin_merge_request` | boolean | no | Permission to admin project merge requests. |
| `admin_terraform_state` | boolean | no | Permission to admin project terraform state. |
| `admin_vulnerability` | boolean | no | Permission to admin project vulnerabilities. |
| `read_code` | boolean | no | Permission to read project code. |
| `read_dependency` | boolean | no | Permission to read project dependencies. |
@ -135,6 +140,7 @@ If successful, returns [`201`](rest/index.md#status-codes) and the following att
| `group_id` | integer | The ID of the group that the member role belongs to. |
| `base_access_level` | integer | Base access level for member role. |
| `admin_merge_request` | boolean | Permission to admin project merge requests. |
| `admin_terraform_state` | boolean | Permission to admin project terraform state. |
| `admin_vulnerability` | boolean | Permission to admin project vulnerabilities. |
| `read_code` | boolean | Permission to read project code. |
| `read_dependency` | boolean | Permission to read project dependencies. |

View File

@ -1538,7 +1538,7 @@ curl --request POST --header "PRIVATE-TOKEN: <your-token>" \
| `jobs_enabled` | boolean | No | _(Deprecated)_ Enable jobs for this project. Use `builds_access_level` instead. |
| `lfs_enabled` | boolean | No | Enable LFS. |
| `merge_method` | string | No | Set the [merge method](#project-merge-method) used. |
| `merge_pipelines_enabled` | boolean | No | Enable or disable merge pipelines. |
| `merge_pipelines_enabled` | boolean | No | Enable or disable merged results pipelines. |
| `merge_requests_access_level` | string | No | One of `disabled`, `private`, or `enabled`. |
| `merge_requests_enabled` | boolean | No | _(Deprecated)_ Enable merge requests for this project. Use `merge_requests_access_level` instead. |
| `merge_trains_enabled` | boolean | No | Enable or disable merge trains. |
@ -1739,7 +1739,7 @@ Supported attributes:
| `lfs_enabled` | boolean | No | Enable LFS. |
| `merge_commit_template` | string | No | [Template](../user/project/merge_requests/commit_templates.md) used to create merge commit message in merge requests. _([Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/20263) in GitLab 14.5.)_ |
| `merge_method` | string | No | Set the [merge method](#project-merge-method) used. |
| `merge_pipelines_enabled` | boolean | No | Enable or disable merge pipelines. |
| `merge_pipelines_enabled` | boolean | No | Enable or disable merged results pipelines. |
| `merge_requests_access_level` | string | No | One of `disabled`, `private`, or `enabled`. |
| `merge_requests_enabled` | boolean | No | _(Deprecated)_ Enable merge requests for this project. Use `merge_requests_access_level` instead. |
| `merge_requests_template` **(PREMIUM ALL)** | string | No | Default description for merge requests. Description is parsed with GitLab Flavored Markdown. See [Templates for issues and merge requests](#templates-for-issues-and-merge-requests). |

View File

@ -12,7 +12,8 @@ List the current statistics of the GitLab instance. You have to be an
administrator to perform this action.
NOTE:
These statistics are approximate.
These statistics show exact counts for values less than 10,000. For values of 10,000 and higher, these statistics show approximate data
when [TablesampleCountStrategy](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/database/count/tablesample_count_strategy.rb?ref_type=heads#L16) and [ReltuplesCountStrategy](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/database/count/reltuples_count_strategy.rb?ref_type=heads) strategies are used for calculations.
```plaintext
GET /application/statistics

View File

@ -202,3 +202,10 @@ To see what polyfills are being used:
### 9. Why is my page broken in dark mode?
See [dark mode docs](dark_mode.md)
### 10. How to render GitLab-flavored Markdown?
If you need to render [GitLab-flavored Markdown](../gitlab_flavored_markdown/index.md), then there are two things that you require:
- Pass the GLFM content with the `v-safe-html` directive to a `div` HTML element inside your Vue component
- Add the `md` class to the root div, which will apply the appropriate CSS styling

View File

@ -48,6 +48,7 @@ Matches use:
| The `nightly` and `canary` pipelines | `only: { pipeline: [:nightly, :canary] }` | ["nightly scheduled pipeline"](https://gitlab.com/gitlab-org/gitlab/-/pipeline_schedules) and ["canary"](https://gitlab.com/gitlab-org/quality/canary) |
| The `ee:instance` job | `only: { job: 'ee:instance' }` | The `ee:instance` job in any pipeline |
| Any `quarantine` job | `only: { job: '.*quarantine' }` | Any job ending in `quarantine` in any pipeline |
| Local development environment | `only: :local` | Any environment where `Runtime::Env.running_in_ci?` is false |
| Any run where condition evaluates to a truthy value | `only: { condition: -> { ENV['TEST_ENV'] == 'true' } }` | Any run where `TEST_ENV` is set to `true` |
```ruby

View File

@ -894,17 +894,15 @@ you can use the `MAVEN_CLI_OPTS` CI/CD variable.
Read more on [how to use private Maven repositories](../index.md#using-private-maven-repositories).
#### FIPS-enabled images
### FIPS-enabled images
> Introduced in GitLab 14.10. GitLab team members can view more information in this confidential issue: `https://gitlab.com/gitlab-org/gitlab/-/issues/354796`
> - Introduced in GitLab 14.10. GitLab team members can view more information in this confidential issue: `https://gitlab.com/gitlab-org/gitlab/-/issues/354796`
> - Introduced in GitLab 15.0 - Gemnasium uses FIPS-enabled images when FIPS mode is enabled.
GitLab also offers [FIPS-enabled Red Hat UBI](https://www.redhat.com/en/blog/introducing-red-hat-universal-base-image)
versions of the Gemnasium images. You can therefore replace standard images with FIPS-enabled images.
Gemnasium scanning jobs automatically use FIPS-enabled image when FIPS mode is enabled in the GitLab instance.
([Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/357922) in GitLab 15.0.)
To manually switch to FIPS-enabled images, set the variable `DS_IMAGE_SUFFIX` to `"-fips"`.
versions of the Gemnasium images. When FIPS mode is enabled in the GitLab instance, Gemnasium
scanning jobs automatically use the FIPS-enabled images. To manually switch to FIPS-enabled images,
set the variable `DS_IMAGE_SUFFIX` to `"-fips"`.
Dependency scanning for Gradle projects and auto-remediation for Yarn projects are not supported in FIPS mode.

View File

@ -26,7 +26,7 @@ are supported.
| Rewrites (other than `200`) | **{dotted-circle}** No | `/en/* /en/404.html 404` |
| Query parameters | **{dotted-circle}** No | `/store id=:id /blog/:id 301` |
| Force ([shadowing](https://docs.netlify.com/routing/redirects/rewrites-proxies/#shadowing)) | **{dotted-circle}** No | `/app/ /app/index.html 200!` |
| Domain-level redirects | **{dotted-circle}** No | `http://blog.example.com/* https://www.example.com/blog/:splat 301` |
| [Domain-level redirects](#domain-level-redirects) | **{check-circle}** Yes | `http://blog.example.com/* https://www.example.com/blog/:splat 301` |
| Redirect by country or language | **{dotted-circle}** No | `/ /anz 302 Country=au,nz` |
| Redirect by role | **{dotted-circle}** No | `/admin/* 200! Role=admin` |
@ -119,6 +119,30 @@ request matches the `from`:
This status code can be used in combination with [splat rules](#splats) to dynamically
rewrite the URL.
## Domain-level redirects
> [Introduced](https://gitlab.com/gitlab-org/gitlab-pages/-/merge_requests/936) in GitLab 16.8 [with a flag](../../../administration/feature_flags.md) named `FF_ENABLE_DOMAIN_REDIRECT`. Disabled by default.
To create a domain-level redirect, add a domain-level path (beginning with `http://`
or `https://`) to either:
- The `to` path only.
- The `from` and `to` paths.
The supported [HTTP status codes](#http-status-codes) are `301` and `302`:
```plaintext
# 301 permanent redirect
http://blog.example.com/file_1.html https://www.example.com/blog/file_1.html 301
/file_2.html https://www.example.com/blog/file_2.html 301
# 302 temporary redirect
http://blog.example.com/file_3.html https://www.example.com/blog/file_3.html 302
/file_4.html https://www.example.com/blog/file_4.html 302
```
Domain-level redirects can be used in combination with [splat rules](#splats) (including splat placeholders)
to dynamically rewrite the URL path.
## Splats
> [Introduced](https://gitlab.com/gitlab-org/gitlab-pages/-/merge_requests/458) in GitLab 14.3.

View File

@ -301,6 +301,8 @@ When you create a merge request, the workflow checks the name of the branch. If
branch name matches the workflow, the merge request targets the branch you specify. If the branch name does not match, the merge request targets the
default branch of the project.
Rules are processed on a "first-match" basis - if two rules match the same branch name, the top-most rule is applied.
Prerequisites:
- You must have at least the Maintainer role.

View File

@ -22,9 +22,10 @@ module ClickHouse
# builder = ClickHouse::QueryBuilder.new('event_authors').where(type: 'some_type')
class Iterator
# rubocop: disable CodeReuse/ActiveRecord -- this is a ClickHouse query builder class using Arel
def initialize(query_builder:, connection:)
def initialize(query_builder:, connection:, min_value: nil)
@query_builder = query_builder
@connection = connection
@min_value = min_value
end
def each_batch(column: :id, of: 10_000)
@ -36,18 +37,18 @@ module ClickHouse
row = connection.select(min_max_query.to_sql).first
return if row.nil?
min_value = row['min']
max_value = row['max']
return if max_value == 0
min = min_value || row['min']
max = row['max']
return if max == 0
loop do
break if min_value > max_value
break if min > max
yield query_builder
.where(table[column].gteq(min_value))
.where(table[column].lt(min_value + of))
.where(table[column].gteq(min))
.where(table[column].lt(min + of))
min_value += of
min += of
end
end
@ -55,7 +56,7 @@ module ClickHouse
delegate :table, to: :query_builder
attr_reader :query_builder, :connection
attr_reader :query_builder, :connection, :min_value
# rubocop: enable CodeReuse/ActiveRecord
end
end

View File

@ -132,7 +132,6 @@ module Gitlab
uuid: uuid,
report_type: report.type,
name: finding_name(data, identifiers, location),
compare_key: data['cve'] || '',
location: location,
evidence: evidence,
severity: ::Enums::Vulnerability.parse_severity_level(data['severity']),

View File

@ -7,7 +7,6 @@ module Gitlab
class Finding
include ::VulnerabilityFindingHelpers
attr_reader :compare_key
attr_reader :confidence
attr_reader :identifiers
attr_reader :flags
@ -34,10 +33,7 @@ module Gitlab
delegate :file_path, :start_line, :end_line, to: :location
alias_method :cve, :compare_key
def initialize(compare_key:, identifiers:, flags: [], links: [], remediations: [], location:, evidence:, metadata_version:, name:, original_data:, report_type:, scanner:, scan:, uuid:, confidence: nil, severity: nil, details: {}, signatures: [], project_id: nil, vulnerability_finding_signatures_enabled: false, found_by_pipeline: nil, cvss: []) # rubocop:disable Metrics/ParameterLists
@compare_key = compare_key
def initialize(identifiers:, flags: [], links: [], remediations: [], location:, evidence:, metadata_version:, name:, original_data:, report_type:, scanner:, scan:, uuid:, confidence: nil, severity: nil, details: {}, signatures: [], project_id: nil, vulnerability_finding_signatures_enabled: false, found_by_pipeline: nil, cvss: []) # rubocop:disable Metrics/ParameterLists
@confidence = confidence
@identifiers = identifiers
@flags = flags
@ -65,7 +61,6 @@ module Gitlab
def to_hash
%i[
compare_key
confidence
identifiers
flags
@ -84,7 +79,6 @@ module Gitlab
details
signatures
description
cve
solution
].index_with do |key|
public_send(key) # rubocop:disable GitlabSecurity/PublicSend
@ -141,7 +135,7 @@ module Gitlab
def <=>(other)
if severity == other.severity
compare_key <=> other.compare_key
uuid <=> other.uuid
else
::Enums::Vulnerability.severity_levels[other.severity] <=>
::Enums::Vulnerability.severity_levels[severity]
@ -200,7 +194,7 @@ module Gitlab
private
def generate_project_fingerprint
Digest::SHA1.hexdigest(compare_key)
Digest::SHA1.hexdigest(uuid.to_s)
end
def location_fingerprints

View File

@ -30,21 +30,6 @@ module Gitlab::UsageDataCounters
Gitlab::Template::GitlabCiYmlTemplate.find(template_name.chomp('.gitlab-ci.yml'))&.full_name
end
def all_included_templates(template_name)
expanded_template_name = expand_template_name(template_name)
results = [expanded_template_name].tap do |result|
template = Gitlab::Template::GitlabCiYmlTemplate.find(template_name.chomp('.gitlab-ci.yml'))
data = Gitlab::Ci::Config::Yaml::Loader.new(template.content).load.content
[data[:include]].compact.flatten.each do |ci_include|
if ci_include_template = ci_include[:template]
result.concat(all_included_templates(ci_include_template))
end
end
end
results.uniq.sort_by { _1['name'] }
end
private
def template_to_event_name(template)

View File

@ -48,39 +48,5 @@ namespace :gitlab do
FileUtils.mkdir_p(path)
File.write(File.join(path, 'sql_metrics_queries.json'), Gitlab::Json.pretty_generate(queries))
end
# Events for templates included via YAML-less Auto-DevOps
def implicit_auto_devops_includes
Gitlab::UsageDataCounters::CiTemplateUniqueCounter
.all_included_templates('Auto-DevOps.gitlab-ci.yml')
.map { |template| implicit_auto_devops_event(template) }
.uniq
.sort_by { _1['name'] }
end
# Events for templates included in a .gitlab-ci.yml using include:template
def explicit_template_includes
Gitlab::UsageDataCounters::CiTemplateUniqueCounter.ci_templates("lib/gitlab/ci/templates/").each_with_object([]) do |template, result|
expanded_template_name = Gitlab::UsageDataCounters::CiTemplateUniqueCounter.expand_template_name(template)
next unless expanded_template_name # guard against templates unavailable on FOSS
event_name = Gitlab::UsageDataCounters::CiTemplateUniqueCounter.ci_template_event_name(expanded_template_name, :repository_source)
result << ci_template_event(event_name)
end
end
# rubocop:disable Gitlab/NoCodeCoverageComment
# :nocov: remove in https://gitlab.com/gitlab-org/gitlab/-/issues/299453
def ci_template_event(event_name)
{ 'name' => event_name }
end
# :nocov:
# rubocop:enable Gitlab/NoCodeCoverageComment
def implicit_auto_devops_event(expanded_template_name)
event_name = Gitlab::UsageDataCounters::CiTemplateUniqueCounter.ci_template_event_name(expanded_template_name, :auto_devops_source)
ci_template_event(event_name)
end
end
end

View File

@ -10516,9 +10516,6 @@ msgstr ""
msgid "CiCatalog|Remove project from the CI/CD Catalog?"
msgstr ""
msgid "CiCatalog|Repositories of pipeline components available in this namespace."
msgstr ""
msgid "CiCatalog|Search must be at least 3 characters"
msgstr ""
@ -16718,6 +16715,9 @@ msgstr ""
msgid "Dependencies|Toggle vulnerability list"
msgstr ""
msgid "Dependencies|Unknown path"
msgstr ""
msgid "Dependencies|Unsupported file(s) detected"
msgstr ""
@ -24692,6 +24692,9 @@ msgstr ""
msgid "IdentityVerification|Didn't receive a code? %{codeLinkStart}Send a new code%{codeLinkEnd} or %{phoneLinkStart}enter a new phone number%{phoneLinkEnd}"
msgstr ""
msgid "IdentityVerification|Didn't receive a code? Send a new code in %{timer} or %{phoneLinkStart}enter a new phone number%{phoneLinkEnd}"
msgstr ""
msgid "IdentityVerification|Email update is only offered once."
msgstr ""
@ -24761,6 +24764,9 @@ msgstr ""
msgid "IdentityVerification|Send code"
msgstr ""
msgid "IdentityVerification|Send code in %{timer}"
msgstr ""
msgid "IdentityVerification|Something went wrong. Please try again."
msgstr ""
@ -28212,6 +28218,12 @@ msgstr ""
msgid "KubernetesDashboard|Running"
msgstr ""
msgid "KubernetesDashboard|Service"
msgstr ""
msgid "KubernetesDashboard|Services"
msgstr ""
msgid "KubernetesDashboard|StatefulSet"
msgstr ""
@ -35817,9 +35829,6 @@ msgstr ""
msgid "Pipelines|CI lint"
msgstr ""
msgid "Pipelines|CI/CD Catalog"
msgstr ""
msgid "Pipelines|Child pipeline (%{linkStart}parent%{linkEnd})"
msgstr ""
@ -56332,10 +56341,10 @@ msgstr ""
msgid "You do not belong to any projects yet."
msgstr ""
msgid "You do not have access to AI features."
msgid "You do not have access to any projects for creating incidents."
msgstr ""
msgid "You do not have access to any projects for creating incidents."
msgid "You do not have access to chat feature."
msgstr ""
msgid "You do not have any subscriptions yet"

View File

@ -1,5 +1,5 @@
ARG DOCKER_VERSION=24.0.5
ARG CHROME_VERSION=113
ARG CHROME_VERSION=119
ARG RUBY_VERSION=3.0
ARG QA_BUILD_TARGET=ee

View File

@ -3,7 +3,7 @@
source 'https://rubygems.org'
gem 'gitlab-qa', '~> 13', '>= 13.1.0', require: 'gitlab/qa'
gem 'gitlab_quality-test_tooling', '~> 1.10.1', require: false
gem 'gitlab_quality-test_tooling', '~> 1.11.0', require: false
gem 'gitlab-utils', path: '../gems/gitlab-utils'
gem 'activesupport', '~> 7.0.8' # This should stay in sync with the root's Gemfile
gem 'allure-rspec', '~> 2.23.0'

View File

@ -128,7 +128,7 @@ GEM
rainbow (>= 3, < 4)
table_print (= 1.5.7)
zeitwerk (>= 2, < 3)
gitlab_quality-test_tooling (1.10.1)
gitlab_quality-test_tooling (1.11.0)
activesupport (>= 6.1, < 7.2)
amatch (~> 0.4.1)
gitlab (~> 4.19)
@ -360,7 +360,7 @@ DEPENDENCIES
fog-google (~> 1.19)
gitlab-qa (~> 13, >= 13.1.0)
gitlab-utils!
gitlab_quality-test_tooling (~> 1.10.1)
gitlab_quality-test_tooling (~> 1.11.0)
influxdb-client (~> 3.0)
knapsack (~> 4.0)
nokogiri (~> 1.16)

View File

@ -8,7 +8,7 @@ module QA
module Env
extend self
attr_writer :personal_access_token, :admin_personal_access_token
attr_writer :personal_access_token, :admin_personal_access_token, :gitlab_url
attr_accessor :dry_run
ENV_VARIABLES = Gitlab::QA::Runtime::Env::ENV_VARIABLES

View File

@ -4,10 +4,20 @@ module QA
module Service
module DockerRun
class Gitlab < Base
def initialize(name:, omnibus_config: '', image: '')
attr_reader :external_url, :name
# @param [String] name
# @param [String] omnibus_config
# @param [String] image
# @param [String] ports Docker-formatted port exposition
# @see ports https://docs.docker.com/engine/reference/commandline/run/#publish
# @param [String] external_url
def initialize(name:, omnibus_config: '', image: '', ports: '80:80', external_url: Runtime::Env.gitlab_url)
@image = image
@name = name
@omnibus_configuration = omnibus_config
@ports = ports
@external_url = external_url
super()
end
@ -24,7 +34,7 @@ module QA
docker run -d --rm
--network #{network}
--hostname #{host_name}
--publish 80:80
--publish #{@ports}
#{RUBY_PLATFORM.include?('arm64') ? '--platform linux/amd64' : ''}
--env GITLAB_OMNIBUS_CONFIG="#{@omnibus_configuration}"
--name #{@name}

View File

@ -0,0 +1,84 @@
# frozen_string_literal: true
module QA
  module Service
    module Gitlab
      # Manages a collection of GitLab instances running as Docker containers.
      # Used by tests that need more than one GitLab deployment at once
      # (for example, Cells tests).
      class Instances
        # @return [Array<Service::DockerRun::Gitlab>] the instances created so far
        attr_reader :list

        def initialize
          @list = []
        end

        # Default omnibus configuration for a GitLab instance
        #
        # @param cell_url [String] the external url for the GitLab instance
        # @return [String] an omnibus configuration snippet
        def omnibus_configuration(cell_url:)
          <<~OMNIBUS
            gitlab_rails['lfs_enabled'] = true;
            gitlab_rails['initial_root_password']= '#{Runtime::Env.initial_root_password}'
            external_url '#{cell_url}';
          OMNIBUS
        end

        # Sets the gitlab_url values so that gitlab-qa flows work on one of the instances
        #
        # @param instance [Service::DockerRun::Gitlab] the GitLab instance to be used
        def set_gitlab_urls(instance)
          Support::GitlabAddress.define_gitlab_address_attribute!(instance.external_url)
          Runtime::Env.gitlab_url = instance.external_url
          Runtime::Scenario.define(:gitlab_address, instance.external_url)
        end

        # Creates a DockerRun::Gitlab instance and adds it to the list of instances
        #
        # @param name [String] the name for the instance
        # @param url [String] the URL for the instance
        # @param external_port [String] the external port
        # @param internal_port [String] the internal port to use instead of default (optional)
        # @param omnibus_config [String] omnibus_configuration to use instead of default (optional)
        # @return [Service::DockerRun::Gitlab] the last created GitLab instance
        def add_gitlab_instance(name:, url:, external_port:, internal_port: '80', omnibus_config: nil)
          cell_url = "http://#{url}/"
          external_url = "http://#{url}:#{external_port}/"
          ports = "#{external_port}:#{internal_port}"
          omnibus_config ||= omnibus_configuration(cell_url: cell_url)

          @list << Service::DockerRun::Gitlab.new(
            image: Runtime::Env.release,
            name: name,
            ports: ports,
            omnibus_config: omnibus_config,
            external_url: external_url).tap do |gitlab|
              gitlab.login
              gitlab.register!
            end

          @list.last
        end

        # Waits for an instance to report a healthy Docker health check
        #
        # @param instance [Service::DockerRun::Gitlab] the GitLab instance to be checked
        def wait_for_instance(instance)
          Support::Waiter.wait_until(max_duration: 900, sleep_interval: 10, raise_on_failure: true) do
            instance.health == "healthy"
          end
        end

        # Waits until every managed instance is healthy
        def wait_for_all_instances
          @list.each { |instance| wait_for_instance(instance) }
        end

        # Removes every managed instance and clears the list
        def remove_all_instances
          @list.each(&:remove!)
          @list.clear
        end

        # Removes an instance with a given name, if present
        #
        # @param instance_name [String] the name of the instance that was specified during initialization
        def remove_instance(instance_name)
          index = @list.index { |instance| instance.name == instance_name }
          # Guard against an unknown name: Array#slice!(nil) would raise TypeError
          return if index.nil?

          @list.slice!(index).remove!
        end
      end
    end
  end
end

View File

@ -0,0 +1,50 @@
# frozen_string_literal: true
module QA
  RSpec.describe 'Data Stores', :skip_live_env, :requires_admin, product_group: :tenant_scale do
    describe 'Multiple Cells' do
      # Shared Docker network hostname used by both cells
      let(:url) { 'gitlab-cells.bridge' }
      let(:cells) { Service::Gitlab::Instances.new }

      # Both cells are created eagerly (let!) so they can boot before the example runs
      let!(:first_cell) do
        cells.add_gitlab_instance(url: url, name: 'gitlab-first-cell', external_port: '3000')
      end

      let!(:second_cell) do
        cells.add_gitlab_instance(url: url, name: 'gitlab-second-cell', external_port: '3001')
      end

      before do
        # Point gitlab-qa flows at the first cell and wait until both containers are healthy
        cells.set_gitlab_urls(first_cell)
        cells.wait_for_all_instances

        # TODO: configure cells to be connected
        page.visit first_cell.external_url
      end

      after do
        cells.remove_all_instances
      end

      it(
        'user logged into one Cell is logged into all',
        testcase: 'https://gitlab.com/gitlab-org/gitlab/-/quality/test_cases/433542',
        only: :local
      ) do
        Flow::Login.sign_in(as: create(:user))

        page.visit second_cell.external_url

        Page::Main::Menu.perform do |menu|
          expect(menu).to be_signed_in
        end
      end
    end
  end
end

View File

@ -0,0 +1,23 @@
# frozen_string_literal: true
# version of the login test that only runs against GDK
module QA
  RSpec.describe 'Data Stores', :skip_live_env, :requires_admin, product_group: :tenant_scale do
    describe 'Multiple Cells' do
      it(
        'user logged into one Cell is logged into all',
        testcase: 'https://gitlab.com/gitlab-org/gitlab/-/quality/test_cases/433548',
        only: :local
      ) do
        # Sign in on the primary cell (the GDK instance under test)
        Flow::Login.sign_in(as: create(:user))

        # Visiting the second cell should reuse the existing session
        page.visit ENV.fetch('CELL2_URL')

        Page::Main::Menu.perform do |menu|
          expect(menu).to be_signed_in
        end
      end
    end
  end
end

View File

@ -34,6 +34,7 @@ module QA
options.each do |option|
opts[:domain] = production_domain(uri_tld) if option == :production
return run_locally? if option == :local
next unless option.is_a?(Hash)
@ -57,6 +58,10 @@ module QA
private
def run_locally?
!Runtime::Env.running_in_ci?
end
def evaluate_pipeline_context(pipeline)
return true if Runtime::Env.ci_project_name.blank?

View File

@ -324,6 +324,27 @@ RSpec.describe QA::Specs::Helpers::ContextSelector do
end
end
context 'local' do
it 'runs locally' do
stub_env('CI_JOB_NAME', nil)
group = describe_successfully 'Runs locally', :local do
it('runs locally') {}
end
expect(group.examples[0].execution_result.status).to eq(:passed)
end
it 'does not run in CI' do
stub_env('CI_JOB_NAME', 'ee:instance-image')
group = describe_successfully 'Does not run in CI' do
it('does not run in CI', only: :local) {}
end
expect(group.examples[0].execution_result.status).to eq(:pending)
end
end
context 'production' do
before do
allow(GitlabEdition).to receive(:jh?).and_return(false)

View File

@ -340,9 +340,18 @@ function retry_failed_rspec_examples() {
scripts/merge-reports "rspec/rspec-${CI_JOB_ID}.json" "${json_retry_file}"
junit_merge "${junit_retry_file}" "rspec/rspec-${CI_JOB_ID}.xml" --update-only
# The tests are flaky because they succeeded after being retried.
if [[ $rspec_run_status -eq 0 ]]; then
# The test is flaky because it succeeded after being retried.
# Make the pipeline "pass with warnings" if the flaky test is part of this MR.
# "53557338" is the project ID of https://gitlab.com/gitlab-org/quality/engineering-productivity/flaky-tests
if [ "$CREATE_RAILS_FLAKY_TEST_ISSUES" == "true" ]; then
bundle exec flaky-test-issues \
--token "${RAILS_FLAKY_TEST_PROJECT_TOKEN}" \
--project "53557338" \
--merge_request_iid "$CI_MERGE_REQUEST_IID" \
--input-files "rspec/rspec-retry-*.json" || true # We don't want this command to fail the job.
fi
# Make the pipeline "pass with warnings" if the flaky tests are part of this MR.
warn_on_successfully_retried_test
fi

View File

@ -2,7 +2,6 @@
FactoryBot.define do
factory :ci_reports_security_finding, class: '::Gitlab::Ci::Reports::Security::Finding' do
compare_key { "#{identifiers.first&.external_type}:#{identifiers.first&.external_id}:#{location.fingerprint}" }
confidence { :medium }
identifiers { Array.new(1) { association(:ci_reports_security_identifier) } }
location factory: :ci_reports_security_locations_sast

View File

@ -18,6 +18,10 @@ FactoryBot.define do
create(:namespace_settings, namespace: group) unless group.namespace_settings
end
trait :with_organization do
association :organization
end
trait :public do
visibility_level { Gitlab::VisibilityLevel::PUBLIC }
end

View File

@ -205,7 +205,10 @@ RSpec.describe 'Group', feature_category: :groups_and_projects do
describe 'not showing personalization questions on group creation when it is enabled' do
before do
stub_application_setting(hide_third_party_offers: true)
visit new_group_path(anchor: 'create-group-pane')
# If visiting directly via path, personalization setting is not being picked up correctly
visit new_group_path
click_link 'Create group'
end
it 'does not render personalization questions' do
@ -350,10 +353,16 @@ RSpec.describe 'Group', feature_category: :groups_and_projects do
visit path
end
it_behaves_like 'dirty submit form', [{ form: '.js-general-settings-form', input: 'input[name="group[name]"]', submit: 'button[type="submit"]' },
{ form: '.js-general-settings-form', input: '#group_visibility_level_0', submit: 'button[type="submit"]' },
{ form: '.js-general-permissions-form', input: '#group_request_access_enabled', submit: 'button[type="submit"]' },
{ form: '.js-general-permissions-form', input: 'input[name="group[two_factor_grace_period]"]', submit: 'button[type="submit"]' }]
it_behaves_like 'dirty submit form', [
{ form: '.js-general-settings-form', input: 'input[name="group[name]"]', submit: 'button[type="submit"]' },
{ form: '.js-general-settings-form', input: '#group_visibility_level_0', submit: 'button[type="submit"]' },
{ form: '.js-general-permissions-form', input: '#group_request_access_enabled', submit: 'button[type="submit"]' },
{
form: '.js-general-permissions-form',
input: 'input[name="group[two_factor_grace_period]"]',
submit: 'button[type="submit"]'
}
]
it 'saves new settings' do
page.within('.gs-general') do

View File

@ -1,5 +1,4 @@
import {
generateServicePortsString,
getDeploymentsStatuses,
getDaemonSetStatuses,
getStatefulSetStatuses,
@ -12,35 +11,6 @@ import {
import { CLUSTER_AGENT_ERROR_MESSAGES } from '~/environments/constants';
describe('k8s_integration_helper', () => {
describe('generateServicePortsString', () => {
const port = '8080';
const protocol = 'TCP';
const nodePort = '31732';
it('returns empty string if no ports provided', () => {
expect(generateServicePortsString([])).toBe('');
});
it('returns port and protocol when provided', () => {
expect(generateServicePortsString([{ port, protocol }])).toBe(`${port}/${protocol}`);
});
it('returns port, protocol and nodePort when provided', () => {
expect(generateServicePortsString([{ port, protocol, nodePort }])).toBe(
`${port}:${nodePort}/${protocol}`,
);
});
it('returns joined strings of ports if multiple are provided', () => {
expect(
generateServicePortsString([
{ port, protocol },
{ port, protocol, nodePort },
]),
).toBe(`${port}/${protocol}, ${port}:${nodePort}/${protocol}`);
});
});
describe('getDeploymentsStatuses', () => {
const pending = {
status: {

View File

@ -513,3 +513,87 @@ export const mockCronJobsTableItems = [
];
export const k8sCronJobsMock = [readyCronJob, suspendedCronJob, failedCronJob];
export const k8sServicesMock = [
{
metadata: {
name: 'my-first-service',
namespace: 'default',
creationTimestamp: '2023-07-31T11:50:17Z',
labels: {},
annotations: {},
},
spec: {
ports: [
{
name: 'https',
protocol: 'TCP',
port: 443,
targetPort: 8443,
},
],
clusterIP: '10.96.0.1',
externalIP: '-',
type: 'ClusterIP',
},
},
{
metadata: {
name: 'my-second-service',
namespace: 'default',
creationTimestamp: '2023-11-21T11:50:59Z',
labels: {},
annotations: {},
},
spec: {
ports: [
{
name: 'http',
protocol: 'TCP',
appProtocol: 'http',
port: 80,
targetPort: 'http',
nodePort: 31989,
},
{
name: 'https',
protocol: 'TCP',
appProtocol: 'https',
port: 443,
targetPort: 'https',
nodePort: 32679,
},
],
clusterIP: '10.105.219.238',
externalIP: '-',
type: 'NodePort',
},
},
];
export const mockServicesTableItems = [
{
name: 'my-first-service',
namespace: 'default',
type: 'ClusterIP',
clusterIP: '10.96.0.1',
externalIP: '-',
ports: '443/TCP',
age: '114d',
labels: {},
annotations: {},
kind: 'Service',
},
{
name: 'my-second-service',
namespace: 'default',
type: 'NodePort',
clusterIP: '10.105.219.238',
externalIP: '-',
ports: '80:31989/TCP, 443:32679/TCP',
age: '1d',
labels: {},
annotations: {},
kind: 'Service',
},
];

View File

@ -7,6 +7,7 @@ import k8sDashboardReplicaSetsQuery from '~/kubernetes_dashboard/graphql/queries
import k8sDashboardDaemonSetsQuery from '~/kubernetes_dashboard/graphql/queries/k8s_dashboard_daemon_sets.query.graphql';
import k8sDashboardJobsQuery from '~/kubernetes_dashboard/graphql/queries/k8s_dashboard_jobs.query.graphql';
import k8sDashboardCronJobsQuery from '~/kubernetes_dashboard/graphql/queries/k8s_dashboard_cron_jobs.query.graphql';
import k8sDashboardServicesQuery from '~/kubernetes_dashboard/graphql/queries/k8s_dashboard_services.query.graphql';
import {
k8sPodsMock,
k8sDeploymentsMock,
@ -15,6 +16,7 @@ import {
k8sDaemonSetsMock,
k8sJobsMock,
k8sCronJobsMock,
k8sServicesMock,
} from '../mock_data';
describe('~/frontend/environments/graphql/resolvers', () => {
@ -624,4 +626,86 @@ describe('~/frontend/environments/graphql/resolvers', () => {
).rejects.toThrow('API error');
});
});
// Specs for the `k8sServices` Apollo resolver: it lists Kubernetes Services
// through the cluster_client CoreV1Api and subscribes to a watch stream so the
// Apollo cache is updated when service events arrive.
describe('k8sServices', () => {
// Minimal Apollo client stub — only `writeQuery` is exercised by the resolver.
const client = { writeQuery: jest.fn() };
const mockWatcher = WatchApi.prototype;
// Resolves to the watcher so the resolver can subscribe to service events.
const mockServicesListWatcherFn = jest.fn().mockImplementation(() => {
return Promise.resolve(mockWatcher);
});
// Simulates the watch stream immediately emitting a 'data' event with an empty list.
const mockOnDataFn = jest.fn().mockImplementation((eventName, callback) => {
if (eventName === 'data') {
callback([]);
}
});
// Simulates a successful list call returning the mocked services.
const mockServicesListFn = jest.fn().mockImplementation(() => {
return Promise.resolve({
items: k8sServicesMock,
});
});
const mockAllServicesListFn = jest.fn().mockImplementation(mockServicesListFn);
describe('when the Services data is present', () => {
beforeEach(() => {
// Stub both the initial list call and the subsequent watch subscription.
jest
.spyOn(CoreV1Api.prototype, 'listCoreV1ServiceForAllNamespaces')
.mockImplementation(mockAllServicesListFn);
jest.spyOn(mockWatcher, 'subscribeToStream').mockImplementation(mockServicesListWatcherFn);
jest.spyOn(mockWatcher, 'on').mockImplementation(mockOnDataFn);
});
it('should request all Services from the cluster_client library and watch the events', async () => {
const Services = await mockResolvers.Query.k8sServices(
null,
{
configuration,
},
{ client },
);
expect(mockAllServicesListFn).toHaveBeenCalled();
expect(mockServicesListWatcherFn).toHaveBeenCalled();
expect(Services).toEqual(k8sServicesMock);
});
it('should update cache with the new data when received from the library', async () => {
await mockResolvers.Query.k8sServices(null, { configuration, namespace: '' }, { client });
// The watch callback fired with [] (see mockOnDataFn), so the cache is
// expected to be rewritten with an empty k8sServices list.
expect(client.writeQuery).toHaveBeenCalledWith({
query: k8sDashboardServicesQuery,
variables: { configuration, namespace: '' },
data: { k8sServices: [] },
});
});
});
it('should not watch Services from the cluster_client library when the Services data is not present', async () => {
// An empty list response must short-circuit the watch subscription.
jest.spyOn(CoreV1Api.prototype, 'listCoreV1ServiceForAllNamespaces').mockImplementation(
jest.fn().mockImplementation(() => {
return Promise.resolve({
items: [],
});
}),
);
await mockResolvers.Query.k8sServices(null, { configuration }, { client });
expect(mockServicesListWatcherFn).not.toHaveBeenCalled();
});
it('should throw an error if the API call fails', async () => {
// API failures must propagate to the caller rather than being swallowed.
jest
.spyOn(CoreV1Api.prototype, 'listCoreV1ServiceForAllNamespaces')
.mockRejectedValue(new Error('API error'));
await expect(
mockResolvers.Query.k8sServices(null, { configuration }, { client }),
).rejects.toThrow('API error');
});
});
});

View File

@ -5,6 +5,7 @@ import {
calculateDaemonSetStatus,
calculateJobStatus,
calculateCronJobStatus,
generateServicePortsString,
} from '~/kubernetes_dashboard/helpers/k8s_integration_helper';
import { useFakeDate } from 'helpers/fake_date';
@ -140,4 +141,33 @@ describe('k8s_integration_helper', () => {
expect(calculateCronJobStatus(item)).toBe(expected);
});
});
// Specs for `generateServicePortsString`, which formats a Service's ports
// array into a human-readable "port[:nodePort]/protocol" string, comma-joined
// when several ports are present.
describe('generateServicePortsString', () => {
const port = '8080';
const protocol = 'TCP';
const nodePort = '31732';
it('returns empty string if no ports provided', () => {
expect(generateServicePortsString([])).toBe('');
});
it('returns port and protocol when provided', () => {
expect(generateServicePortsString([{ port, protocol }])).toBe(`${port}/${protocol}`);
});
it('returns port, protocol and nodePort when provided', () => {
// nodePort, when present, is appended after a colon: "port:nodePort/protocol".
expect(generateServicePortsString([{ port, protocol, nodePort }])).toBe(
`${port}:${nodePort}/${protocol}`,
);
});
it('returns joined strings of ports if multiple are provided', () => {
expect(
generateServicePortsString([
{ port, protocol },
{ port, protocol, nodePort },
]),
).toBe(`${port}/${protocol}, ${port}:${nodePort}/${protocol}`);
});
});
});

View File

@ -0,0 +1,104 @@
import Vue from 'vue';
import VueApollo from 'vue-apollo';
import { shallowMount } from '@vue/test-utils';
import waitForPromises from 'helpers/wait_for_promises';
import createMockApollo from 'helpers/mock_apollo_helper';
import ServicesPage from '~/kubernetes_dashboard/pages/services_page.vue';
import WorkloadLayout from '~/kubernetes_dashboard/components/workload_layout.vue';
import { SERVICES_TABLE_FIELDS } from '~/kubernetes_dashboard/constants';
import { useFakeDate } from 'helpers/fake_date';
import { k8sServicesMock, mockServicesTableItems } from '../graphql/mock_data';
Vue.use(VueApollo);
// Specs for the Kubernetes dashboard Services page component: it renders a
// WorkloadLayout driven by the `k8sServices` GraphQL query and surfaces
// loading, data, and error states from the cluster_client API.
describe('Kubernetes dashboard services page', () => {
let wrapper;
// Agent-tunnel connection details injected into the component via provide().
const configuration = {
basePath: 'kas/tunnel/url',
baseOptions: {
headers: { 'GitLab-Agent-Id': '1' },
},
};
const findWorkloadLayout = () => wrapper.findComponent(WorkloadLayout);
// Apollo provider whose k8sServices resolver returns the mocked service list.
const createApolloProvider = () => {
const mockResolvers = {
Query: {
k8sServices: jest.fn().mockReturnValue(k8sServicesMock),
},
};
return createMockApollo([], mockResolvers);
};
const createWrapper = (apolloProvider = createApolloProvider()) => {
wrapper = shallowMount(ServicesPage, {
provide: { configuration },
apolloProvider,
});
};
describe('mounted', () => {
it('renders WorkloadLayout component', () => {
createWrapper();
expect(findWorkloadLayout().exists()).toBe(true);
});
it('sets loading prop for the WorkloadLayout', () => {
createWrapper();
expect(findWorkloadLayout().props('loading')).toBe(true);
});
it('removes loading prop from the WorkloadLayout when the list of services loaded', async () => {
createWrapper();
await waitForPromises();
expect(findWorkloadLayout().props('loading')).toBe(false);
});
});
describe('when gets services data', () => {
// Pin "now" so the age values in mockServicesTableItems stay deterministic.
useFakeDate(2023, 10, 23, 10, 10);
it('sets correct stats object for the WorkloadLayout', async () => {
createWrapper();
await waitForPromises();
// Services have no status breakdown, so the stats list is empty.
expect(findWorkloadLayout().props('stats')).toEqual([]);
});
it('sets correct table items object for the WorkloadLayout', async () => {
createWrapper();
await waitForPromises();
expect(findWorkloadLayout().props('items')).toMatchObject(mockServicesTableItems);
expect(findWorkloadLayout().props('fields')).toMatchObject(SERVICES_TABLE_FIELDS);
});
});
describe('when gets an error from the cluster_client API', () => {
const error = new Error('Error from the cluster_client API');
// Provider whose resolver rejects once, to drive the error path.
const createErroredApolloProvider = () => {
const mockResolvers = {
Query: {
k8sServices: jest.fn().mockRejectedValueOnce(error),
},
};
return createMockApollo([], mockResolvers);
};
beforeEach(async () => {
createWrapper(createErroredApolloProvider());
await waitForPromises();
});
it('sets errorMessage prop for the WorkloadLayout', () => {
expect(findWorkloadLayout().props('errorMessage')).toBe(error.message);
});
});
});

View File

@ -264,7 +264,7 @@ describe('MrWidgetOptions', () => {
expect(findMergePipelineForkAlert().exists()).toBe(false);
});
it('hides the alert when merge pipelines are not enabled', async () => {
it('hides the alert when merged results pipelines are not enabled', async () => {
createComponent({
updatedMrData: {
source_project_id: 1,
@ -275,7 +275,7 @@ describe('MrWidgetOptions', () => {
expect(findMergePipelineForkAlert().exists()).toBe(false);
});
it('shows the alert when merge pipelines are enabled and the source project and target project are different', async () => {
it('shows the alert when merged results pipelines are enabled and the source project and target project are different', async () => {
createComponent({
updatedMrData: {
source_project_id: 1,

View File

@ -44,6 +44,10 @@ describe('GlCountdown', () => {
it('displays 00:00:00', () => {
expect(wrapper.text()).toContain('00:00:00');
});
it('emits `timer-expired` event', () => {
expect(wrapper.emitted('timer-expired')).toStrictEqual([[]]);
});
});
describe('when an invalid date is passed', () => {

View File

@ -36,18 +36,6 @@ RSpec.describe Ci::Catalog::ResourcesHelper, feature_category: :pipeline_composi
end
end
describe '#can_view_namespace_catalog?' do
subject { helper.can_view_namespace_catalog?(project) }
before do
stub_licensed_features(ci_namespace_catalog: false)
end
it 'user cannot view the Catalog in CE regardless of permissions' do
expect(subject).to be false
end
end
describe '#js_ci_catalog_data' do
let(:project) { build(:project, :repository) }

View File

@ -29,6 +29,16 @@ RSpec.describe ClickHouse::Iterator, :click_house, feature_category: :database d
expect(collect_ids_with_batch_size(15)).to match_array(expected_values)
end
# When a min_value is passed, the iterator should start batching from that
# value instead of the table's minimum id.
context 'when min value is given' do
let(:iterator) { described_class.new(query_builder: query_builder, connection: connection, min_value: 5) }
it 'iterates from the given min value' do
# Records below the min value (ids 1..4) must be excluded.
expected_values = (5..10).to_a
expect(collect_ids_with_batch_size(5)).to match_array(expected_values)
end
end
context 'when there are no records for the given query' do
let(:query_builder) do
ClickHouse::QueryBuilder

View File

@ -185,7 +185,7 @@ RSpec.describe Gitlab::Ci::Parsers::Security::Common, feature_category: :vulnera
context 'when name is provided' do
it 'sets name from the report as a name' do
finding = report.findings.find { |x| x.compare_key == 'CVE-1030' }
finding = report.findings.second
expected_name = Gitlab::Json.parse(finding.raw_metadata)['name']
expect(finding.name).to eq(expected_name)
@ -197,7 +197,8 @@ RSpec.describe Gitlab::Ci::Parsers::Security::Common, feature_category: :vulnera
let(:location) { nil }
it 'returns only identifier name' do
finding = report.findings.find { |x| x.compare_key == 'CVE-2017-11429' }
finding = report.findings.third
expect(finding.name).to eq("CVE-2017-11429")
end
end
@ -205,21 +206,24 @@ RSpec.describe Gitlab::Ci::Parsers::Security::Common, feature_category: :vulnera
context 'when location exists' do
context 'when CVE identifier exists' do
it 'combines identifier with location to create name' do
finding = report.findings.find { |x| x.compare_key == 'CVE-2017-11429' }
finding = report.findings.third
expect(finding.name).to eq("CVE-2017-11429 in yarn.lock")
end
end
context 'when CWE identifier exists' do
it 'combines identifier with location to create name' do
finding = report.findings.find { |x| x.compare_key == 'CWE-2017-11429' }
finding = report.findings.fourth
expect(finding.name).to eq("CWE-2017-11429 in yarn.lock")
end
end
context 'when neither CVE nor CWE identifier exist' do
it 'combines identifier with location to create name' do
finding = report.findings.find { |x| x.compare_key == 'OTHER-2017-11429' }
finding = report.findings.fifth
expect(finding.name).to eq("other-2017-11429 in yarn.lock")
end
end

View File

@ -2,7 +2,7 @@
require 'spec_helper'
RSpec.describe Gitlab::Ci::Reports::Security::Report do
RSpec.describe Gitlab::Ci::Reports::Security::Report, feature_category: :vulnerability_management do
let_it_be(:pipeline) { create(:ci_pipeline) }
let(:created_at) { 2.weeks.ago }
@ -89,7 +89,7 @@ RSpec.describe Gitlab::Ci::Reports::Security::Report do
let(:other_report) do
create(
:ci_reports_security_report,
findings: [create(:ci_reports_security_finding, compare_key: 'other_finding')],
findings: [create(:ci_reports_security_finding)],
scanners: [create(:ci_reports_security_scanner, external_id: 'other_scanner', name: 'Other Scanner')],
identifiers: [create(:ci_reports_security_identifier, external_id: 'other_id', name: 'other_scanner')]
)

View File

@ -50,18 +50,6 @@ RSpec.describe Gitlab::UsageDataCounters::CiTemplateUniqueCounter, feature_categ
end
end
context 'with implicit includes', :snowplow do
let(:config_source) { :auto_devops_source }
described_class.all_included_templates('Auto-DevOps.gitlab-ci.yml').each do |template_name|
context "for #{template_name}" do
let(:template_path) { Gitlab::Template::GitlabCiYmlTemplate.find(template_name.delete_suffix('.gitlab-ci.yml')).full_name }
include_examples 'tracks template'
end
end
end
it 'expands short template names' do
expect do
described_class.track_unique_project_event(project: project, template: 'Dependency-Scanning.gitlab-ci.yml', config_source: :repository_source, user: user)

View File

@ -1084,6 +1084,110 @@ RSpec.describe Member, feature_category: :groups_and_projects do
end
end
context 'for updating organization_users' do
let_it_be(:group) { create(:group, :with_organization) }
let(:member) { create(:group_member, source: group) }
let(:update_organization_users_enabled) { true }
before do
stub_feature_flags(update_organization_users: update_organization_users_enabled)
end
context 'when update_organization_users is enabled' do
it 'inserts new record on member creation' do
expect { member }.to change { Organizations::OrganizationUser.count }.by(1)
record_attrs = { organization: group.organization, user: member.user, access_level: :default }
expect(Organizations::OrganizationUser.exists?(record_attrs)).to be(true)
end
context 'when user already exists in the organization_users' do
context 'for an already existing default organization_user' do
let_it_be(:project) { create(:project, group: group, organization: group.organization) }
before do
member
end
it 'does not insert a new record in organization_users' do
expect do
create(:project_member, :owner, source: project, user: member.user)
end.not_to change { Organizations::OrganizationUser.count }
expect(
Organizations::OrganizationUser.exists?(
organization: project.organization, user: member.user, access_level: :default
)
).to be(true)
end
it 'does not update timestamps' do
travel_to(1.day.from_now) do
expect do
create(:project_member, :owner, source: project, user: member.user)
end.not_to change { Organizations::OrganizationUser.last.updated_at }
end
end
end
context 'for an already existing owner organization_user' do
let_it_be(:user) { create(:user) }
let_it_be(:common_attrs) { { organization: group.organization, user: user } }
before_all do
create(:organization_user, :owner, common_attrs)
end
it 'does not insert a new record in organization_users nor update the access_level' do
expect do
create(:group_member, :owner, source: group, user: user)
end.not_to change { Organizations::OrganizationUser.count }
expect(
Organizations::OrganizationUser.exists?(common_attrs.merge(access_level: :default))
).to be(false)
expect(
Organizations::OrganizationUser.exists?(common_attrs.merge(access_level: :owner))
).to be(true)
end
end
end
context 'when updating the organization_users is not successful' do
it 'rolls back the member creation' do
allow(Organizations::OrganizationUser).to receive(:upsert).once.and_raise(ActiveRecord::StatementTimeout)
expect { member }.to raise_error(ActiveRecord::StatementTimeout)
expect(Organizations::OrganizationUser.exists?(organization: group.organization)).to be(false)
expect(group.group_members).to be_empty
end
end
end
shared_examples_for 'does not create an organization_user entry' do
specify do
expect { member }.not_to change { Organizations::OrganizationUser.count }
end
end
context 'when update_organization_users is disabled' do
let(:update_organization_users_enabled) { false }
it_behaves_like 'does not create an organization_user entry'
end
context 'when member is an invite' do
let(:member) { create(:group_member, :invited, source: group) }
it_behaves_like 'does not create an organization_user entry'
end
context 'when organization does not exist' do
let(:member) { create(:group_member) }
it_behaves_like 'does not create an organization_user entry'
end
end
context 'when after_commit :update_highest_role' do
let_it_be(:user) { create(:user) }

View File

@ -3,6 +3,8 @@
require 'spec_helper'
RSpec.describe Users::PhoneNumberValidation, feature_category: :instance_resiliency do
using RSpec::Parameterized::TableSyntax
let_it_be(:user) { create(:user) }
let_it_be(:banned_user) { create(:user, :banned) }
@ -250,4 +252,43 @@ RSpec.describe Users::PhoneNumberValidation, feature_category: :instance_resilie
it { is_expected.to be_nil }
end
end
# Specs for the SMS rate-limiting timestamp: after repeated send attempts the
# next send is only allowed once an escalating delay has elapsed.
describe '.sms_send_allowed_after' do
let_it_be(:record) { create(:phone_number_validation, sms_send_count: 0) }
subject(:result) { record.sms_send_allowed_after }

context 'when there are no attempts yet' do
# No prior sends means no wait is imposed.
it { is_expected.to be_nil }
end

context 'when sms_send_wait_time feature flag is disabled' do
let_it_be(:record) { create(:phone_number_validation, sms_send_count: 1) }

before do
stub_feature_flags(sms_send_wait_time: false)
end

# Flag off disables the backoff entirely.
it { is_expected.to be_nil }
end

# Escalating backoff per attempt; delay is capped at 10 minutes from the
# fifth attempt onwards.
where(:attempt_number, :expected_delay) do
2 | 1.minute
3 | 3.minutes
4 | 5.minutes
5 | 10.minutes
6 | 10.minutes
end

with_them do
it 'returns the correct delayed timestamp value' do
freeze_time do
# sms_send_count holds the number of attempts already made.
record.update!(sms_send_count: attempt_number - 1, sms_sent_at: Time.current)
expected_result = Time.current + expected_delay
expect(result).to eq expected_result
end
end
end
end
end

View File

@ -32,8 +32,19 @@ RSpec.describe Organizations::OrganizationPolicy, feature_category: :cell do
end
context 'when the user is part of the organization' do
before do
create :organization_user, organization: organization, user: current_user
before_all do
create(:organization_user, organization: organization, user: current_user)
end
it { is_expected.to be_disallowed(:admin_organization) }
it { is_expected.to be_allowed(:create_group) }
it { is_expected.to be_allowed(:read_organization) }
it { is_expected.to be_allowed(:read_organization_user) }
end
context 'when the user is an owner of the organization' do
before_all do
create(:organization_user, :owner, organization: organization, user: current_user)
end
it { is_expected.to be_allowed(:admin_organization) }

View File

@ -8,7 +8,7 @@ RSpec.describe Mutations::Organizations::Update, feature_category: :cell do
let_it_be(:user) { create(:user) }
let_it_be_with_reload(:organization) do
create(:organization) { |org| create(:organization_user, organization: org, user: user) }
create(:organization) { |org| create(:organization_user, :owner, organization: org, user: user) }
end
let(:mutation) { graphql_mutation(:organization_update, params) }

View File

@ -67,13 +67,13 @@ RSpec.describe 'getting organization information', feature_category: :cell do
it 'returns correct organization user fields' do
request_organization
organization_user_node = graphql_data_at(:organization, :organizationUsers, :nodes).first
organization_user_nodes = graphql_data_at(:organization, :organizationUsers, :nodes)
expected_attributes = {
"badges" => [{ "text" => "It's you!", "variant" => 'muted' }],
"id" => organization_user.to_global_id.to_s,
"user" => { "id" => user.to_global_id.to_s }
}
expect(organization_user_node).to match(expected_attributes)
expect(organization_user_nodes).to include(expected_attributes)
end
it 'avoids N+1 queries for all the fields' do

View File

@ -21,13 +21,13 @@ RSpec.describe Organizations::SettingsController, feature_category: :cell do
end
context 'when the user is signed in' do
let_it_be(:user) { create(:user) }
before do
sign_in(user)
end
context 'with no association to an organization' do
let_it_be(:user) { create(:user) }
it_behaves_like 'organization - not found response'
it_behaves_like 'organization - action disabled by `ui_for_organizations` feature flag'
end
@ -39,11 +39,18 @@ RSpec.describe Organizations::SettingsController, feature_category: :cell do
it_behaves_like 'organization - action disabled by `ui_for_organizations` feature flag'
end
context 'as an organization user' do
let_it_be(:user) { create :user }
context 'as a default organization user' do
before_all do
create(:organization_user, organization: organization, user: user)
end
before do
create :organization_user, organization: organization, user: user
it_behaves_like 'organization - not found response'
it_behaves_like 'organization - action disabled by `ui_for_organizations` feature flag'
end
context 'as an owner of an organization' do
before_all do
create(:organization_user, :owner, organization: organization, user: user)
end
it_behaves_like 'organization - successful response'

View File

@ -8,10 +8,6 @@ RSpec.describe Ci::Catalog::Resources::CreateService, feature_category: :pipelin
let(:service) { described_class.new(project, user) }
before do
stub_licensed_features(ci_namespace_catalog: true)
end
describe '#execute' do
context 'with an unauthorized user' do
it 'raises an AccessDeniedError' do

View File

@ -9,10 +9,6 @@ RSpec.describe Ci::Catalog::Resources::DestroyService, feature_category: :pipeli
let(:service) { described_class.new(project, user) }
before do
stub_licensed_features(ci_namespace_catalog: true)
end
describe '#execute' do
context 'with an unauthorized user' do
it 'raises an AccessDeniedError' do

View File

@ -32,7 +32,7 @@ RSpec.describe Organizations::UpdateService, feature_category: :cell do
context 'when user has permission' do
before_all do
create(:organization_user, organization: organization, user: current_user)
create(:organization_user, :owner, organization: organization, user: current_user)
end
shared_examples 'updating an organization' do

View File

@ -20,7 +20,8 @@ RSpec.describe Security::MergeReportsService, '#execute', feature_category: :cod
:ci_reports_security_finding,
identifiers: [identifier_1_primary, identifier_1_cve],
scanner: scanner_1,
severity: :low
severity: :low,
uuid: '61eb8e3e-3be1-4d6c-ba26-4e0dd4f94610'
)
end
@ -29,7 +30,8 @@ RSpec.describe Security::MergeReportsService, '#execute', feature_category: :cod
:ci_reports_security_finding,
identifiers: [identifier_1_primary, identifier_1_cve],
scanner: scanner_1,
severity: :low
severity: :low,
uuid: '61eb8e3e-3be1-4d6c-ba26-4e0dd4f94611'
)
end
@ -39,7 +41,8 @@ RSpec.describe Security::MergeReportsService, '#execute', feature_category: :cod
identifiers: [identifier_2_primary, identifier_2_cve],
location: build(:ci_reports_security_locations_sast, start_line: 32, end_line: 34),
scanner: scanner_2,
severity: :medium
severity: :medium,
uuid: '61eb8e3e-3be1-4d6c-ba26-4e0dd4f94614'
)
end
@ -49,7 +52,8 @@ RSpec.describe Security::MergeReportsService, '#execute', feature_category: :cod
identifiers: [identifier_2_primary, identifier_2_cve],
location: build(:ci_reports_security_locations_sast, start_line: 32, end_line: 34),
scanner: scanner_2,
severity: :medium
severity: :medium,
uuid: '61eb8e3e-3be1-4d6c-ba26-4e0dd4f94613'
)
end
@ -59,7 +63,8 @@ RSpec.describe Security::MergeReportsService, '#execute', feature_category: :cod
identifiers: [identifier_2_primary, identifier_2_cve],
location: build(:ci_reports_security_locations_sast, start_line: 42, end_line: 44),
scanner: scanner_2,
severity: :medium
severity: :medium,
uuid: '61eb8e3e-3be1-4d6c-ba26-4e0dd4f94612'
)
end
@ -68,7 +73,8 @@ RSpec.describe Security::MergeReportsService, '#execute', feature_category: :cod
:ci_reports_security_finding,
identifiers: [identifier_cwe],
scanner: scanner_3,
severity: :high
severity: :high,
uuid: '61eb8e3e-3be1-4d6c-ba26-4e0dd4f94615'
)
end
@ -77,7 +83,8 @@ RSpec.describe Security::MergeReportsService, '#execute', feature_category: :cod
:ci_reports_security_finding,
identifiers: [identifier_cwe],
scanner: scanner_1,
severity: :critical
severity: :critical,
uuid: '61eb8e3e-3be1-4d6c-ba26-4e0dd4f94616'
)
end
@ -86,7 +93,8 @@ RSpec.describe Security::MergeReportsService, '#execute', feature_category: :cod
:ci_reports_security_finding,
identifiers: [identifier_wasc],
scanner: scanner_1,
severity: :medium
severity: :medium,
uuid: '61eb8e3e-3be1-4d6c-ba26-4e0dd4f94617'
)
end
@ -95,7 +103,8 @@ RSpec.describe Security::MergeReportsService, '#execute', feature_category: :cod
:ci_reports_security_finding,
identifiers: [identifier_wasc],
scanner: scanner_2,
severity: :critical
severity: :critical,
uuid: '61eb8e3e-3be1-4d6c-ba26-4e0dd4f94618'
)
end
@ -226,9 +235,32 @@ RSpec.describe Security::MergeReportsService, '#execute', feature_category: :cod
let(:identifier_cve) { build(:ci_reports_security_identifier, external_id: 'CVE-2019-123', external_type: 'cve') }
let(:identifier_semgrep) { build(:ci_reports_security_identifier, external_id: 'rules.bandit.B105', external_type: 'semgrep_id') }
let(:finding_id_1) { build(:ci_reports_security_finding, identifiers: [identifier_bandit, identifier_cve], scanner: bandit_scanner, report_type: :sast) }
let(:finding_id_2) { build(:ci_reports_security_finding, identifiers: [identifier_cve], scanner: semgrep_scanner, report_type: :sast) }
let(:finding_id_3) { build(:ci_reports_security_finding, identifiers: [identifier_semgrep], scanner: semgrep_scanner, report_type: :sast) }
let(:finding_id_1) do
build(
:ci_reports_security_finding,
identifiers: [identifier_bandit, identifier_cve],
scanner: bandit_scanner,
report_type: :sast,
uuid: '21ab978a-7052-5428-af0b-c7a4b3fe5020')
end
let(:finding_id_2) do
build(
:ci_reports_security_finding,
identifiers: [identifier_cve],
scanner: semgrep_scanner,
report_type: :sast,
uuid: '21ab978a-7052-5428-af0b-c7a4b3fe5021')
end
let(:finding_id_3) do
build(
:ci_reports_security_finding,
identifiers: [identifier_semgrep],
scanner: semgrep_scanner,
report_type: :sast,
uuid: '21ab978a-7052-5428-af0b-c7a4b3fe5022')
end
let(:bandit_report) do
build(:ci_reports_security_report,

View File

@ -17,7 +17,6 @@ RSpec.describe IpCidrArrayValidator, feature_category: :shared do
using RSpec::Parameterized::TableSyntax
# noinspection RubyMismatchedArgumentType - https://handbook.gitlab.com/handbook/tools-and-tips/editors-and-ides/jetbrains-ides/tracked-jetbrains-issues/#ruby-32041
where(:cidr_array, :validity, :errors) do
# rubocop:disable Layout/LineLength -- The RSpec table syntax often requires long lines for errors
nil | false | { cidr_array: ["must be an array of CIDR values"] }

View File

@ -0,0 +1,104 @@
# frozen_string_literal: true
require 'spec_helper'
# Specs for the cron worker that removes ClickHouse `events` rows whose
# author no longer exists in PostgreSQL, keeping the two stores consistent.
RSpec.describe ClickHouse::EventAuthorsConsistencyCronWorker, feature_category: :value_stream_management do
let(:worker) { described_class.new }

context 'when ClickHouse is disabled' do
it 'does nothing' do
allow(ClickHouse::Client).to receive(:database_configured?).and_return(false)

# No metadata logging implies the worker exited before doing any work.
expect(worker).not_to receive(:log_extra_metadata_on_done)

worker.perform
end
end

context 'when the event_sync_worker_for_click_house feature flag is off' do
it 'does nothing' do
allow(ClickHouse::Client).to receive(:database_configured?).and_return(true)
stub_feature_flags(event_sync_worker_for_click_house: false)

expect(worker).not_to receive(:log_extra_metadata_on_done)

worker.perform
end
end

context 'when ClickHouse is available', :click_house do
let_it_be(:connection) { ClickHouse::Connection.new(:main) }
let_it_be_with_reload(:user1) { create(:user) }
let_it_be_with_reload(:user2) { create(:user) }

# Author ids still present in the events table after the worker ran.
let(:leftover_author_ids) { connection.select('SELECT DISTINCT author_id FROM events FINAL').pluck('author_id') }

# Ids that do not correspond to any PostgreSQL user — the rows to clean up.
let(:deleted_user_id1) { user2.id + 1 }
let(:deleted_user_id2) { user2.id + 2 }

before do
# Seed events for two real users and two non-existent author ids.
insert_query = <<~SQL
INSERT INTO events (id, author_id) VALUES
(1, #{user1.id}),
(2, #{user2.id}),
(3, #{deleted_user_id1}),
(4, #{deleted_user_id1}),
(5, #{deleted_user_id2})
SQL

connection.execute(insert_query)
end

it 'cleans up all inconsistent records in ClickHouse' do
worker.perform

expect(leftover_author_ids).to contain_exactly(user1.id, user2.id)

# the next job starts from the beginning of the table
expect(ClickHouse::SyncCursor.cursor_for(:event_authors_consistency_check)).to eq(0)
end

context 'when the previous job was not finished' do
it 'continues the processing from the cursor' do
ClickHouse::SyncCursor.update_cursor_for(:event_authors_consistency_check, deleted_user_id1)
worker.perform

# the previous records should remain
expect(leftover_author_ids).to contain_exactly(user1.id, user2.id)
end
end

context 'when processing stops due to the record clean up limit' do
it 'stores the last processed id value' do
# Deleting both users makes every event row a deletion candidate.
User.where(id: [user1.id, user2.id]).delete_all

stub_const("#{described_class}::MAX_AUTHOR_DELETIONS", 2)
stub_const("#{described_class}::POSTGRESQL_BATCH_SIZE", 1)
expect(worker).to receive(:log_extra_metadata_on_done).with(:result,
{ status: :deletion_limit_reached, deletions: 2 })

worker.perform

# Only the first two authors were removed; the cursor records progress.
expect(leftover_author_ids).to contain_exactly(deleted_user_id1, deleted_user_id2)
expect(ClickHouse::SyncCursor.cursor_for(:event_authors_consistency_check)).to eq(user2.id)
end
end

context 'when time limit is reached' do
it 'stops the processing earlier' do
stub_const("#{described_class}::POSTGRESQL_BATCH_SIZE", 1)

# stop at the third author_id
allow_next_instance_of(Analytics::CycleAnalytics::RuntimeLimiter) do |runtime_limiter|
allow(runtime_limiter).to receive(:over_time?).and_return(false, false, true)
end

expect(worker).to receive(:log_extra_metadata_on_done).with(:result, { status: :over_time, deletions: 1 })

worker.perform

# deleted_user_id2's row survives because the run timed out first.
expect(leftover_author_ids).to contain_exactly(user1.id, user2.id, deleted_user_id2)
end
end
end
end