Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2024-11-07 21:26:05 +00:00
parent bf483de9d2
commit b40ff326b9
50 changed files with 625 additions and 736 deletions

View File

@ -1713,14 +1713,18 @@
PROCESS_TEST_RESULTS: "true"
QA_SAVE_TEST_METRICS: "true"
.qa:rules:e2e-schedule-blocking:
.qa:rules:e2e-schedule-nightly:
rules:
- <<: *if-default-branch-schedule-nightly
variables:
<<: *qa-e2e-test-schedule-variables
KNAPSACK_GENERATE_REPORT: "true"
QA_RUN_IN_PARALLEL: "false"
COVERBAND_ENABLED: "true"
COVERBAND_ENABLED: "false"
.qa:rules:e2e-schedule-blocking:
rules:
- !reference [".qa:rules:e2e-schedule-nightly", rules]
- <<: *if-dot-com-gitlab-org-schedule
variables:
<<: *qa-e2e-test-schedule-variables
@ -1854,7 +1858,12 @@
- !reference [".qa:rules:e2e-blocking-base-before", rules]
- !reference [".prevent-tier-2-and-below", rules]
- !reference [".qa:rules:e2e-blocking-base-after", rules]
- !reference [".qa:rules:e2e-schedule-blocking", rules]
- !reference [".qa:rules:e2e-schedule-nightly", rules]
- <<: *if-dot-com-gitlab-org-schedule
variables:
<<: *qa-e2e-test-schedule-variables
KNAPSACK_GENERATE_REPORT: "false"
QA_RUN_IN_PARALLEL: "true"
.qa:rules:test-on-omnibus-nightly:
rules:

View File

@ -80,7 +80,6 @@ Gitlab/AvoidGitlabInstanceChecks:
- 'ee/lib/api/code_suggestions.rb'
- 'ee/lib/api/scim/instance_scim.rb'
- 'ee/lib/ee/api/namespaces.rb'
- 'ee/lib/ee/gitlab/background_migration/backfill_project_statistics_storage_size_without_pipeline_artifacts_size_job.rb'
- 'ee/lib/ee/gitlab/background_migration/create_compliance_standards_adherence.rb'
- 'ee/lib/ee/gitlab/gon_helper.rb'
- 'ee/lib/ee/gitlab/personal_access_tokens/service_account_token_validator.rb'

View File

@ -2174,7 +2174,6 @@ Layout/LineLength:
- 'lib/gitlab/auth/user_access_denied_reason.rb'
- 'lib/gitlab/background_migration/backfill_issue_search_data.rb'
- 'lib/gitlab/background_migration/backfill_iteration_cadence_id_for_boards.rb'
- 'lib/gitlab/background_migration/backfill_project_statistics_storage_size_without_pipeline_artifacts_size_job.rb'
- 'lib/gitlab/background_migration/backfill_snippet_repositories.rb'
- 'lib/gitlab/background_migration/backfill_workspace_personal_access_token.rb'
- 'lib/gitlab/background_migration/batching_strategies/backfill_project_namespace_per_group_batching_strategy.rb'

View File

@ -530,8 +530,8 @@
{"name":"pry-byebug","version":"3.10.1","platform":"ruby","checksum":"c8f975c32255bfdb29e151f5532130be64ff3d0042dc858d0907e849125581f8"},
{"name":"pry-rails","version":"0.3.9","platform":"ruby","checksum":"468662575abb6b67f4a9831219f99290d5eae7bf186e64dd810d0a3e4a8cc4b1"},
{"name":"pry-shell","version":"0.6.4","platform":"ruby","checksum":"ad024882d29912b071a7de65ebea538b242d2dc1498c60c7c2352ef94769f208"},
{"name":"psych","version":"5.1.2","platform":"java","checksum":"1dd68dc609eddbc884e6892e11da942e16f7256bd30ebde9d35449d43043a6fe"},
{"name":"psych","version":"5.1.2","platform":"ruby","checksum":"337322f58fc2bf24827d2b9bd5ab595f6a72971867d151bb39980060ea40a368"},
{"name":"psych","version":"5.2.0","platform":"java","checksum":"da3a7995e652365faa210d7658a291141c9a15bf05a4d9a48a13856b04f36960"},
{"name":"psych","version":"5.2.0","platform":"ruby","checksum":"6603fe756bcaf14daa25bc17625f36c90931dcf70452ac1e8da19760dc310573"},
{"name":"public_suffix","version":"6.0.1","platform":"ruby","checksum":"61d44e1cab5cbbbe5b31068481cf16976dd0dc1b6b07bd95617ef8c5e3e00c6f"},
{"name":"puma","version":"6.4.3","platform":"java","checksum":"373fcfacacaafd0f5a24db18cb99b3f2decb5c5316470169852559aa80adc8ab"},
{"name":"puma","version":"6.4.3","platform":"ruby","checksum":"24a4645c006811d83f2480057d1f54a96e7627b6b90e1c99b260b9dc630eb43e"},
@ -710,8 +710,8 @@
{"name":"state_machines","version":"0.5.0","platform":"ruby","checksum":"23e6249d374a920b528dccade403518b4abbd83841a3e2c9ef13e6f1a009b102"},
{"name":"state_machines-activemodel","version":"0.8.0","platform":"ruby","checksum":"e932dab190d4be044fb5f9cab01a3ea0b092c5f113d4676c6c0a0d49bf738d2c"},
{"name":"state_machines-activerecord","version":"0.8.0","platform":"ruby","checksum":"072fb701b8ab03de0608297f6c55dc34ed096e556fa8f77e556f3c461c71aab6"},
{"name":"stringio","version":"3.1.1","platform":"java","checksum":"110b4b76dbbcc9e7aab5d0fa34fbc82e3ab1ce851b2804517fa8e1e1bd45eb9b"},
{"name":"stringio","version":"3.1.1","platform":"ruby","checksum":"53456e14175c594e0e8eb2206a1be33f3974d4fe21c131e628908b05c8c2ae1e"},
{"name":"stringio","version":"3.1.2","platform":"java","checksum":"8a11a30ec257e6d9851a42dacb968b07a56bf2cfe359b2d906ec1f8774ac7d71"},
{"name":"stringio","version":"3.1.2","platform":"ruby","checksum":"204f1828f85cdb39d57cac4abc6dc44b04505a223f131587f2e20ae3729ba131"},
{"name":"strings","version":"0.2.1","platform":"ruby","checksum":"933293b3c95cf85b81eb44b3cf673e3087661ba739bbadfeadf442083158d6fb"},
{"name":"strings-ansi","version":"0.2.0","platform":"ruby","checksum":"90262d760ea4a94cc2ae8d58205277a343409c288cbe7c29416b1826bd511c88"},
{"name":"swd","version":"2.0.3","platform":"ruby","checksum":"4cdbe2a4246c19f093fce22e967ec3ebdd4657d37673672e621bf0c7eb770655"},

View File

@ -1462,7 +1462,7 @@ GEM
pry (>= 0.13.0)
tty-markdown
tty-prompt
psych (5.1.2)
psych (5.2.0)
stringio
public_suffix (6.0.1)
puma (6.4.3)
@ -1812,7 +1812,7 @@ GEM
state_machines-activerecord (0.8.0)
activerecord (>= 5.1)
state_machines-activemodel (>= 0.8.0)
stringio (3.1.1)
stringio (3.1.2)
strings (0.2.1)
strings-ansi (~> 0.2)
unicode-display_width (>= 1.5, < 3.0)

View File

@ -3,10 +3,12 @@ import { GlLoadingIcon, GlAlert, GlEmptyState, GlSprintf, GlIcon } from '@gitlab
import EmptyStateSvg from '@gitlab/svgs/dist/illustrations/status/status-nothing-md.svg';
import k8sLogsQuery from '~/environments/graphql/queries/k8s_logs.query.graphql';
import environmentClusterAgentQuery from '~/environments/graphql/queries/environment_cluster_agent.query.graphql';
import abortK8sPodLogsStream from '~/environments/graphql/mutations/abort_pod_logs_stream.mutation.graphql';
import { createK8sAccessConfiguration } from '~/environments/helpers/k8s_integration_helper';
import LogsViewer from '~/vue_shared/components/logs_viewer/logs_viewer.vue';
import { getIdFromGraphQLId } from '~/graphql_shared/utils';
import { s__, __ } from '~/locale';
import { fetchPolicies } from '~/lib/graphql';
export default {
components: {
@ -45,11 +47,14 @@ export default {
data() {
return {
environmentError: null,
k8sLogs: null,
environment: null,
};
},
apollo: {
// eslint-disable-next-line @gitlab/vue-no-undef-apollo-properties
k8sLogs: {
fetchPolicy: fetchPolicies.NETWORK_ONLY,
nextFetchPolicy: fetchPolicies.CACHE_FIRST,
query: k8sLogsQuery,
variables() {
return {
@ -63,7 +68,6 @@ export default {
return Boolean(!this.gitlabAgentId);
},
},
// eslint-disable-next-line @gitlab/vue-no-undef-apollo-properties
environment: {
query: environmentClusterAgentQuery,
variables() {
@ -131,6 +135,17 @@ export default {
return data;
},
},
beforeDestroy() {
this.$apollo.mutate({
mutation: abortK8sPodLogsStream,
variables: {
configuration: this.k8sAccessConfiguration,
namespace: this.namespace,
podName: this.podName,
containerName: this.containerName,
},
});
},
i18n: {
emptyStateTitleForPod: s__('KubernetesLogs|No logs available for pod %{podName}'),
emptyStateTitleForContainer: s__(

View File

@ -14,6 +14,7 @@ import k8sNamespacesQuery from './queries/k8s_namespaces.query.graphql';
import fluxKustomizationQuery from './queries/flux_kustomization.query.graphql';
import fluxHelmReleaseQuery from './queries/flux_helm_release.query.graphql';
import k8sEventsQuery from './queries/k8s_events.query.graphql';
import k8sPodLogsWatcherQuery from './queries/k8s_pod_logs_watcher.query.graphql';
import { resolvers } from './resolvers';
import typeDefs from './typedefs.graphql';
import { connectionStatus } from './resolvers/kubernetes/constants';
@ -183,6 +184,15 @@ export const apolloProvider = (endpoint) => {
},
});
cache.writeQuery({
query: k8sPodLogsWatcherQuery,
data: {
k8sPodLogsWatcher: {
watcher: null,
},
},
});
return new VueApollo({
defaultClient,
});

View File

@ -0,0 +1,15 @@
mutation abortK8sPodLogsStream(
$configuration: LocalConfiguration
$namespace: String
$podName: String
$containerName: String
) {
abortK8sPodLogsStream(
configuration: $configuration
namespace: $namespace
podName: $podName
containerName: $containerName
) @client {
errors
}
}

View File

@ -0,0 +1,15 @@
query k8sPodLogsWatcherQuery(
$configuration: LocalConfiguration
$namespace: String
$podName: String
$containerName: String
) {
k8sPodLogsWatcher(
configuration: $configuration
namespace: $namespace
podName: $podName
containerName: $containerName
) @client {
watcher
}
}

View File

@ -22,7 +22,7 @@ import k8sServicesQuery from '../../queries/k8s_services.query.graphql';
import k8sDeploymentsQuery from '../../queries/k8s_deployments.query.graphql';
import k8sEventsQuery from '../../queries/k8s_events.query.graphql';
import { k8sResourceType } from './constants';
import { k8sLogs } from './k8s_logs';
import { k8sLogs, k8sPodLogsWatcher, abortK8sPodLogsStream } from './k8s_logs';
const watchServices = ({ configuration, namespace, client }) => {
const query = k8sServicesQuery;
@ -185,6 +185,7 @@ export const kubernetesMutations = {
return buildKubernetesErrors([error]);
});
},
abortK8sPodLogsStream,
};
export const kubernetesQueries = {
@ -284,4 +285,5 @@ export const kubernetesQueries = {
});
},
k8sLogs,
k8sPodLogsWatcher,
};

View File

@ -7,6 +7,7 @@ import {
} from '@gitlab/cluster-client';
import { throttle } from 'lodash';
import k8sLogsQuery from '~/environments/graphql/queries/k8s_logs.query.graphql';
import k8sPodLogsWatcherQuery from '~/environments/graphql/queries/k8s_pod_logs_watcher.query.graphql';
export const buildWatchPath = ({ resource, api = 'api/v1', namespace = '' }) => {
return `/${api}/namespaces/${namespace}/pods/${resource}/log`;
@ -62,6 +63,12 @@ export const k8sLogs = (_, { configuration, namespace, podName, containerName },
watchApi
.subscribeToStream(watchPath, watchQuery)
.then((watcher) => {
client.writeQuery({
query: k8sPodLogsWatcherQuery,
data: { k8sPodLogsWatcher: { watcher } },
variables,
});
let logsData = [];
const writeLogsThrottled = throttle(() => {
const currentLogsData = cacheWrapper.readLogsData();
@ -89,3 +96,18 @@ export const k8sLogs = (_, { configuration, namespace, podName, containerName },
cacheWrapper.writeErrorData(err);
});
};
export const abortK8sPodLogsStream = (
_,
{ configuration, namespace, podName, containerName },
{ client },
) => {
const podLogsWatcher = client.readQuery({
query: k8sPodLogsWatcherQuery,
variables: { configuration, namespace, podName, containerName },
})?.k8sPodLogsWatcher?.watcher;
podLogsWatcher?.abortStream();
};
export const k8sPodLogsWatcher = () => ({ watcher: null });

View File

@ -127,6 +127,10 @@ type K8sEvent {
type: String
}
type k8sWatcher {
watcher: JSON
}
extend type Query {
environmentApp(page: Int, scope: String): LocalEnvironmentApp
folder(environment: NestedLocalEnvironmentInput): LocalEnvironmentFolder
@ -151,6 +155,12 @@ extend type Query {
namespace: String
involvedObjectName: String
): [K8sEvent]
k8sPodLogsWatcher(
configuration: LocalConfiguration
namespace: String
podName: String
containerName: String
): k8sWatcher
}
input ResourceTypeParam {
@ -178,4 +188,10 @@ extend type Mutation {
namespace: String
podName: String
): LocalErrors
abortPodLogsStream(
configuration: LocalConfiguration
namespace: String
podName: String
containerName: String
): LocalErrors
}

View File

@ -1,13 +1,13 @@
<script>
import { GlSegmentedControl } from '@gitlab/ui';
import { s__, sprintf } from '~/locale';
import SegmentedControlButtonGroup from '~/vue_shared/components/segmented_control_button_group.vue';
import CiCdAnalyticsAreaChart from './ci_cd_analytics_area_chart.vue';
import { DEFAULT_SELECTED_CHART } from './constants';
export default {
components: {
CiCdAnalyticsAreaChart,
SegmentedControlButtonGroup,
GlSegmentedControl,
},
props: {
charts: {
@ -56,11 +56,7 @@ export default {
<template>
<div>
<div class="gl-flex gl-flex-wrap gl-gap-5">
<segmented-control-button-group
:options="chartRanges"
:value="selectedChart"
@input="onInput"
/>
<gl-segmented-control :options="chartRanges" :value="selectedChart" @input="onInput" />
<slot name="extend-button-group"></slot>
</div>
<ci-cd-analytics-area-chart

View File

@ -1,52 +0,0 @@
<script>
import { GlButtonGroup, GlButton } from '@gitlab/ui';
const validateOptionsProp = (options) => {
const requiredOptionPropType = {
value: ['string', 'number', 'boolean'],
disabled: ['boolean', 'undefined'],
};
const optionProps = Object.keys(requiredOptionPropType);
return options.every((option) => {
if (!option) {
return false;
}
return optionProps.every((name) => requiredOptionPropType[name].includes(typeof option[name]));
});
};
// TODO: We're planning to move this component to GitLab UI
// https://gitlab.com/gitlab-org/gitlab-ui/-/issues/1787
export default {
components: {
GlButtonGroup,
GlButton,
},
props: {
options: {
type: Array,
required: true,
validator: validateOptionsProp,
},
value: {
type: [String, Number, Boolean],
required: true,
},
},
};
</script>
<template>
<gl-button-group>
<gl-button
v-for="opt in options"
:key="opt.value"
:disabled="!!opt.disabled"
:selected="value === opt.value"
v-bind="opt.props"
@click="$emit('input', opt.value)"
>
<slot name="button-content" v-bind="opt">{{ opt.text }}</slot>
</gl-button>
</gl-button-group>
</template>

View File

@ -0,0 +1,11 @@
# frozen_string_literal: true
module Members
class DeletionSchedule < ApplicationRecord
self.table_name = 'members_deletion_schedules'
belongs_to :namespace, optional: false
belongs_to :user, optional: false
belongs_to :scheduled_by, class_name: 'User', optional: false
end
end

View File

@ -1,8 +0,0 @@
---
migration_job_name: BackfillProjectStatisticsStorageSizeWithoutPipelineArtifactsSizeJob
description: Refreshes ProjectStatistics to remove pipeline_artifacts_size from the
total storage_size
feature_category: consumables_cost_management
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/126053
milestone: '16.3'
finalized_by: '20231207220954'

View File

@ -0,0 +1,12 @@
---
table_name: members_deletion_schedules
classes:
- Members::DeletionSchedule
feature_categories:
- seat_cost_management
description: Keeps deletion schedules for members
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/170268
milestone: '17.6'
gitlab_schema: gitlab_main_cell
sharding_key:
namespace_id: namespaces

View File

@ -0,0 +1,21 @@
# frozen_string_literal: true
class CreateMembersDeletionSchedules < Gitlab::Database::Migration[2.2]
milestone '17.6'
def up
create_table :members_deletion_schedules do |t|
t.references :namespace, index: false, foreign_key: { to_table: :namespaces, on_delete: :cascade }, null: false
t.references :user, index: true, foreign_key: { to_table: :users, on_delete: :cascade }, null: false
t.references :scheduled_by, null: false, index: true, foreign_key: { to_table: :users, on_delete: :cascade }
t.timestamps_with_timezone null: false
t.index [:namespace_id, :user_id], unique: true,
name: 'idx_members_deletion_schedules_on_namespace_id_and_user_id'
end
end
def down
drop_table :members_deletion_schedules, if_exists: true
end
end

View File

@ -0,0 +1,52 @@
# frozen_string_literal: true
class SyncForeignKeyValidationForPipelinesUpstreamPipelineId < Gitlab::Database::Migration[2.2]
include Gitlab::Database::PartitioningMigrationHelpers
disable_ddl_transaction!
milestone '17.6'
SOURCE_TABLE_NAME = :p_ci_builds
TARGET_TABLE_NAME = :p_ci_pipelines
COLUMN = :upstream_pipeline_id
PARTITION_COLUMN = :upstream_pipeline_partition_id
TARGET_COLUMN = :id
TARGET_PARTITION_COLUMN = :partition_id
FK_NAME = :fk_rails_4540ead625_p
def up
add_concurrent_partitioned_foreign_key(
SOURCE_TABLE_NAME,
TARGET_TABLE_NAME,
column: [PARTITION_COLUMN, COLUMN],
target_column: [TARGET_PARTITION_COLUMN, TARGET_COLUMN],
validate: true,
reverse_lock_order: true,
on_update: :cascade,
on_delete: :cascade,
name: FK_NAME
)
end
def down
with_lock_retries do
remove_foreign_key_if_exists(
SOURCE_TABLE_NAME, TARGET_TABLE_NAME,
name: FK_NAME,
reverse_lock_order: true
)
end
add_concurrent_partitioned_foreign_key(
SOURCE_TABLE_NAME,
TARGET_TABLE_NAME,
column: [PARTITION_COLUMN, COLUMN],
target_column: [TARGET_PARTITION_COLUMN, TARGET_COLUMN],
validate: false,
reverse_lock_order: true,
on_update: :cascade,
on_delete: :cascade,
name: FK_NAME
)
end
end

View File

@ -0,0 +1 @@
59da038d0508ecfcd7554e6b7c7929a86c34d711b5b08a89020da86feced41d2

View File

@ -0,0 +1 @@
3db2da1dd65771081ead6b0c16fca5e4e89c7f06da45fa869cfdf4eaff837dc3

View File

@ -14014,6 +14014,24 @@ CREATE TABLE members (
CONSTRAINT check_508774aac0 CHECK ((member_namespace_id IS NOT NULL))
);
CREATE TABLE members_deletion_schedules (
id bigint NOT NULL,
namespace_id bigint NOT NULL,
user_id bigint NOT NULL,
scheduled_by_id bigint NOT NULL,
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL
);
CREATE SEQUENCE members_deletion_schedules_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
ALTER SEQUENCE members_deletion_schedules_id_seq OWNED BY members_deletion_schedules.id;
CREATE SEQUENCE members_id_seq
START WITH 1
INCREMENT BY 1
@ -23136,6 +23154,8 @@ ALTER TABLE ONLY member_roles ALTER COLUMN id SET DEFAULT nextval('member_roles_
ALTER TABLE ONLY members ALTER COLUMN id SET DEFAULT nextval('members_id_seq'::regclass);
ALTER TABLE ONLY members_deletion_schedules ALTER COLUMN id SET DEFAULT nextval('members_deletion_schedules_id_seq'::regclass);
ALTER TABLE ONLY merge_request_assignees ALTER COLUMN id SET DEFAULT nextval('merge_request_assignees_id_seq'::regclass);
ALTER TABLE ONLY merge_request_assignment_events ALTER COLUMN id SET DEFAULT nextval('merge_request_assignment_events_id_seq'::regclass);
@ -25473,6 +25493,9 @@ ALTER TABLE ONLY member_approvals
ALTER TABLE ONLY member_roles
ADD CONSTRAINT member_roles_pkey PRIMARY KEY (id);
ALTER TABLE ONLY members_deletion_schedules
ADD CONSTRAINT members_deletion_schedules_pkey PRIMARY KEY (id);
ALTER TABLE ONLY members
ADD CONSTRAINT members_pkey PRIMARY KEY (id);
@ -28108,6 +28131,8 @@ CREATE INDEX idx_keys_expires_at_and_before_expiry_notification_undelivered ON k
CREATE INDEX idx_members_created_at_user_id_invite_token ON members USING btree (created_at) WHERE ((invite_token IS NOT NULL) AND (user_id IS NULL));
CREATE UNIQUE INDEX idx_members_deletion_schedules_on_namespace_id_and_user_id ON members_deletion_schedules USING btree (namespace_id, user_id);
CREATE INDEX idx_members_on_user_and_source_and_source_type_and_member_role ON members USING btree (user_id, source_id, source_type, member_role_id);
CREATE INDEX idx_merge_request_metrics_on_merged_by_project_and_mr ON merge_request_metrics USING btree (merged_by_id, target_project_id, merge_request_id);
@ -30346,6 +30371,10 @@ CREATE INDEX index_member_roles_on_occupies_seat ON member_roles USING btree (oc
CREATE INDEX index_member_roles_on_permissions ON member_roles USING gin (permissions);
CREATE INDEX index_members_deletion_schedules_on_scheduled_by_id ON members_deletion_schedules USING btree (scheduled_by_id);
CREATE INDEX index_members_deletion_schedules_on_user_id ON members_deletion_schedules USING btree (user_id);
CREATE INDEX index_members_on_access_level ON members USING btree (access_level);
CREATE INDEX index_members_on_expires_at ON members USING btree (expires_at);
@ -37107,8 +37136,8 @@ ALTER TABLE ONLY merge_request_assignees
ALTER TABLE ONLY packages_dependency_links
ADD CONSTRAINT fk_rails_4437bf4070 FOREIGN KEY (dependency_id) REFERENCES packages_dependencies(id) ON DELETE CASCADE;
ALTER TABLE ONLY ci_builds
ADD CONSTRAINT fk_rails_4540ead625_p FOREIGN KEY (upstream_pipeline_partition_id, upstream_pipeline_id) REFERENCES p_ci_pipelines(partition_id, id) ON UPDATE CASCADE ON DELETE CASCADE NOT VALID;
ALTER TABLE p_ci_builds
ADD CONSTRAINT fk_rails_4540ead625_p FOREIGN KEY (upstream_pipeline_partition_id, upstream_pipeline_id) REFERENCES p_ci_pipelines(partition_id, id) ON UPDATE CASCADE ON DELETE CASCADE;
ALTER TABLE ONLY project_auto_devops
ADD CONSTRAINT fk_rails_45436b12b2 FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE;
@ -37677,6 +37706,9 @@ ALTER TABLE ONLY project_secrets_managers
ALTER TABLE ONLY organization_details
ADD CONSTRAINT fk_rails_8facb04bef FOREIGN KEY (organization_id) REFERENCES organizations(id) ON DELETE CASCADE;
ALTER TABLE ONLY members_deletion_schedules
ADD CONSTRAINT fk_rails_8fb4cda076 FOREIGN KEY (scheduled_by_id) REFERENCES users(id) ON DELETE CASCADE;
ALTER TABLE p_ci_pipelines_config
ADD CONSTRAINT fk_rails_906c9a2533_p FOREIGN KEY (partition_id, pipeline_id) REFERENCES p_ci_pipelines(partition_id, id) ON UPDATE CASCADE ON DELETE CASCADE;
@ -37746,6 +37778,9 @@ ALTER TABLE ONLY pages_deployments
ALTER TABLE ONLY dast_pre_scan_verification_steps
ADD CONSTRAINT fk_rails_9990fc2adf FOREIGN KEY (dast_pre_scan_verification_id) REFERENCES dast_pre_scan_verifications(id) ON DELETE CASCADE;
ALTER TABLE ONLY members_deletion_schedules
ADD CONSTRAINT fk_rails_9af19961f8 FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE;
ALTER TABLE ONLY users_ops_dashboard_projects
ADD CONSTRAINT fk_rails_9b4ebf005b FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE;
@ -38115,6 +38150,9 @@ ALTER TABLE ONLY namespace_details
ALTER TABLE ONLY operations_strategies_user_lists
ADD CONSTRAINT fk_rails_ccb7e4bc0b FOREIGN KEY (user_list_id) REFERENCES operations_user_lists(id) ON DELETE CASCADE;
ALTER TABLE ONLY members_deletion_schedules
ADD CONSTRAINT fk_rails_ce06d97eb2 FOREIGN KEY (namespace_id) REFERENCES namespaces(id) ON DELETE CASCADE;
ALTER TABLE ONLY resource_milestone_events
ADD CONSTRAINT fk_rails_cedf8cce4d FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE SET NULL;

View File

@ -48,7 +48,7 @@ For all other instance configurations, submit a support ticket according to the
### Apply configuration changes in Switchboard
You can apply configuration changes made in Switchboard immediately or defer them until your next scheduled weekly [maintenance window](../../administration/dedicated/create_instance.md#maintenance-window).
You can apply configuration changes made in Switchboard immediately or defer them until your next scheduled weekly [maintenance window](../../administration/dedicated/maintenance.md#maintenance-windows).
When you apply changes immediately:

View File

@ -179,7 +179,7 @@ information required to create your GitLab Dedicated instance.
This can be the same as the primary or secondary region, or different.
- Desired maintenance window: A weekly four-hour time slot that GitLab uses to perform routine
maintenance and upgrade operations on all tenant instances. For more information, see
[maintenance windows](../../administration/dedicated/create_instance.md#maintenance-window).
[maintenance windows](../../administration/dedicated/maintenance.md#maintenance-windows).
1. Optional. Security: You can provide your own [KMS keys](https://docs.aws.amazon.com/kms/latest/developerguide/overview.html)
for encrypted AWS services. If you choose not to provide KMS keys, encryption keys are generated
for your instance when it is created. For more information, see [encrypting your data at rest](#encrypted-data-at-rest-byok).
@ -205,55 +205,3 @@ Also plan ahead if you need the following features:
- [Outbound Private Link](../../administration/dedicated/configure_instance.md#outbound-private-link)
- [SAML SSO](../../administration/dedicated/configure_instance.md#saml)
- [Bring your own domain](../../administration/dedicated/configure_instance.md#bring-your-own-domain-byod)
## Things to know
### Maintenance window
Available scheduled maintenance windows, performed outside standard working hours:
- APAC: Wednesday 1 PM - 5 PM UTC
- EMEA: Tuesday 1 AM - 5 AM UTC
- AMER Option 1: Tuesday 7 AM - 11 AM UTC
- AMER Option 2: Sunday 9 PM - Monday 1 AM UTC
Consider the following notes:
- The Dedicated instance is not expected to be down the entire duration of the maintenance window. Occasionally, a small period of downtime (on the order of a few tens of seconds) can occur while compute resources restart after they are upgraded. If it occurs, this small period of downtime typically happens during the first half of the maintenance window. Long-running connections may be interrupted during this period. To mitigate this, clients should implement strategies like automatic recovery and retry. Longer periods of downtime during the maintenance window are rare, and GitLab provides notice if longer downtime is anticipated.
- In case of a performance degradation or downtime during the scheduled maintenance window,
the impact to [the system SLA](https://handbook.gitlab.com/handbook/engineering/infrastructure/team/gitlab-dedicated/slas/) is not counted.
- The weekly scheduled maintenance window can be postponed into another window within the same week.
This option needs to be agreed with the assigned Customer Success Manager at least one week in advance.
- The scheduled weekly maintenance window is different from
[emergency maintenance](#emergency-maintenance).
#### GitLab release rollout schedule
GitLab Dedicated tenant instances are [upgraded](../../subscriptions/gitlab_dedicated/maintenance.md#upgrades-and-patches) to the minor GitLab release within [the pre-selected window](#maintenance-window) using the schedule described below.
Where **T** is the date of a [minor GitLab release](../../policy/maintenance.md) `N`. GitLab Dedicated instances are upgraded to the `N-1` release as follows:
1. At T+5 calendar days: Tenant instances in the `EMEA` and `AMER Option 1` maintenance window are upgraded.
1. At T+6 calendar days: Tenant instances in the `APAC` maintenance window are upgraded.
1. At T+10 calendar days: Tenant instances in the `AMER Option 2` maintenance window are upgraded.
For example, GitLab 16.9 released on 2024-02-15. Therefore, tenant instances in the `EMEA` and `AMER Option 1` maintenance window are upgraded to 16.8 on 2024-02-20.
#### Emergency maintenance
In an event of a platform outage, degradation or a security event requiring urgent action,
emergency maintenance will be carried out per
[the emergency change processes](https://handbook.gitlab.com/handbook/engineering/infrastructure/emergency-change-processes/).
The emergency maintenance is initiated when urgent actions need to be executed by GitLab on a
Dedicated tenant instance. Communication with the customer will be provided on best effort basis
prior to commencing the maintenance, and full communication will follow after the immediate action
is carried out. The GitLab Support Team will create a new ticket and send a message to the email
addresses of the users listed in Switchboard during [onboarding](../../administration/dedicated/create_instance.md#step-1-get-access-to-switchboard).
For example, when a critical security process is initiated to address an S1 vulnerability in GitLab,
emergency maintenance is carried out to upgrade GitLab to the non-vulnerable version and that
can occur outside of a scheduled maintenance window.
Postponing emergency maintenance is not possible, because the same process must be applied to all
existing Dedicated customers, and the primary concern is to ensure safety and availability of
Dedicated tenant instances.

View File

@ -0,0 +1,68 @@
---
stage: SaaS Platforms
group: GitLab Dedicated
description: Maintenance windows, release schedules, and emergency maintenance processes for GitLab Dedicated instances.
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments
---
# GitLab Dedicated maintenance and release schedule
DETAILS:
**Tier:** Ultimate
**Offering:** GitLab Dedicated
GitLab performs regular maintenance to your GitLab Dedicated instance. This page outlines the maintenance windows and release upgrade schedule.
## Maintenance windows
Maintenance is performed outside standard working hours:
| Region | Day | Time (UTC) |
|--------|-----|------------|
| Asia Pacific | Wednesday | 13:00 - 17:00 |
| Europe, Middle East, and Africa | Tuesday | 01:00 - 05:00 |
| Americas (Option 1) | Tuesday | 07:00 - 11:00 |
| Americas (Option 2) | Sunday-Monday | 21:00 - 01:00 |
You can postpone scheduled maintenance to another window in the same week by contacting your Customer Success Manager at least one week in advance.
NOTE:
The scheduled weekly maintenance window is separate from [emergency maintenance](#emergency-maintenance), which cannot be postponed.
### Access during maintenance
Downtime is not expected for the entire duration of your maintenance window. A brief service interruption (less than one minute) may occur when compute resources restart after upgrades, typically during the first half of the maintenance window.
Long-running connections may be interrupted during this period. To minimize disruption, you can implement strategies like automatic recovery and retry.
Longer service interruptions are rare. If extended downtime is expected, GitLab provides advance notice.
NOTE:
Performance degradation or downtime during the scheduled maintenance window does not count against [the system Service Level Availability](https://handbook.gitlab.com/handbook/engineering/infrastructure/team/gitlab-dedicated/slas/).
## Release rollout schedule
GitLab Dedicated is [upgraded](../../subscriptions/gitlab_dedicated/maintenance.md#upgrades-and-patches) to the previous minor version (`N-1`) after each GitLab release. For example, when GitLab 16.9 is released, GitLab Dedicated instances are upgraded to 16.8.
Upgrades occur in your selected [maintenance window](#maintenance-windows) according to the following schedule, where `T` is the date of a [minor GitLab release](../../policy/maintenance.md):
| Calendar days after release | Maintenance window region |
|-------------------|---------------------------|
| `T`+5 | Europe, Middle East, and Africa,<br/> Americas (Option 1) |
| `T`+6 | Asia Pacific |
| `T`+10 | Americas (Option 2) |
For example, GitLab 16.9 released on 2024-02-15. Instances in the EMEA and Americas (Option 1) regions were then upgraded to 16.8 on 2024-02-20, 5 days after the 16.9 release.
## Emergency maintenance
In an event of a platform outage, degradation, or a security event requiring urgent action,
GitLab performs emergency maintenance per
[the emergency change processes](https://handbook.gitlab.com/handbook/engineering/infrastructure/emergency-change-processes/).
Emergency maintenance is initiated when urgent actions need to be executed by GitLab on a
GitLab Dedicated tenant instance. For example, when a critical (S1) security vulnerability requires urgent patching, GitLab performs emergency maintenance to upgrade your tenant instance to a secure version. This maintenance can occur outside scheduled maintenance windows.
GitLab provides advance emergency maintenance notice when possible and sends complete details after the issue is resolved. The GitLab Support team creates a support ticket and notifies all [Switchboard users](../../administration/dedicated/create_instance.md#step-1-get-access-to-switchboard) by email.
You cannot postpone emergency maintenance, because the same process must be applied to all GitLab Dedicated instances to ensure their security and availability.

View File

@ -43,7 +43,7 @@ This section displays the:
- Most recent emergency maintenance window (if applicable).
- Upcoming GitLab version upgrade.
For more information, see [Maintenance windows](../../administration/dedicated/create_instance.md#maintenance-window).
For more information, see [Maintenance windows](../../administration/dedicated/maintenance.md#maintenance-windows).
## Hosted runners

View File

@ -74,9 +74,55 @@ Example response:
## Configure pull mirroring for a project
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/494294) in GitLab 17.6.
Configure pull mirroring settings.
Supported attributes:
| Attribute | Type | Required | Description |
|:---------------------------------|:--------|:---------|:------------|
| `enabled` | boolean | No | Enables pull mirroring on project when set to `true`. |
| `url` | string | No | URL of remote repository being mirrored. |
| `auth_user` | string | No | Username used for authentication of a project to pull mirror. |
| `auth_password` | string | No | Password used for authentication of a project to pull mirror. |
| `mirror_trigger_builds` | boolean | No | Trigger pipelines for mirror updates when set to `true`. |
| `only_mirror_protected_branches` | boolean | No | Limits mirroring to only protected branches when set to `true`. |
| `mirror_branch_regex` | string | No | Contains a regular expression. Only branches with names matching the regex are mirrored. Requires `only_mirror_protected_branches` to be disabled. |
Example request to add pull mirroring:
```shell
curl --request PUT --header "PRIVATE-TOKEN: <your_access_token>" \
--header "Content-Type: application/json" \
--data '{
"enabled": true,
"url": "https://gitlab.example.com/group/project.git",
"auth_user": "user",
"auth_password": "password"
}' \
--url "https://gitlab.example.com/api/v4/projects/:id/mirror/pull"
```
Example request to remove pull mirroring:
```shell
curl --request PUT --header "PRIVATE-TOKEN: <your_access_token>" \
--url "https://gitlab.example.com/api/v4/projects/:id/mirror/pull" \
--data "enabled=false"
```
## Configure pull mirroring for a project (deprecated)
> - Field `mirror_branch_regex` [introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/381667) in GitLab 15.8 [with a flag](../administration/feature_flags.md) named `mirror_only_branches_match_regex`. Disabled by default.
> - [Enabled by default](https://gitlab.com/gitlab-org/gitlab/-/issues/381667) in GitLab 16.0.
> - [Generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/410354) in GitLab 16.2. Feature flag `mirror_only_branches_match_regex` removed.
> - [Deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/494294) in GitLab 17.6.
WARNING:
This configuration option was [deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/494294) in GitLab 17.6
and is planned for removal in v5 of the API. Use the [new configuration and endpoint](project_pull_mirroring.md#configure-pull-mirroring-for-a-project) instead.
This change is a breaking change.
Configure pull mirroring while [creating a new project](projects.md#create-a-project) or
[updating an existing project](projects.md#edit-a-project) by using the API if the remote repository is accessible publicly or by
@ -126,49 +172,6 @@ curl --request PUT --header "PRIVATE-TOKEN: <your_access_token>" \
--data "mirror=false"
```
## Configure pull mirroring for a project v2
DETAILS:
**Status:** Experiment
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/494294) in GitLab 17.5. This feature is an [experiment](../policy/experiment-beta-support.md).
Configure pull mirroring settings.
Supported attributes:
| Attribute | Type | Required | Description |
|:---------------------------------|:--------|:---------|:------------|
| `enabled` | boolean | No | Enables pull mirroring on project when set to `true`. |
| `url` | string | No | URL of remote repository being mirrored. |
| `auth_user` | string | No | Username used for authentication of a project to pull mirror. |
| `auth_password` | string | No | Password used for authentication of a project to pull mirror. |
| `mirror_trigger_builds` | boolean | No | Trigger pipelines for mirror updates when set to `true`. |
| `only_mirror_protected_branches` | boolean | No | Limits mirroring to only protected branches when set to `true`. |
| `mirror_branch_regex` | string | No | Contains a regular expression. Only branches with names matching the regex are mirrored. Requires `only_mirror_protected_branches` to be disabled. |
Example request to add pull mirroring:
```shell
curl --request PUT --header "PRIVATE-TOKEN: <your_access_token>" \
--header "Content-Type: application/json" \
--data '{
"enabled": true,
"url": "https://gitlab.example.com/group/project.git",
"auth_user": "user",
"auth_password": "password"
}' \
--url "https://gitlab.example.com/api/v4/projects/:id/mirror/pull"
```
Example request to remove pull mirroring:
```shell
curl --request PUT --header "PRIVATE-TOKEN: <your_access_token>" \
--url "https://gitlab.example.com/api/v4/projects/:id/mirror/pull" \
--data "enabled=false"
```
## Start the pull mirroring process for a project
Start the pull mirroring process for a project.

View File

@ -146,3 +146,12 @@ The `require_password_to_approve` was deprecated in GitLab 16.9. Use the `requir
instead. If you supply values to both fields, the `require_reauthentication_to_approve` field takes precedence.
The `require_password_to_approve` field will be removed in v5 of the GitLab REST API.
## Pull mirroring configuration with Project API
Breaking change. [Related issue](https://gitlab.com/gitlab-org/gitlab/-/issues/494294).
In GitLab 17.6, the [pull mirroring configuration with the Projects API](../project_pull_mirroring.md#configure-pull-mirroring-for-a-project-deprecated) is deprecated.
It is replaced by a new configuration and endpoint, [`projects/:id/mirror/pull`](../project_pull_mirroring.md#configure-pull-mirroring-for-a-project).
The previous configuration using the Projects API will be removed in v5 of the GitLab REST API.

View File

@ -12,7 +12,7 @@ GitLab Dedicated is running the n-1 GitLab version to provide sufficient run-up
GitLab Dedicated instances are automatically upgraded during scheduled maintenance windows throughout the week.
The [release rollout schedule](../administration/dedicated/create_instance.md#gitlab-release-rollout-schedule) for GitLab Dedicated outlines when instances are expected to be upgraded to a new release.
The [release rollout schedule](../administration/dedicated/maintenance.md#release-rollout-schedule) for GitLab Dedicated outlines when instances are expected to be upgraded to a new release.
## Feature flags

View File

@ -15,7 +15,7 @@ GitLab Dedicated instances receive regular maintenance to ensure security, relia
## Maintenance windows
GitLab leverages [weekly maintenance windows](../../administration/dedicated/create_instance.md#maintenance-window) to keep your instance up to date, fix security issues, and ensure the overall reliability and performance of your environment.
GitLab leverages [weekly maintenance windows](../../administration/dedicated/maintenance.md#maintenance-windows) to keep your instance up to date, fix security issues, and ensure the overall reliability and performance of your environment.
## Upgrades and patches
@ -62,4 +62,4 @@ Implementing a caching proxy in your network further reduces this risk.
## Emergency maintenance
[Emergency maintenance](../../administration/dedicated/create_instance.md#emergency-maintenance) addresses high-severity issues that affect your instance's security, availability, or reliability. When critical patch releases are available, GitLab Dedicated instances are upgraded as soon as possible using emergency maintenance procedures.
[Emergency maintenance](../../administration/dedicated/maintenance.md#emergency-maintenance) addresses high-severity issues that affect your instance's security, availability, or reliability. When critical patch releases are available, GitLab Dedicated instances are upgraded as soon as possible using emergency maintenance procedures.

View File

@ -97,11 +97,11 @@ GitLab leverages one weekly maintenance window to keep your instance up to date,
#### Upgrades
GitLab performs monthly upgrades to your instance with the latest patch release during your preferred [maintenance window](../../administration/dedicated/create_instance.md#maintenance-window) tracking one release behind the latest GitLab release. For example, if the latest version of GitLab available is 16.8, GitLab Dedicated runs on 16.7.
GitLab performs monthly upgrades to your instance with the latest patch release during your preferred [maintenance window](../../administration/dedicated/maintenance.md#maintenance-windows) tracking one release behind the latest GitLab release. For example, if the latest version of GitLab available is 16.8, GitLab Dedicated runs on 16.7.
#### Unscheduled maintenance
GitLab may conduct [unscheduled maintenance](../../administration/dedicated/create_instance.md#emergency-maintenance) to address high-severity issues affecting the security, availability, or reliability of your instance.
GitLab may conduct [unscheduled maintenance](../../administration/dedicated/maintenance.md#emergency-maintenance) to address high-severity issues affecting the security, availability, or reliability of your instance.
### Application

View File

@ -240,7 +240,6 @@ Selectors have the format `type`:`search string`. DAST searches for the selector
| `id` | `id:element` | Searches for an HTML element with the provided element ID. |
| `name` | `name:element` | Searches for an HTML element with the provided element name. |
| `xpath` | `xpath://input[@id="my-button"]/a` | Searches for a HTML element with the provided XPath. XPath searches are expected to be less performant than other searches. |
| None provided | `a.click-me` | Defaults to searching using a CSS selector. **{warning}** **[Deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/383348)** in GitLab 15.8. Replaced by explicitly declaring the selector type. |
#### Find selectors with Google Chrome

View File

@ -143,6 +143,7 @@ the following sections and tables provide an alternative.
| `approval_settings` | `object` | false | | Project settings that the policy overrides. |
| `fallback_behavior` | `object` | false | | Settings that affect invalid or unenforceable rules. |
| `policy_scope` | `object` of [`policy_scope`](index.md#scope) | false | | Defines the scope of the policy based on the projects, groups, or compliance framework labels you specify. |
| `policy_tuning` | `object` | false | | (Experimental) Settings that affect policy comparison logic. |
## `scan_finding` rule type
@ -282,6 +283,60 @@ On self-managed GitLab, by default the `fallback_behavior` field is available. T
|--------|----------|----------|--------------------|----------------------------------------------------------------------------------------------------------------------|
| `fail` | `string` | false | `open` or `closed` | `closed` (default): Invalid or unenforceable rules of a policy require approval. `open`: Invalid or unenforceable rules of a policy do not require approval. |
## `policy_tuning`
> - The `policy_tuning` field was [introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/490092) in GitLab 17.6 [with a flag](../../../administration/feature_flags.md) named `unblock_rules_using_execution_policies`. Disabled by default.
FLAG:
The availability of this feature is controlled by a feature flag. For more information, see the history.
| Field | Type | Required | Possible values | Description |
|--------|----------|----------|--------------------|----------------------------------------------------------------------------------------------------------------------|
| `unblock_rules_using_execution_policies` | `boolean` | false | `true`, `false` | When enabled, approval rules become optional when scan artifacts are missing from the target branch and a scan is required by a scan execution policy. This option only works with an existing scan execution policy that has matching scanners. |
### Example `policy.yml` in a security policy project that uses `policy_tuning`
```yaml
scan_execution_policy:
- name: Enforce dependency scanning
description: ''
enabled: true
policy_scope:
projects:
excluding: []
rules:
- type: pipeline
branch_type: all
actions:
- scan: dependency_scanning
approval_policy:
- name: Dependency scanning approvals
description: ''
enabled: true
policy_scope:
projects:
excluding: []
rules:
- type: scan_finding
scanners:
- dependency_scanning
vulnerabilities_allowed: 0
severity_levels: []
vulnerability_states: []
branch_type: protected
actions:
- type: require_approval
approvals_required: 1
role_approvers:
- developer
- type: send_bot_message
enabled: true
fallback_behavior:
fail: closed
policy_tuning:
unblock_rules_using_execution_policies: true
```
## Policy scope schema
To customize policy enforcement, you can define a policy's scope to either include or exclude

View File

@ -163,6 +163,11 @@ For projects without a `.gitlab-ci.yml` file, this strategy will create the `.gi
implicitly. That is, a pipeline containing only the jobs defined in the pipeline execution policy is
executed.
NOTE:
When a pipeline execution policy uses workflow rules that prevent policy jobs from running, the only jobs that
run are the project's CI/CD jobs. If the project uses workflow rules that prevent project CI/CD jobs from running,
the only jobs that run are the pipeline execution policy jobs.
### `override_project_ci`
This strategy completely replaces the project's existing CI/CD configuration with a new one defined by the pipeline execution policy. This strategy is ideal when the entire pipeline needs to be standardized or replaced, such as enforcing organization-wide CI/CD standards or compliance requirements.

View File

@ -34,7 +34,7 @@ The below reflects the current retention periods of GitLab AI model [Sub-Process
All of these AI providers are under data protection agreements with GitLab that prohibit the use of Customer Content for their own purposes, except to perform their independent legal obligations.
GitLab does not retain input and output data unless customers provide consent through a GitLab [Support Ticket](https://about.gitlab.com/support/portal/). Learn more about [AI feature logging](../../administration/logs/index.md).
GitLab Duo Chat retains chat history to help you return quickly to previously discussed topics. You can delete chats in the GitLab Duo Chat interface. GitLab does not otherwise retain input and output data unless customers provide consent through a GitLab [Support Ticket](https://about.gitlab.com/support/portal/). Learn more about [AI feature logging](../../administration/logs/index.md).
## Training data

View File

@ -1,93 +0,0 @@
# frozen_string_literal: true

module Gitlab
  module BackgroundMigration
    # Batched background migration that recomputes ProjectStatistics#storage_size
    # so that the total no longer includes pipeline_artifacts_size.
    # Only rows with a non-zero pipeline_artifacts_size are processed (see scope_to).
    class BackfillProjectStatisticsStorageSizeWithoutPipelineArtifactsSizeJob < Gitlab::BackgroundMigration::BatchedMigrationJob
      # Minimal ActiveRecord model for the projects table, isolated from the
      # application's Project model so the migration is immune to model changes.
      class Project < ::ApplicationRecord
        self.table_name = 'projects'

        has_one :statistics, class_name: '::Gitlab::BackgroundMigration::BackfillProjectStatisticsStorageSizeWithoutPipelineArtifactsSizeJob::ProjectStatistics'
      end

      # Minimal ActiveRecord model for the project_statistics rows this job updates.
      class ProjectStatistics < ::ApplicationRecord
        include ::EachBatch

        self.table_name = 'project_statistics'

        belongs_to :project, class_name: '::Gitlab::BackgroundMigration::BackfillProjectStatisticsStorageSizeWithoutPipelineArtifactsSizeJob::Project'

        # Recomputes storage_size as the sum of the named component columns and
        # persists the new value, scheduling a namespace aggregation when it changed.
        #
        # storage_size_components - Array of Symbols naming size columns to sum.
        def update_storage_size(storage_size_components)
          new_storage_size = storage_size_components.sum { |component| method(component).call }

          # Only update storage_size if storage_size needs updating
          return unless storage_size != new_storage_size

          self.storage_size = new_storage_size
          save!

          # Propagate the corrected size up the namespace hierarchy asynchronously.
          ::Namespaces::ScheduleAggregationWorker.perform_async(project.namespace_id)
          log_with_data('Scheduled Namespaces::ScheduleAggregationWorker')
        end

        # wiki_size may be NULL in the database; coerce to 0 so summation works.
        def wiki_size
          super.to_i
        end

        # snippets_size may be NULL in the database; coerce to 0 so summation works.
        def snippets_size
          super.to_i
        end

        private

        # Logs the given line together with identifying data for this row.
        def log_with_data(log_line)
          log_info(
            log_line,
            project_id: project.id,
            pipeline_artifacts_size: pipeline_artifacts_size,
            storage_size: storage_size,
            namespace_id: project.namespace_id
          )
        end

        # Structured log entry tagged with this migrator's name.
        def log_info(message, **extra)
          ::Gitlab::BackgroundMigration::Logger.info(
            migrator: 'BackfillProjectStatisticsStorageSizeWithoutPipelineArtifactsSizeJob',
            message: message,
            **extra
          )
        end
      end

      # Rows whose pipeline_artifacts_size is 0 never had it counted in
      # storage_size, so they are excluded from the batch.
      scope_to ->(relation) {
        relation.where.not(pipeline_artifacts_size: 0)
      }
      operation_name :update_storage_size
      feature_category :consumables_cost_management

      # Entry point invoked by the batched migration framework for each batch.
      def perform
        each_sub_batch do |sub_batch|
          ProjectStatistics.merge(sub_batch).each do |statistics|
            statistics.update_storage_size(storage_size_components)
          end
        end
      end

      private

      # Overridden in EE
      def storage_size_components
        [
          :repository_size,
          :wiki_size,
          :lfs_objects_size,
          :build_artifacts_size,
          :packages_size,
          :snippets_size,
          :uploads_size
        ]
      end
    end
  end
end

Gitlab::BackgroundMigration::BackfillProjectStatisticsStorageSizeWithoutPipelineArtifactsSizeJob.prepend_mod

View File

@ -50216,12 +50216,6 @@ msgstr ""
msgid "SecurityOrchestration|Status"
msgstr ""
msgid "SecurityOrchestration|Step 1: Choose a policy type"
msgstr ""
msgid "SecurityOrchestration|Step 2: Policy details"
msgstr ""
msgid "SecurityOrchestration|Summary"
msgstr ""

View File

@ -217,8 +217,7 @@ module QA
end
def click_diffs_tab
# Do not wait for spinner due to https://gitlab.com/gitlab-org/gitlab/-/issues/398584
click_element('diffs-tab', skip_finished_loading_check: true)
click_element('diffs-tab')
end
def click_pipeline_link

View File

@ -218,7 +218,6 @@ spec/frontend/ci/pipelines_page/components/pipelines_artifacts_spec.js
spec/frontend/ci/pipelines_page/components/pipelines_filtered_search_spec.js
spec/frontend/ci/runner/admin_runner_show/admin_runner_show_app_spec.js
spec/frontend/ci/runner/admin_runners/admin_runners_app_spec.js
spec/frontend/ci/runner/components/cells/link_cell_spec.js
spec/frontend/ci/runner/components/registration/registration_instructions_spec.js
spec/frontend/ci/runner/components/runner_details_spec.js
spec/frontend/ci/runner/components/runner_form_fields_spec.js
@ -446,7 +445,6 @@ spec/frontend/vue_shared/components/project_selector/project_selector_spec.js
spec/frontend/vue_shared/components/registry/registry_search_spec.js
spec/frontend/vue_shared/components/runner_instructions/instructions/runner_aws_instructions_spec.js
spec/frontend/vue_shared/components/runner_instructions/runner_instructions_modal_spec.js
spec/frontend/vue_shared/components/segmented_control_button_group_spec.js
spec/frontend/vue_shared/components/smart_virtual_list_spec.js
spec/frontend/vue_shared/components/source_viewer/components/chunk_spec.js
spec/frontend/vue_shared/components/tooltip_on_truncate_spec.js

View File

# frozen_string_literal: true

# Factory for Members::DeletionSchedule records: a scheduled removal of a
# user's membership from a namespace, recording who scheduled the deletion.
FactoryBot.define do
  factory :members_deletion_schedules, class: 'Members::DeletionSchedule' do
    namespace { association(:group) }
    user { association(:user) }
    scheduled_by { association(:user) }
  end
end

View File

@ -4,69 +4,73 @@ import LinkCell from '~/ci/runner/components/cells/link_cell.vue';
describe('LinkCell', () => {
let wrapper;
let onClick;
const findGlLink = () => wrapper.findComponent(GlLink);
const findLink = () => wrapper.findComponent(GlLink);
const findSpan = () => wrapper.find('span');
const createComponent = ({ props = {}, ...options } = {}) => {
const createComponent = (props = {}) => {
wrapper = shallowMountExtended(LinkCell, {
propsData: {
...props,
},
...options,
attrs: { foo: 'bar' },
slots: {
default: 'My Text',
},
listeners: {
click: onClick,
},
});
};
it('when an href is provided, renders a link', () => {
createComponent({ props: { href: '/url' } });
expect(findGlLink().exists()).toBe(true);
beforeEach(() => {
onClick = jest.fn();
});
it('when an href is not provided, renders no link', () => {
createComponent();
expect(findGlLink().exists()).toBe(false);
});
describe('works as a wrapper', () => {
describe('when an href is provided', () => {
beforeEach(() => {
createComponent({ href: '/url' });
});
describe.each`
href | findContent
${null} | ${findSpan}
${'/url'} | ${findGlLink}
`('When href is $href', ({ href, findContent }) => {
const content = 'My Text';
const attrs = { foo: 'bar' };
const listeners = {
click: jest.fn(),
};
it('renders a link', () => {
expect(findLink().exists()).toBe(true);
});
beforeEach(() => {
createComponent({
props: { href },
slots: {
default: content,
},
attrs,
listeners,
it('passes attributes', () => {
expect(findLink().attributes()).toMatchObject({ foo: 'bar' });
});
it('passes event listeners', () => {
expect(onClick).toHaveBeenCalledTimes(0);
findLink().vm.$emit('click');
expect(onClick).toHaveBeenCalledTimes(1);
});
});
afterAll(() => {
listeners.click.mockReset();
});
describe('when an href is not provided', () => {
beforeEach(() => {
createComponent({ href: null });
});
it('Renders content', () => {
expect(findContent().text()).toBe(content);
});
it('renders no link', () => {
expect(findLink().exists()).toBe(false);
});
it('Passes attributes', () => {
expect(findContent().attributes()).toMatchObject(attrs);
});
it('passes attributes', () => {
expect(findSpan().attributes()).toMatchObject({ foo: 'bar' });
});
it('Passes event listeners', () => {
expect(listeners.click).toHaveBeenCalledTimes(0);
it('passes event listeners', () => {
expect(onClick).toHaveBeenCalledTimes(0);
findContent().vm.$emit('click');
findSpan().trigger('click');
expect(listeners.click).toHaveBeenCalledTimes(1);
expect(onClick).toHaveBeenCalledTimes(1);
});
});
});
});

View File

@ -32,6 +32,7 @@ describe('kubernetes_logs', () => {
gitlabAgentId,
});
let k8sLogsQueryMock;
let abortK8sPodLogsStreamMock;
let environmentDataMock;
const defaultEnvironmentData = {
@ -63,6 +64,7 @@ describe('kubernetes_logs', () => {
k8sLogsQueryMock = jest.fn().mockResolvedValue({
logs: logsMockData,
});
abortK8sPodLogsStreamMock = jest.fn().mockResolvedValue({ errors: [] });
environmentDataMock = jest.fn().mockResolvedValue(defaultEnvironmentData);
};
@ -71,6 +73,9 @@ describe('kubernetes_logs', () => {
Query: {
k8sLogs: k8sLogsQueryMock,
},
Mutation: {
abortK8sPodLogsStream: abortK8sPodLogsStreamMock,
},
};
return createMockApollo([[environmentClusterAgentQuery, environmentDataMock]], mockResolvers);
@ -275,4 +280,26 @@ describe('kubernetes_logs', () => {
});
});
});
describe('beforeDestroy', () => {
beforeEach(async () => {
mountComponent();
await waitForPromises();
wrapper.destroy();
});
it('triggers `abortPodLogsStream` mutation to unsubscribe from the stream', () => {
expect(abortK8sPodLogsStreamMock).toHaveBeenCalledWith(
{},
{
configuration,
namespace: defaultProps.namespace,
podName: defaultProps.podName,
containerName: '',
},
expect.anything(),
expect.anything(),
);
});
});
});

View File

@ -1,34 +1,38 @@
import { EVENT_TIMEOUT, EVENT_PLAIN_TEXT, EVENT_ERROR } from '@gitlab/cluster-client';
import throttle from 'lodash/throttle';
import k8sLogsQuery from '~/environments/graphql/queries/k8s_logs.query.graphql';
import { buildWatchPath, k8sLogs } from '~/environments/graphql/resolvers/kubernetes/k8s_logs';
import k8sPodLogsWatcherQuery from '~/environments/graphql/queries/k8s_pod_logs_watcher.query.graphql';
import {
buildWatchPath,
k8sLogs,
abortK8sPodLogsStream,
} from '~/environments/graphql/resolvers/kubernetes/k8s_logs';
import { bootstrapWatcherMock } from '../watcher_mock_helper';
jest.mock('lodash/throttle', () => jest.fn());
let watchStream;
const configuration = {
basePath: 'kas-proxy/',
baseOptions: {
headers: { 'GitLab-Agent-Id': '1' },
},
};
const podName = 'test-pod';
const namespace = 'default';
const client = { writeQuery: jest.fn(), readQuery: jest.fn() };
describe('buildWatchPath', () => {
it('should return the correct path with namespace', () => {
const resource = 'my-pod';
const api = 'api/v1';
const namespace = 'my-namespace';
const path = buildWatchPath({ resource, api, namespace });
expect(path).toBe(`/${api}/namespaces/${namespace}/pods/${resource}/log`);
});
});
describe('k8sLogs', () => {
let watchStream;
const configuration = {
basePath: 'kas-proxy/',
baseOptions: {
headers: { 'GitLab-Agent-Id': '1' },
},
};
const podName = 'test-pod';
const namespace = 'default';
const client = { writeQuery: jest.fn(), readQuery: jest.fn() };
beforeEach(() => {
watchStream = bootstrapWatcherMock();
});
@ -89,4 +93,64 @@ describe('k8sLogs', () => {
});
},
);
it('should update `k8sPodLogsWatcher` query with the watcher', async () => {
await k8sLogs(null, { configuration, namespace, podName }, { client });
watchStream.triggerEvent(EVENT_PLAIN_TEXT, 'Log data');
expect(client.writeQuery).toHaveBeenCalledWith({
query: k8sPodLogsWatcherQuery,
variables: {
namespace,
configuration,
podName,
},
data: {
k8sPodLogsWatcher: { watcher: {} },
},
});
});
});
describe('abortK8sPodLogsStream', () => {
beforeEach(() => {
watchStream = bootstrapWatcherMock();
});
it('should read `k8sPodLogsWatcher` query to get the watcher', async () => {
await abortK8sPodLogsStream(
null,
{
configuration,
namespace,
podName,
},
{ client },
);
expect(client.readQuery).toHaveBeenCalledWith({
query: k8sPodLogsWatcherQuery,
variables: {
namespace,
configuration,
podName,
},
});
});
it('should abort the stream when the watcher is available', async () => {
client.readQuery.mockReturnValue({ k8sPodLogsWatcher: { watcher: watchStream } });
await abortK8sPodLogsStream(
null,
{
configuration,
namespace,
podName,
},
{ client },
);
expect(watchStream.abortStream).toHaveBeenCalledTimes(1);
});
});

View File

@ -4,6 +4,7 @@ const mockWatcher = WatchApi.prototype;
const mockSubscribeFn = jest.fn().mockImplementation(() => {
return Promise.resolve(mockWatcher);
});
const mockAbortStreamFn = jest.fn();
const MockWatchStream = () => {
const callbacks = {};
@ -31,10 +32,12 @@ const MockWatchStream = () => {
export const bootstrapWatcherMock = () => {
const watchStream = new MockWatchStream();
jest.spyOn(mockWatcher, 'subscribeToStream').mockImplementation(mockSubscribeFn);
jest.spyOn(mockWatcher, 'abortStream').mockImplementation(mockAbortStreamFn);
jest.spyOn(mockWatcher, 'on').mockImplementation(watchStream.registerCallback);
return {
triggerEvent: watchStream.triggerEvent,
subscribeToStreamMock: mockSubscribeFn,
abortStream: mockAbortStreamFn,
};
};

View File

@ -1,8 +1,8 @@
import { nextTick } from 'vue';
import { GlSegmentedControl } from '@gitlab/ui';
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import CiCdAnalyticsAreaChart from '~/vue_shared/components/ci_cd_analytics/ci_cd_analytics_area_chart.vue';
import CiCdAnalyticsCharts from '~/vue_shared/components/ci_cd_analytics/ci_cd_analytics_charts.vue';
import SegmentedControlButtonGroup from '~/vue_shared/components/segmented_control_button_group.vue';
import { transformedAreaChartData, chartOptions } from '../mock_data';
const charts = [
@ -48,7 +48,7 @@ describe('~/vue_shared/components/ci_cd_analytics/ci_cd_analytics_charts.vue', (
});
const findMetricsSlot = () => wrapper.findByTestId('metrics-slot');
const findSegmentedControl = () => wrapper.findComponent(SegmentedControlButtonGroup);
const findSegmentedControl = () => wrapper.findComponent(GlSegmentedControl);
describe('segmented control', () => {
beforeEach(() => {

View File

@ -1,132 +0,0 @@
// Spec for SegmentedControlButtonGroup: a button group where exactly one
// option is selected and clicking an enabled option emits `input` with its value.
import { GlButtonGroup, GlButton } from '@gitlab/ui';
import { shallowMount } from '@vue/test-utils';
import SegmentedControlButtonGroup from '~/vue_shared/components/segmented_control_button_group.vue';

// Fixture options; one entry is disabled to cover the disabled-button path.
const DEFAULT_OPTIONS = [
  { text: 'Lorem', value: 'abc' },
  { text: 'Ipsum', value: 'def' },
  { text: 'Foo', value: 'x', disabled: true },
  { text: 'Dolar', value: 'ghi' },
];

describe('~/vue_shared/components/segmented_control_button_group.vue', () => {
  let consoleSpy;
  let wrapper;

  const createComponent = (props = {}, scopedSlots = {}) => {
    wrapper = shallowMount(SegmentedControlButtonGroup, {
      propsData: {
        value: DEFAULT_OPTIONS[0].value,
        options: DEFAULT_OPTIONS,
        ...props,
      },
      scopedSlots,
    });
  };

  const findButtonGroup = () => wrapper.findComponent(GlButtonGroup);
  const findButtons = () => findButtonGroup().findAllComponents(GlButton);
  // Projects each rendered button into a plain object for easy comparison.
  const findButtonsData = () =>
    findButtons().wrappers.map((x) => ({
      selected: x.props('selected'),
      text: x.text(),
      disabled: x.props('disabled'),
    }));
  const findButtonWithText = (text) => findButtons().wrappers.find((x) => x.text() === text);

  // Expected button data for the given options, with nothing selected yet.
  const optionsAsButtonData = (options) =>
    options.map(({ text, disabled = false }) => ({
      selected: false,
      text,
      disabled,
    }));

  describe('default', () => {
    beforeEach(() => {
      createComponent();
    });

    it('renders button group', () => {
      expect(findButtonGroup().exists()).toBe(true);
    });

    it('renders buttons', () => {
      // The first option matches the default `value` prop, so it is selected.
      const expectation = optionsAsButtonData(DEFAULT_OPTIONS);
      expectation[0].selected = true;

      expect(findButtonsData()).toEqual(expectation);
    });

    describe.each(DEFAULT_OPTIONS.filter((x) => !x.disabled))(
      'when button clicked %p',
      ({ text, value }) => {
        it('emits input with value', () => {
          expect(wrapper.emitted('input')).toBeUndefined();

          findButtonWithText(text).vm.$emit('click');

          expect(wrapper.emitted('input')).toEqual([[value]]);
        });
      },
    );
  });

  // [value, index] pairs for a selection of option positions.
  const VALUE_TEST_CASES = [0, 1, 3].map((index) => [DEFAULT_OPTIONS[index].value, index]);

  describe.each(VALUE_TEST_CASES)('with value=%s', (value, index) => {
    it(`renders selected button at ${index}`, () => {
      createComponent({ value });

      const expectation = optionsAsButtonData(DEFAULT_OPTIONS);
      expectation[index].selected = true;

      expect(findButtonsData()).toEqual(expectation);
    });
  });

  describe('with button-content slot', () => {
    it('renders button content based on slot', () => {
      createComponent(
        {},
        {
          'button-content': `<template #button-content="{ text }">In a slot - {{ text }}</template>`,
        },
      );

      expect(findButtonsData().map((x) => x.text)).toEqual(
        DEFAULT_OPTIONS.map((x) => `In a slot - ${x.text}`),
      );
    });
  });

  describe('options prop validation', () => {
    beforeEach(() => {
      // Silence and capture Vue's prop-validation console.error output.
      consoleSpy = jest.spyOn(console, 'error').mockImplementation();
    });

    it.each([
      [[{ disabled: true }]],
      [[{ value: '1', disabled: 'false' }]],
      [[{ value: null, disabled: 'true' }]],
      [[[{ value: true }, null]]],
    ])('with options=%j, fails validation', (options) => {
      createComponent({ options });

      expect(consoleSpy).toHaveBeenCalledTimes(1);
      expect(consoleSpy).toHaveBeenCalledWith(
        expect.stringContaining('Invalid prop: custom validator check failed for prop "options"'),
      );
    });

    it.each([
      [[{ value: '1' }]],
      [[{ value: 1, disabled: true }]],
      [[{ value: true, disabled: false }]],
      [[{ value: true, props: { 'data-testid': 'test' } }]],
    ])('with options=%j, passes validation', (options) => {
      createComponent({ options });

      expect(consoleSpy).not.toHaveBeenCalled();
    });
  });
});

View File

@ -1,113 +0,0 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe Gitlab::BackgroundMigration::BackfillProjectStatisticsStorageSizeWithoutPipelineArtifactsSizeJob,
  schema: 20230721095222,
  feature_category: :consumables_cost_management do
  include MigrationHelpers::ProjectStatisticsHelper

  include_context 'when backfilling project statistics'

  # Amount the migration is expected to subtract from storage_size.
  let(:default_pipeline_artifacts_size) { 5 }
  # One unit per size component plus the pipeline artifacts size; the total
  # matches default_storage_size (12) from the shared context.
  let(:default_stats) do
    {
      repository_size: 1,
      wiki_size: 1,
      lfs_objects_size: 1,
      build_artifacts_size: 1,
      packages_size: 1,
      snippets_size: 1,
      uploads_size: 1,
      pipeline_artifacts_size: default_pipeline_artifacts_size,
      storage_size: default_storage_size
    }
  end

  describe '#filter_batch' do
    it 'filters out project_statistics with no artifacts size' do
      project_statistics = generate_records(default_projects, project_statistics_table, default_stats)
      # A row with pipeline_artifacts_size: 0 must be excluded by the batch scope.
      project_statistics_table.create!(
        project_id: proj5.id,
        namespace_id: proj5.namespace_id,
        repository_size: 1,
        wiki_size: 1,
        lfs_objects_size: 1,
        build_artifacts_size: 1,
        packages_size: 1,
        snippets_size: 1,
        pipeline_artifacts_size: 0,
        uploads_size: 1,
        storage_size: 7
      )

      expected = project_statistics.map(&:id)
      actual = migration.filter_batch(project_statistics_table).pluck(:id)

      expect(actual).to match_array(expected)
    end
  end

  describe '#perform' do
    subject(:perform_migration) { migration.perform }

    context 'when project_statistics backfill runs' do
      before do
        generate_records(default_projects, project_statistics_table, default_stats)
      end

      context 'when storage_size includes pipeline_artifacts_size' do
        it 'removes pipeline_artifacts_size from storage_size' do
          allow(::Namespaces::ScheduleAggregationWorker).to receive(:perform_async)
          expect(project_statistics_table.pluck(:storage_size).uniq).to match_array([default_storage_size])

          perform_migration

          expect(project_statistics_table.pluck(:storage_size).uniq).to match_array(
            [default_storage_size - default_pipeline_artifacts_size]
          )
          expect(::Namespaces::ScheduleAggregationWorker).to have_received(:perform_async).exactly(4).times
        end
      end

      context 'when storage_size does not include default_pipeline_artifacts_size' do
        it 'does not update the record' do
          allow(::Namespaces::ScheduleAggregationWorker).to receive(:perform_async)
          proj_stat = project_statistics_table.last
          expect(proj_stat.storage_size).to eq(default_storage_size)
          # Pre-adjust one row so it is already correct; the migration must leave
          # it untouched, so one fewer aggregation worker is scheduled.
          proj_stat.storage_size = default_storage_size - default_pipeline_artifacts_size
          proj_stat.save!

          perform_migration

          expect(project_statistics_table.pluck(:storage_size).uniq).to match_array(
            [default_storage_size - default_pipeline_artifacts_size]
          )
          expect(::Namespaces::ScheduleAggregationWorker).to have_received(:perform_async).exactly(3).times
        end
      end
    end

    it 'coerces a null wiki_size to 0' do
      project_statistics = create_project_stats(projects, namespaces, default_stats, { wiki_size: nil })
      allow(::Namespaces::ScheduleAggregationWorker).to receive(:perform_async)
      migration = create_migration(end_id: project_statistics.project_id)

      migration.perform

      project_statistics.reload
      # 7 components minus the NULL wiki_size (treated as 0) and minus
      # pipeline_artifacts_size leaves 6.
      expect(project_statistics.storage_size).to eq(6)
    end

    it 'coerces a null snippets_size to 0' do
      project_statistics = create_project_stats(projects, namespaces, default_stats, { snippets_size: nil })
      allow(::Namespaces::ScheduleAggregationWorker).to receive(:perform_async)
      migration = create_migration(end_id: project_statistics.project_id)

      migration.perform

      project_statistics.reload
      expect(project_statistics.storage_size).to eq(6)
    end
  end
end

View File

@ -0,0 +1,11 @@
# frozen_string_literal: true
require 'spec_helper'
RSpec.describe Members::DeletionSchedule, feature_category: :seat_cost_management do
  describe 'associations' do
    # Each belongs_to association on the model is required (presence-validated).
    %i[namespace user scheduled_by].each do |association|
      it { is_expected.to belong_to(association).required }
    end
  end
end

View File

@ -1,37 +0,0 @@
# frozen_string_literal: true
module MigrationHelpers
  # Spec helpers shared by project-statistics backfill migration specs.
  module ProjectStatisticsHelper
    # Inserts one statistics row per project into +table+, merging the shared
    # attribute hash +values+ with each project's identifying columns.
    # Returns the array of created records.
    def generate_records(projects, table, values = {})
      projects.map do |project|
        attributes = values.merge(project_id: project.id, namespace_id: project.namespace_id)
        table.create!(attributes)
      end
    end

    # Builds an instance of the batched migration under test, covering
    # project_statistics rows from id 1 up to +end_id+.
    def create_migration(end_id:)
      described_class.new(
        start_id: 1,
        end_id: end_id,
        batch_table: 'project_statistics',
        batch_column: 'project_id',
        sub_batch_size: 1_000,
        pause_ms: 0,
        connection: ApplicationRecord.connection
      )
    end

    # Creates a group, a project namespace, a project, and a statistics row
    # built from +default_stats+ merged with +override_stats+.
    # NOTE(review): relies on a `project_statistics_table` helper provided by
    # the including spec — confirm it is defined wherever this module is mixed in.
    def create_project_stats(project_table, namespace, default_stats, override_stats = {})
      stats = default_stats.merge(override_stats)

      group = namespace.create!(name: 'group_a', path: 'group-a', type: 'Group')
      project_namespace = namespace.create!(
        name: 'project_a', path: 'project_a', type: 'Project', parent_id: group.id
      )
      project = project_table.create!(
        name: 'project_a', path: 'project-a', namespace_id: group.id,
        project_namespace_id: project_namespace.id
      )

      project_statistics_table.create!(project_id: project.id, namespace_id: group.id, **stats)
    end
  end
end

View File

@ -1,106 +0,0 @@
# frozen_string_literal: true
# Shared fixtures for specs exercising project-statistics backfill migrations.
#
# NOTE: the `let!` declarations below run in definition order, which fixes the
# insertion order (and therefore the ids) of the namespace/project rows. The
# default `migration` range depends on `proj4.id`, so do NOT reorder these.
RSpec.shared_context 'when backfilling project statistics' do
# Raw table accessors (migration specs bypass application models).
let!(:namespaces) { table(:namespaces) }
let!(:project_statistics_table) { table(:project_statistics) }
let!(:projects) { table(:projects) }
# Number of size columns that are summed into storage_size.
let!(:count_of_columns) { ProjectStatistics::STORAGE_SIZE_COMPONENTS.count }
let(:default_storage_size) { 12 }
# Group hierarchy: root_group -> group -> sub_group, with traversal_ids
# maintained manually since no model callbacks run here.
let!(:root_group) do
namespaces.create!(name: 'root-group', path: 'root-group', type: 'Group') do |new_group|
new_group.update!(traversal_ids: [new_group.id])
end
end
let!(:group) do
namespaces.create!(name: 'group', path: 'group', parent_id: root_group.id, type: 'Group') do |new_group|
new_group.update!(traversal_ids: [root_group.id, new_group.id])
end
end
let!(:sub_group) do
namespaces.create!(name: 'subgroup', path: 'subgroup', parent_id: group.id, type: 'Group') do |new_group|
new_group.update!(traversal_ids: [root_group.id, group.id, new_group.id])
end
end
# A standalone group plus one project namespace per project below.
let!(:namespace1) do
namespaces.create!(
name: 'namespace1', type: 'Group', path: 'space1'
)
end
let!(:proj_namespace1) do
namespaces.create!(
name: 'proj1', path: 'proj1', type: 'Project', parent_id: namespace1.id
)
end
let!(:proj_namespace2) do
namespaces.create!(
name: 'proj2', path: 'proj2', type: 'Project', parent_id: namespace1.id
)
end
let!(:proj_namespace3) do
namespaces.create!(
name: 'proj3', path: 'proj3', type: 'Project', parent_id: sub_group.id
)
end
let!(:proj_namespace4) do
namespaces.create!(
name: 'proj4', path: 'proj4', type: 'Project', parent_id: sub_group.id
)
end
let!(:proj_namespace5) do
namespaces.create!(
name: 'proj5', path: 'proj5', type: 'Project', parent_id: sub_group.id
)
end
# proj1/proj2 live under namespace1; proj3..proj5 under sub_group.
let!(:proj1) do
projects.create!(
name: 'proj1', path: 'proj1', namespace_id: namespace1.id, project_namespace_id: proj_namespace1.id
)
end
let!(:proj2) do
projects.create!(
name: 'proj2', path: 'proj2', namespace_id: namespace1.id, project_namespace_id: proj_namespace2.id
)
end
let!(:proj3) do
projects.create!(
name: 'proj3', path: 'proj3', namespace_id: sub_group.id, project_namespace_id: proj_namespace3.id
)
end
let!(:proj4) do
projects.create!(
name: 'proj4', path: 'proj4', namespace_id: sub_group.id, project_namespace_id: proj_namespace4.id
)
end
let!(:proj5) do
projects.create!(
name: 'proj5', path: 'proj5', namespace_id: sub_group.id, project_namespace_id: proj_namespace5.id
)
end
# Default migration instance: covers proj1..proj4 and deliberately
# excludes proj5 (end_id is proj4.id).
let(:migration) do
described_class.new(start_id: 1, end_id: proj4.id,
batch_table: 'project_statistics', batch_column: 'project_id',
sub_batch_size: 1_000, pause_ms: 0,
connection: ApplicationRecord.connection)
end
# Projects expected to be touched by the default migration range.
let(:default_projects) do
[
proj1, proj2, proj3, proj4
]
end
end