diff --git a/.rubocop_todo/capybara/testid_finders.yml b/.rubocop_todo/capybara/testid_finders.yml
index c674b7f46b1..558b21c7d35 100644
--- a/.rubocop_todo/capybara/testid_finders.yml
+++ b/.rubocop_todo/capybara/testid_finders.yml
@@ -17,21 +17,6 @@ Capybara/TestidFinders:
- 'spec/features/projects/compare_spec.rb'
- 'spec/features/projects/environments/environment_spec.rb'
- 'spec/features/projects/environments/environments_spec.rb'
- - 'spec/features/projects/feature_flags/user_sees_feature_flag_list_spec.rb'
- - 'spec/features/projects/fork_spec.rb'
- - 'spec/features/projects/integrations/user_activates_jira_spec.rb'
- - 'spec/features/projects/issues/design_management/user_views_designs_with_svg_xss_spec.rb'
- - 'spec/features/projects/jobs/permissions_spec.rb'
- - 'spec/features/projects/jobs/user_browses_job_spec.rb'
- - 'spec/features/projects/jobs/user_browses_jobs_spec.rb'
- - 'spec/features/projects/jobs/user_triggers_manual_job_with_variables_spec.rb'
- - 'spec/features/projects/jobs_spec.rb'
- - 'spec/features/projects/members/group_member_cannot_leave_group_project_spec.rb'
- - 'spec/features/projects/members/groups_with_access_list_spec.rb'
- - 'spec/features/projects/members/master_adds_member_with_expiration_date_spec.rb'
- - 'spec/features/projects/members/sorting_spec.rb'
- - 'spec/features/projects/packages_spec.rb'
- - 'spec/features/projects/pipeline_schedules_spec.rb'
- 'spec/features/projects/pipelines/pipeline_spec.rb'
- 'spec/features/projects/pipelines/pipelines_spec.rb'
- 'spec/features/projects/releases/user_creates_release_spec.rb'
diff --git a/.rubocop_todo/style/inline_disable_annotation.yml b/.rubocop_todo/style/inline_disable_annotation.yml
index 4bf891cd5c3..c464f13d704 100644
--- a/.rubocop_todo/style/inline_disable_annotation.yml
+++ b/.rubocop_todo/style/inline_disable_annotation.yml
@@ -2833,7 +2833,6 @@ Style/InlineDisableAnnotation:
- 'spec/lib/banzai/filter/image_link_filter_spec.rb'
- 'spec/lib/banzai/pipeline/incident_management/timeline_event_pipeline_spec.rb'
- 'spec/lib/container_registry/gitlab_api_client_spec.rb'
- - 'spec/lib/generators/batched_background_migration/batched_background_migration_generator_spec.rb'
- 'spec/lib/gitlab/alert_management/payload/base_spec.rb'
- 'spec/lib/gitlab/audit/target_spec.rb'
- 'spec/lib/gitlab/audit/type/definition_spec.rb'
diff --git a/app/assets/javascripts/diffs/store/mutations.js b/app/assets/javascripts/diffs/store/mutations.js
index 370073f1033..2cc2e13648f 100644
--- a/app/assets/javascripts/diffs/store/mutations.js
+++ b/app/assets/javascripts/diffs/store/mutations.js
@@ -210,6 +210,8 @@ export default {
if (diffLines.length && positionType !== FILE_DIFF_POSITION_TYPE) {
const line = diffLines.find(isTargetLine);
+ // skip if none of the discussion positions matched a diff position
+ if (!line) return;
const discussions = addDiscussion(line.discussions || []);
Object.assign(line, {
discussions,
diff --git a/app/assets/javascripts/environments/components/kubernetes_agent_info.vue b/app/assets/javascripts/environments/components/kubernetes_agent_info.vue
index 03bde8d64ac..a2c7daf0797 100644
--- a/app/assets/javascripts/environments/components/kubernetes_agent_info.vue
+++ b/app/assets/javascripts/environments/components/kubernetes_agent_info.vue
@@ -46,7 +46,7 @@ export default {
>{{ agentId }}
[template markup stripped; change not recoverable]
{{ $options.i18n.neverConnectedText }}
diff --git a/app/assets/javascripts/environments/components/kubernetes_status_bar.vue b/app/assets/javascripts/environments/components/kubernetes_status_bar.vue
index b41d1773851..2cf37ccca27 100644
--- a/app/assets/javascripts/environments/components/kubernetes_status_bar.vue
+++ b/app/assets/javascripts/environments/components/kubernetes_status_bar.vue
@@ -2,6 +2,8 @@
import { GlLoadingIcon, GlBadge, GlPopover, GlSprintf, GlLink } from '@gitlab/ui';
import { s__ } from '~/locale';
import {
+ CLUSTER_HEALTH_SUCCESS,
+ CLUSTER_HEALTH_ERROR,
HEALTH_BADGES,
SYNC_STATUS_BADGES,
STATUS_TRUE,
@@ -28,7 +30,7 @@ export default {
type: String,
default: '',
validator(val) {
- return ['error', 'success', ''].includes(val);
+ return [CLUSTER_HEALTH_ERROR, CLUSTER_HEALTH_SUCCESS, ''].includes(val);
},
},
configuration: {
diff --git a/app/assets/javascripts/environments/constants.js b/app/assets/javascripts/environments/constants.js
index 64873a6ac68..28f0bde547e 100644
--- a/app/assets/javascripts/environments/constants.js
+++ b/app/assets/javascripts/environments/constants.js
@@ -94,13 +94,16 @@ export const SERVICES_LIMIT_PER_PAGE = 10;
export const CLUSTER_STATUS_HEALTHY_TEXT = s__('Environment|Healthy');
export const CLUSTER_STATUS_UNHEALTHY_TEXT = s__('Environment|Unhealthy');
+export const CLUSTER_HEALTH_SUCCESS = 'success';
+export const CLUSTER_HEALTH_ERROR = 'error';
+
export const HEALTH_BADGES = {
- success: {
+ [CLUSTER_HEALTH_SUCCESS]: {
variant: 'success',
text: CLUSTER_STATUS_HEALTHY_TEXT,
icon: 'status-success',
},
- error: {
+ [CLUSTER_HEALTH_ERROR]: {
variant: 'danger',
text: CLUSTER_STATUS_UNHEALTHY_TEXT,
icon: 'status-alert',
diff --git a/app/assets/javascripts/environments/environment_details/components/kubernetes_overview.vue b/app/assets/javascripts/environments/environment_details/components/kubernetes_overview.vue
new file mode 100644
index 00000000000..e7d6d639679
--- /dev/null
+++ b/app/assets/javascripts/environments/environment_details/components/kubernetes_overview.vue
@@ -0,0 +1,155 @@
+[155-line Vue single-file component; markup stripped during extraction; only the {{ error }} and {{ content }} interpolations survive]
diff --git a/app/assets/javascripts/environments/environment_details/index.vue b/app/assets/javascripts/environments/environment_details/index.vue
index e22a51389ee..690942349d6 100644
--- a/app/assets/javascripts/environments/environment_details/index.vue
+++ b/app/assets/javascripts/environments/environment_details/index.vue
@@ -3,12 +3,14 @@
import { GlTabs, GlTab } from '@gitlab/ui';
import { s__ } from '~/locale';
import DeploymentHistory from './components/deployment_history.vue';
+import KubernetesOverview from './components/kubernetes_overview.vue';
export default {
components: {
GlTabs,
GlTab,
DeploymentHistory,
+ KubernetesOverview,
},
props: {
projectFullPath: {
@@ -30,19 +32,43 @@ export default {
default: null,
},
},
+ data() {
+ return {
+ currentTabIndex: 0,
+ };
+ },
i18n: {
deploymentHistory: s__('Environments|Deployment history'),
+ kubernetesOverview: s__('Environments|Kubernetes overview'),
},
params: {
deployments: 'deployment-history',
+ kubernetes: 'kubernetes-overview',
+ },
+ methods: {
+ linkClass(index) {
+ return index === this.currentTabIndex ? 'gl-inset-border-b-2-theme-accent' : '';
+ },
},
};
+[template hunk; markup stripped during extraction]
{
provide: {
projectPath: dataSet.projectFullPath,
graphqlEtagKey: dataSet.graphqlEtagKey,
+ kasTunnelUrl: removeLastSlashInUrlPath(dataElement.dataset.kasTunnelUrl),
},
render(createElement) {
return createElement('router-view');
diff --git a/app/assets/javascripts/graphql_shared/possible_types.json b/app/assets/javascripts/graphql_shared/possible_types.json
index 1322d4082c8..9584f2fcd8f 100644
--- a/app/assets/javascripts/graphql_shared/possible_types.json
+++ b/app/assets/javascripts/graphql_shared/possible_types.json
@@ -11,6 +11,9 @@
"AuditEventStreamingHeader",
"AuditEventsStreamingInstanceHeader"
],
+ "CiRunnerCloudProvisioningOptions": [
+ "CiRunnerGoogleCloudProvisioningOptions"
+ ],
"CiVariable": [
"CiGroupVariable",
"CiInstanceVariable",
diff --git a/app/assets/javascripts/invite_members/init_invite_groups_modal.js b/app/assets/javascripts/invite_members/init_invite_groups_modal.js
index 53b756b610f..a2efbcba677 100644
--- a/app/assets/javascripts/invite_members/init_invite_groups_modal.js
+++ b/app/assets/javascripts/invite_members/init_invite_groups_modal.js
@@ -30,6 +30,7 @@ export default function initInviteGroupsModal() {
el,
provide: {
freeUsersLimit: parseInt(el.dataset.freeUsersLimit, 10),
+ overageMembersModalAvailable: parseBoolean(el.dataset.overageMembersModalAvailable),
},
render: (createElement) =>
createElement(InviteGroupsModal, {
diff --git a/app/assets/javascripts/invite_members/init_invite_members_modal.js b/app/assets/javascripts/invite_members/init_invite_members_modal.js
index 385ffeaf9e9..b07608e4c45 100644
--- a/app/assets/javascripts/invite_members/init_invite_members_modal.js
+++ b/app/assets/javascripts/invite_members/init_invite_members_modal.js
@@ -25,6 +25,7 @@ export default (function initInviteMembersModal() {
name: 'InviteMembersModalRoot',
provide: {
name: el.dataset.name,
+ overageMembersModalAvailable: parseBoolean(el.dataset.overageMembersModalAvailable),
},
render: (createElement) =>
createElement(InviteMembersModal, {
diff --git a/app/assets/stylesheets/pages/notes.scss b/app/assets/stylesheets/pages/notes.scss
index 9f9788b44f2..73dee228c04 100644
--- a/app/assets/stylesheets/pages/notes.scss
+++ b/app/assets/stylesheets/pages/notes.scss
@@ -46,14 +46,10 @@ $system-note-icon-m-left: $avatar-m-left + $icon-size-diff / $avatar-m-ratio;
.gl-dark & {
background: var(--gray-10);
}
-
+
.gl-dark .modal-body & {
background: var(--gray-50, $gray-50);
}
-
- &.note-comment {
- top: 30px;
- }
}
}
diff --git a/app/controllers/projects/environments_controller.rb b/app/controllers/projects/environments_controller.rb
index 65cbe5a78ce..862aac2dd29 100644
--- a/app/controllers/projects/environments_controller.rb
+++ b/app/controllers/projects/environments_controller.rb
@@ -10,7 +10,7 @@ class Projects::EnvironmentsController < Projects::ApplicationController
layout 'project'
- before_action only: [:index] do
+ before_action only: [:index, :show] do
push_frontend_feature_flag(:k8s_watch_api, project)
end
@@ -26,7 +26,7 @@ class Projects::EnvironmentsController < Projects::ApplicationController
before_action :environment, only: [:show, :edit, :update, :stop, :terminal, :terminal_websocket_authorize, :cancel_auto_stop]
before_action :verify_api_request!, only: :terminal_websocket_authorize
before_action :expire_etag_cache, only: [:index], unless: -> { request.format.json? }
- before_action :set_kas_cookie, only: [:index, :folder, :edit, :new], if: -> { current_user && request.format.html? }
+ before_action :set_kas_cookie, only: [:index, :folder, :edit, :new, :show], if: -> { current_user && request.format.html? }
after_action :expire_etag_cache, only: [:cancel_auto_stop]
track_event :index, :folder, :show, :new, :edit, :create, :update, :stop, :cancel_auto_stop, :terminal,
diff --git a/app/models/application_setting.rb b/app/models/application_setting.rb
index e39dc127641..df7889450a8 100644
--- a/app/models/application_setting.rb
+++ b/app/models/application_setting.rb
@@ -721,6 +721,8 @@ class ApplicationSetting < MainClusterwide::ApplicationRecord
attr_encrypted :external_pipeline_validation_service_token, encryption_options_base_32_aes_256_gcm
attr_encrypted :mailgun_signing_key, encryption_options_base_32_aes_256_gcm.merge(encode: false)
attr_encrypted :database_grafana_api_key, encryption_options_base_32_aes_256_gcm.merge(encode: false, encode_iv: false)
+ attr_encrypted :arkose_labs_client_xid, encryption_options_base_32_aes_256_gcm.merge(encode: false, encode_iv: false)
+ attr_encrypted :arkose_labs_client_secret, encryption_options_base_32_aes_256_gcm.merge(encode: false, encode_iv: false)
attr_encrypted :arkose_labs_public_api_key, encryption_options_base_32_aes_256_gcm.merge(encode: false, encode_iv: false)
attr_encrypted :arkose_labs_private_api_key, encryption_options_base_32_aes_256_gcm.merge(encode: false, encode_iv: false)
attr_encrypted :arkose_labs_data_exchange_key, encryption_options_base_32_aes_256_gcm.merge(encode: false, encode_iv: false)
diff --git a/app/models/ci/job_artifact.rb b/app/models/ci/job_artifact.rb
index 30374a52c38..2a65480ff45 100644
--- a/app/models/ci/job_artifact.rb
+++ b/app/models/ci/job_artifact.rb
@@ -14,6 +14,8 @@ module Ci
include EachBatch
include Gitlab::Utils::StrongMemoize
+ ROUTING_FEATURE_FLAG = :ci_partitioning_use_ci_job_artifacts_routing_table
+
self.primary_key = :id
self.sequence_name = :ci_job_artifacts_id_seq
@@ -157,7 +159,10 @@ module Ci
validate :validate_file_format!, unless: :trace?, on: :create
update_project_statistics project_statistics_name: :build_artifacts_size
- partitionable scope: :job
+ partitionable scope: :job, through: {
+ table: :p_ci_job_artifacts,
+ flag: ROUTING_FEATURE_FLAG
+ }
scope :not_expired, -> { where('expire_at IS NULL OR expire_at > ?', Time.current) }
scope :for_sha, ->(sha, project_id) { joins(job: :pipeline).where(ci_pipelines: { sha: sha, project_id: project_id }) }
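
The `through:` option is what routes this model's SQL onto the partitioned table once the flag is enabled. A minimal sketch of the mechanism, assuming GitLab's `Feature.enabled?` helper and ignoring the Arel caching the real `Ci::Partitionable` concern has to manage:

```ruby
# Illustrative only; not the actual Ci::Partitionable implementation.
module RoutingTable
  def route_table(legacy:, routing:, flag:)
    define_singleton_method(:table_name) do
      # Re-evaluated on each call, so flipping the flag re-routes queries
      # without a redeploy (the real concern also busts cached Arel tables).
      Feature.enabled?(flag) ? routing.to_s : legacy.to_s
    end
  end
end

class Ci::JobArtifact < Ci::ApplicationRecord
  extend RoutingTable

  route_table legacy: :ci_job_artifacts,
              routing: :p_ci_job_artifacts,
              flag: ROUTING_FEATURE_FLAG
end
```

Flipping `ci_partitioning_use_ci_job_artifacts_routing_table` then changes what `table_name` (and anything derived from it) returns, which is why the raw SQL in `Ci::Pipeline` below needs updating.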
diff --git a/app/models/ci/pipeline.rb b/app/models/ci/pipeline.rb
index 23c2164135d..40a76233324 100644
--- a/app/models/ci/pipeline.rb
+++ b/app/models/ci/pipeline.rb
@@ -769,7 +769,7 @@ module Ci
::Gitlab::SafeRequestStore.fetch("pipeline:#{self.id}:latest_report_artifacts") do
::Ci::JobArtifact.where(
id: job_artifacts.all_reports
- .select('max(ci_job_artifacts.id) as id')
+ .select("max(#{Ci::JobArtifact.quoted_table_name}.id) as id")
.group(:file_type)
)
.preload(:job)
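
That routing is exactly why the hard-coded `ci_job_artifacts` literal above had to go: a raw SQL fragment does not follow `table_name`. `quoted_table_name` is standard Active Record and tracks whatever table the model currently resolves to:

```ruby
# PostgreSQL quoting; the result follows the flag-dependent table_name:
Ci::JobArtifact.quoted_table_name
# => "\"ci_job_artifacts\""   (routing flag disabled)
# => "\"p_ci_job_artifacts\"" (routing flag enabled)
```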
diff --git a/app/views/projects/environments/show.html.haml b/app/views/projects/environments/show.html.haml
index 46ec430cadb..4c7291a002c 100644
--- a/app/views/projects/environments/show.html.haml
+++ b/app/views/projects/environments/show.html.haml
@@ -5,7 +5,7 @@
- add_page_specific_style 'page_bundles/environments'
- add_page_specific_style 'page_bundles/ci_status'
-#environments-detail-view{ data: { details: environments_detail_data_json(current_user, @project, @environment) } }
+#environments-detail-view{ data: { details: environments_detail_data_json(current_user, @project, @environment), kas_tunnel_url: ::Gitlab::Kas.tunnel_url } }
#environments-detail-view-header
#environment_details_page
diff --git a/config/audit_events/types/feature_flag_created.yml b/config/audit_events/types/feature_flag_created.yml
index 053580879fd..5f0ea103dca 100644
--- a/config/audit_events/types/feature_flag_created.yml
+++ b/config/audit_events/types/feature_flag_created.yml
@@ -7,3 +7,4 @@ feature_category: feature_flags
milestone: '15.10'
saved_to_database: true
streamed: true
+scope: [Project]
diff --git a/config/audit_events/types/feature_flag_deleted.yml b/config/audit_events/types/feature_flag_deleted.yml
index 3de626409d5..10a84380ff7 100644
--- a/config/audit_events/types/feature_flag_deleted.yml
+++ b/config/audit_events/types/feature_flag_deleted.yml
@@ -7,3 +7,4 @@ feature_category: feature_flags
milestone: '15.10'
saved_to_database: true
streamed: true
+scope: [Project]
diff --git a/config/audit_events/types/feature_flag_updated.yml b/config/audit_events/types/feature_flag_updated.yml
index 0314684cb48..f2eda134fee 100644
--- a/config/audit_events/types/feature_flag_updated.yml
+++ b/config/audit_events/types/feature_flag_updated.yml
@@ -7,3 +7,4 @@ feature_category: feature_flags
milestone: '15.10'
saved_to_database: true
streamed: true
+scope: [Project]
diff --git a/config/audit_events/types/manually_trigger_housekeeping.yml b/config/audit_events/types/manually_trigger_housekeeping.yml
index 9005f54151a..1101193a502 100644
--- a/config/audit_events/types/manually_trigger_housekeeping.yml
+++ b/config/audit_events/types/manually_trigger_housekeeping.yml
@@ -7,3 +7,4 @@ feature_category: source_code_management
milestone: '15.9'
saved_to_database: true
streamed: true
+scope: [Project]
diff --git a/config/audit_events/types/project_feature_model_registry_access_level_updated.yml b/config/audit_events/types/project_feature_model_registry_access_level_updated.yml
index 2be827cfcad..1a047d86504 100644
--- a/config/audit_events/types/project_feature_model_registry_access_level_updated.yml
+++ b/config/audit_events/types/project_feature_model_registry_access_level_updated.yml
@@ -7,3 +7,4 @@ feature_category: mlops
milestone: '16.7'
saved_to_database: true
streamed: true
+scope: [Project]
diff --git a/config/feature_flags/gitlab_com_derisk/ci_partitioning_use_ci_job_artifacts_routing_table.yml b/config/feature_flags/gitlab_com_derisk/ci_partitioning_use_ci_job_artifacts_routing_table.yml
new file mode 100644
index 00000000000..e85f84643a3
--- /dev/null
+++ b/config/feature_flags/gitlab_com_derisk/ci_partitioning_use_ci_job_artifacts_routing_table.yml
@@ -0,0 +1,9 @@
+---
+name: ci_partitioning_use_ci_job_artifacts_routing_table
+feature_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/440760
+introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/144709
+rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/441759
+milestone: '16.10'
+group: group::pipeline execution
+type: gitlab_com_derisk
+default_enabled: false
diff --git a/config/gitlab_loose_foreign_keys.yml b/config/gitlab_loose_foreign_keys.yml
index c491d45a5f3..aa5582e3a35 100644
--- a/config/gitlab_loose_foreign_keys.yml
+++ b/config/gitlab_loose_foreign_keys.yml
@@ -35,10 +35,6 @@ ci_group_variables:
- table: namespaces
column: group_id
on_delete: async_delete
-ci_job_artifacts:
- - table: projects
- column: project_id
- on_delete: async_delete
ci_job_token_group_scope_links:
- table: users
column: added_by_id
@@ -294,6 +290,10 @@ p_ci_builds_metadata:
- table: projects
column: project_id
on_delete: async_delete
+p_ci_job_artifacts:
+ - table: projects
+ column: project_id
+ on_delete: async_delete
p_ci_runner_machine_builds:
- table: ci_runner_machines
column: runner_machine_id
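
This block moves the `projects` loose foreign key from `ci_job_artifacts` to `p_ci_job_artifacts`, keeping orphan cleanup pointed at the routing table the model now uses. `on_delete: async_delete` means parent deletions are recorded and a background worker later hard-deletes the children; conceptually, the cleanup step is the sketch below (names and driving mechanism simplified for illustration):

```ruby
# project_ids of parent rows already deleted; values assumed for illustration.
orphaned_project_ids = [42, 43]

# async_delete => hard-delete the orphaned artifact rows, batch by batch
# (EachBatch is already included in Ci::JobArtifact).
Ci::JobArtifact
  .where(project_id: orphaned_project_ids)
  .each_batch { |batch| batch.delete_all }
```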
diff --git a/config/metrics/counts_7d/20210216184939_i_ecosystem_jira_service_close_issue_weekly.yml b/config/metrics/counts_7d/20210216184939_i_ecosystem_jira_service_close_issue_weekly.yml
index 90cceab86db..fb42470593e 100644
--- a/config/metrics/counts_7d/20210216184939_i_ecosystem_jira_service_close_issue_weekly.yml
+++ b/config/metrics/counts_7d/20210216184939_i_ecosystem_jira_service_close_issue_weekly.yml
@@ -4,7 +4,7 @@ key_path: redis_hll_counters.ecosystem.i_ecosystem_jira_service_close_issue_week
description: Number of users closing Jira issues by week
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
time_frame: 7d
diff --git a/config/metrics/counts_7d/20210216184943_i_ecosystem_jira_service_cross_reference_weekly.yml b/config/metrics/counts_7d/20210216184943_i_ecosystem_jira_service_cross_reference_weekly.yml
index 70a3b4b0fcb..4ea0c3af989 100644
--- a/config/metrics/counts_7d/20210216184943_i_ecosystem_jira_service_cross_reference_weekly.yml
+++ b/config/metrics/counts_7d/20210216184943_i_ecosystem_jira_service_cross_reference_weekly.yml
@@ -4,7 +4,7 @@ key_path: redis_hll_counters.ecosystem.i_ecosystem_jira_service_cross_reference_
description: Number of users that cross-referenced Jira issues by week
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
time_frame: 7d
diff --git a/config/metrics/counts_7d/20210216184955_ecosystem_total_unique_counts_weekly.yml b/config/metrics/counts_7d/20210216184955_ecosystem_total_unique_counts_weekly.yml
index b0745520870..eb25f3a3845 100644
--- a/config/metrics/counts_7d/20210216184955_ecosystem_total_unique_counts_weekly.yml
+++ b/config/metrics/counts_7d/20210216184955_ecosystem_total_unique_counts_weekly.yml
@@ -4,7 +4,7 @@ key_path: redis_hll_counters.ecosystem.ecosystem_total_unique_counts_weekly
description: Number of users performing actions on Jira issues by week
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
time_frame: 7d
diff --git a/config/metrics/counts_7d/20210302103002_i_ecosystem_slack_service_issue_notification_weekly.yml b/config/metrics/counts_7d/20210302103002_i_ecosystem_slack_service_issue_notification_weekly.yml
index c96e9788e6d..91776260a49 100644
--- a/config/metrics/counts_7d/20210302103002_i_ecosystem_slack_service_issue_notification_weekly.yml
+++ b/config/metrics/counts_7d/20210302103002_i_ecosystem_slack_service_issue_notification_weekly.yml
@@ -4,7 +4,7 @@ key_path: redis_hll_counters.ecosystem.i_ecosystem_slack_service_issue_notificat
description: Calculated unique users to trigger a Slack message by performing an action on an issue by week
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
milestone: "13.10"
diff --git a/config/metrics/counts_7d/20210302103629_i_ecosystem_slack_service_push_notification_weekly.yml b/config/metrics/counts_7d/20210302103629_i_ecosystem_slack_service_push_notification_weekly.yml
index 123d8077a66..83e1df75fc1 100644
--- a/config/metrics/counts_7d/20210302103629_i_ecosystem_slack_service_push_notification_weekly.yml
+++ b/config/metrics/counts_7d/20210302103629_i_ecosystem_slack_service_push_notification_weekly.yml
@@ -4,7 +4,7 @@ key_path: redis_hll_counters.ecosystem.i_ecosystem_slack_service_push_notificati
description: Calculated unique users to trigger a Slack message by performing a Git push by week
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
milestone: "13.10"
diff --git a/config/metrics/counts_7d/20210302103755_i_ecosystem_slack_service_deployment_notification_weekly.yml b/config/metrics/counts_7d/20210302103755_i_ecosystem_slack_service_deployment_notification_weekly.yml
index 4ae6853d08f..dede3daafcf 100644
--- a/config/metrics/counts_7d/20210302103755_i_ecosystem_slack_service_deployment_notification_weekly.yml
+++ b/config/metrics/counts_7d/20210302103755_i_ecosystem_slack_service_deployment_notification_weekly.yml
@@ -4,7 +4,7 @@ key_path: redis_hll_counters.ecosystem.i_ecosystem_slack_service_deployment_noti
description: Calculated unique users to trigger a Slack message by performing a deployment by week
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
milestone: "13.10"
diff --git a/config/metrics/counts_7d/20210302103907_i_ecosystem_slack_service_wiki_page_notification_weekly.yml b/config/metrics/counts_7d/20210302103907_i_ecosystem_slack_service_wiki_page_notification_weekly.yml
index bff00f4f5a7..d307378c7ff 100644
--- a/config/metrics/counts_7d/20210302103907_i_ecosystem_slack_service_wiki_page_notification_weekly.yml
+++ b/config/metrics/counts_7d/20210302103907_i_ecosystem_slack_service_wiki_page_notification_weekly.yml
@@ -4,7 +4,7 @@ key_path: redis_hll_counters.ecosystem.i_ecosystem_slack_service_wiki_page_notif
description: Calculated unique users to trigger a Slack message by performing an action on a wiki page by week
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
milestone: "13.10"
diff --git a/config/metrics/counts_7d/20210302104007_i_ecosystem_slack_service_merge_request_notification_weekly.yml b/config/metrics/counts_7d/20210302104007_i_ecosystem_slack_service_merge_request_notification_weekly.yml
index ef131a81f07..ae30eb439a4 100644
--- a/config/metrics/counts_7d/20210302104007_i_ecosystem_slack_service_merge_request_notification_weekly.yml
+++ b/config/metrics/counts_7d/20210302104007_i_ecosystem_slack_service_merge_request_notification_weekly.yml
@@ -4,7 +4,7 @@ key_path: redis_hll_counters.ecosystem.i_ecosystem_slack_service_merge_request_n
description: Calculated unique users to trigger a Slack message by performing an action on a merge request by week
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
milestone: "13.10"
diff --git a/config/metrics/counts_7d/20210302104047_i_ecosystem_slack_service_note_notification_weekly.yml b/config/metrics/counts_7d/20210302104047_i_ecosystem_slack_service_note_notification_weekly.yml
index 4823a57f853..d05bf537906 100644
--- a/config/metrics/counts_7d/20210302104047_i_ecosystem_slack_service_note_notification_weekly.yml
+++ b/config/metrics/counts_7d/20210302104047_i_ecosystem_slack_service_note_notification_weekly.yml
@@ -4,7 +4,7 @@ key_path: redis_hll_counters.ecosystem.i_ecosystem_slack_service_note_notificati
description: Calculated unique users to trigger a Slack message by creating a note by week
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
milestone: "13.10"
diff --git a/config/metrics/counts_7d/20210302104144_i_ecosystem_slack_service_tag_push_notification_weekly.yml b/config/metrics/counts_7d/20210302104144_i_ecosystem_slack_service_tag_push_notification_weekly.yml
index 3d0123024c0..ae7bc8ecbff 100644
--- a/config/metrics/counts_7d/20210302104144_i_ecosystem_slack_service_tag_push_notification_weekly.yml
+++ b/config/metrics/counts_7d/20210302104144_i_ecosystem_slack_service_tag_push_notification_weekly.yml
@@ -4,7 +4,7 @@ key_path: redis_hll_counters.ecosystem.i_ecosystem_slack_service_tag_push_notifi
description: Calculated unique users to trigger a Slack message by performing a tag push by week
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
milestone: "13.10"
diff --git a/config/metrics/counts_7d/20210302104556_i_ecosystem_slack_service_confidential_note_notification_weekly.yml b/config/metrics/counts_7d/20210302104556_i_ecosystem_slack_service_confidential_note_notification_weekly.yml
index 584b3daa51d..5846c0deb44 100644
--- a/config/metrics/counts_7d/20210302104556_i_ecosystem_slack_service_confidential_note_notification_weekly.yml
+++ b/config/metrics/counts_7d/20210302104556_i_ecosystem_slack_service_confidential_note_notification_weekly.yml
@@ -4,7 +4,7 @@ key_path: redis_hll_counters.ecosystem.i_ecosystem_slack_service_confidential_no
description: Calculated unique users to trigger a Slack message by creating a confidential note by week
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
milestone: "13.10"
diff --git a/config/metrics/counts_7d/20210302104814_i_ecosystem_slack_service_confidential_issue_notification_weekly.yml b/config/metrics/counts_7d/20210302104814_i_ecosystem_slack_service_confidential_issue_notification_weekly.yml
index f6a1f3c033d..a815e631a08 100644
--- a/config/metrics/counts_7d/20210302104814_i_ecosystem_slack_service_confidential_issue_notification_weekly.yml
+++ b/config/metrics/counts_7d/20210302104814_i_ecosystem_slack_service_confidential_issue_notification_weekly.yml
@@ -4,7 +4,7 @@ key_path: redis_hll_counters.ecosystem.i_ecosystem_slack_service_confidential_is
description: Calculated unique users to trigger a Slack message by performing an action on a confidential issue by week
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
milestone: "13.10"
diff --git a/config/metrics/counts_7d/20210916102312_templates_gitlab_slack_application_active.yml b/config/metrics/counts_7d/20210916102312_templates_gitlab_slack_application_active.yml
index 788652bf82a..78d131f6b19 100644
--- a/config/metrics/counts_7d/20210916102312_templates_gitlab_slack_application_active.yml
+++ b/config/metrics/counts_7d/20210916102312_templates_gitlab_slack_application_active.yml
@@ -3,7 +3,7 @@ key_path: counts.templates_gitlab_slack_application_active
description: Count templates with active slack application
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: removed
milestone: "14.3"
diff --git a/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_confidential_issue_notification_weekly.yml b/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_confidential_issue_notification_weekly.yml
index 35d643068ca..b8a12583ce6 100644
--- a/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_confidential_issue_notification_weekly.yml
+++ b/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_confidential_issue_notification_weekly.yml
@@ -3,7 +3,7 @@ key_path: redis_hll_counters.integrations.i_integrations_gitlab_for_slack_app_co
description: Calculated unique users to trigger a Slack message by performing an action on a confidential issue by week
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
milestone: "15.6"
diff --git a/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_confidential_note_notification_weekly.yml b/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_confidential_note_notification_weekly.yml
index 5abf36162e4..07662956c2a 100644
--- a/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_confidential_note_notification_weekly.yml
+++ b/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_confidential_note_notification_weekly.yml
@@ -3,7 +3,7 @@ key_path: redis_hll_counters.integrations.i_integrations_gitlab_for_slack_app_co
description: Calculated unique users to trigger a Slack message by creating a confidential note by week
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
milestone: "15.6"
diff --git a/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_deployment_notification_weekly.yml b/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_deployment_notification_weekly.yml
index 9c3e41827e9..6fa1a3e5d5b 100644
--- a/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_deployment_notification_weekly.yml
+++ b/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_deployment_notification_weekly.yml
@@ -3,7 +3,7 @@ key_path: redis_hll_counters.integrations.i_integrations_gitlab_for_slack_app_de
description: Calculated unique users to trigger a Slack message by performing a deployment by week
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
milestone: "15.6"
diff --git a/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_issue_notification_weekly.yml b/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_issue_notification_weekly.yml
index 417308db301..1491ad3a9b4 100644
--- a/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_issue_notification_weekly.yml
+++ b/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_issue_notification_weekly.yml
@@ -3,7 +3,7 @@ key_path: redis_hll_counters.integrations.i_integrations_gitlab_for_slack_app_is
description: Calculated unique users to trigger a Slack message by performing an action on an issue by week
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
milestone: "15.6"
diff --git a/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_merge_request_notification_weekly.yml b/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_merge_request_notification_weekly.yml
index f74bf9e4877..4ae4348bfc2 100644
--- a/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_merge_request_notification_weekly.yml
+++ b/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_merge_request_notification_weekly.yml
@@ -3,7 +3,7 @@ key_path: redis_hll_counters.integrations.i_integrations_gitlab_for_slack_app_me
description: Calculated unique users to trigger a Slack message by performing an action on a merge request by week
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
milestone: "15.6"
diff --git a/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_note_notification_weekly.yml b/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_note_notification_weekly.yml
index 951954de37b..37fb9eded2b 100644
--- a/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_note_notification_weekly.yml
+++ b/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_note_notification_weekly.yml
@@ -3,7 +3,7 @@ key_path: redis_hll_counters.integrations.i_integrations_gitlab_for_slack_app_no
description: Calculated unique users to trigger a Slack message by creating a note by week
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
milestone: "15.6"
diff --git a/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_push_notification_weekly.yml b/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_push_notification_weekly.yml
index 5ad2211cf02..f77f6ab6b6a 100644
--- a/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_push_notification_weekly.yml
+++ b/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_push_notification_weekly.yml
@@ -3,7 +3,7 @@ key_path: redis_hll_counters.integrations.i_integrations_gitlab_for_slack_app_pu
description: Calculated unique users to trigger a Slack message by performing a Git push by week
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
milestone: "15.6"
diff --git a/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_tag_push_notification_weekly.yml b/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_tag_push_notification_weekly.yml
index db2e093039f..16dee599b0f 100644
--- a/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_tag_push_notification_weekly.yml
+++ b/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_tag_push_notification_weekly.yml
@@ -3,7 +3,7 @@ key_path: redis_hll_counters.integrations.i_integrations_gitlab_for_slack_app_ta
description: Calculated unique users to trigger a Slack message by performing a tag push by week
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
milestone: "15.6"
diff --git a/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_wiki_page_notification_weekly.yml b/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_wiki_page_notification_weekly.yml
index 6fc9cfa31b7..cadf10c4de4 100644
--- a/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_wiki_page_notification_weekly.yml
+++ b/config/metrics/counts_7d/20221101213233_integrations_gitlab_for_slack_app_wiki_page_notification_weekly.yml
@@ -3,7 +3,7 @@ key_path: redis_hll_counters.integrations.i_integrations_gitlab_for_slack_app_wi
description: Calculated unique users to trigger a Slack message by performing an action on a wiki page by week
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
milestone: "15.6"
diff --git a/config/metrics/counts_all/20210216175729_groups_inheriting_campfire_active.yml b/config/metrics/counts_all/20210216175729_groups_inheriting_campfire_active.yml
index b38fb1669e0..3267b856ada 100644
--- a/config/metrics/counts_all/20210216175729_groups_inheriting_campfire_active.yml
+++ b/config/metrics/counts_all/20210216175729_groups_inheriting_campfire_active.yml
@@ -4,7 +4,7 @@ key_path: counts.groups_inheriting_campfire_active
description: Count of active groups inheriting integrations for Campfire
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
time_frame: all
diff --git a/config/metrics/counts_all/20210216175756_templates_discord_active.yml b/config/metrics/counts_all/20210216175756_templates_discord_active.yml
index 25049d1497a..a323f417ab0 100644
--- a/config/metrics/counts_all/20210216175756_templates_discord_active.yml
+++ b/config/metrics/counts_all/20210216175756_templates_discord_active.yml
@@ -4,7 +4,7 @@ key_path: counts.templates_discord_active
description: Count of active service templates for Discord
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: removed
milestone_removed: '14.4'
diff --git a/config/metrics/counts_all/20210216175758_instances_discord_active.yml b/config/metrics/counts_all/20210216175758_instances_discord_active.yml
index 7e325fbabc7..a33542adf7b 100644
--- a/config/metrics/counts_all/20210216175758_instances_discord_active.yml
+++ b/config/metrics/counts_all/20210216175758_instances_discord_active.yml
@@ -4,7 +4,7 @@ key_path: counts.instances_discord_active
description: Count of active instance-level integrations for Discord
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
time_frame: all
diff --git a/config/metrics/counts_all/20210216175837_projects_flowdock_active.yml b/config/metrics/counts_all/20210216175837_projects_flowdock_active.yml
index edcbc344d87..bb454f74850 100644
--- a/config/metrics/counts_all/20210216175837_projects_flowdock_active.yml
+++ b/config/metrics/counts_all/20210216175837_projects_flowdock_active.yml
@@ -4,7 +4,7 @@ key_path: counts.projects_flowdock_active
description: Count of projects with active integrations for Flowdock
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: removed
removed_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/102394
diff --git a/config/metrics/counts_all/20210216175912_groups_hipchat_active.yml b/config/metrics/counts_all/20210216175912_groups_hipchat_active.yml
index e65499a9402..a88796ecc7e 100644
--- a/config/metrics/counts_all/20210216175912_groups_hipchat_active.yml
+++ b/config/metrics/counts_all/20210216175912_groups_hipchat_active.yml
@@ -4,7 +4,7 @@ key_path: counts.groups_hipchat_active
description: Count of groups with active integrations for HipChat
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: removed
milestone_removed: '13.11'
diff --git a/config/metrics/counts_all/20210216175934_groups_jenkins_active.yml b/config/metrics/counts_all/20210216175934_groups_jenkins_active.yml
index df8ac5fa50b..87296065630 100644
--- a/config/metrics/counts_all/20210216175934_groups_jenkins_active.yml
+++ b/config/metrics/counts_all/20210216175934_groups_jenkins_active.yml
@@ -4,7 +4,7 @@ key_path: counts.groups_jenkins_active
description: Count of groups with active integrations for Jenkins
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
time_frame: all
diff --git a/config/metrics/counts_all/20210216175939_projects_inheriting_jenkins_active.yml b/config/metrics/counts_all/20210216175939_projects_inheriting_jenkins_active.yml
index 69d892c7b3e..fed41d0819d 100644
--- a/config/metrics/counts_all/20210216175939_projects_inheriting_jenkins_active.yml
+++ b/config/metrics/counts_all/20210216175939_projects_inheriting_jenkins_active.yml
@@ -4,7 +4,7 @@ key_path: counts.projects_inheriting_jenkins_active
description: Count of active projects inheriting integrations for Jenkins
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
time_frame: all
diff --git a/config/metrics/counts_all/20210216175946_templates_jira_active.yml b/config/metrics/counts_all/20210216175946_templates_jira_active.yml
index ecfc4963f98..97634c7f46e 100644
--- a/config/metrics/counts_all/20210216175946_templates_jira_active.yml
+++ b/config/metrics/counts_all/20210216175946_templates_jira_active.yml
@@ -4,7 +4,7 @@ key_path: counts.templates_jira_active
description: Count of active service templates for Jira
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: removed
milestone_removed: '14.4'
diff --git a/config/metrics/counts_all/20210216180157_groups_unify_circuit_active.yml b/config/metrics/counts_all/20210216180157_groups_unify_circuit_active.yml
index fada3445343..0aef21e6442 100644
--- a/config/metrics/counts_all/20210216180157_groups_unify_circuit_active.yml
+++ b/config/metrics/counts_all/20210216180157_groups_unify_circuit_active.yml
@@ -4,7 +4,7 @@ key_path: counts.groups_unify_circuit_active
description: Count of groups with active integrations for Unify Circuit
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
time_frame: all
diff --git a/config/metrics/counts_all/20210216182623_groups_inheriting_ewm_active.yml b/config/metrics/counts_all/20210216182623_groups_inheriting_ewm_active.yml
index 4d6be9b294a..80b0d9bd3cd 100644
--- a/config/metrics/counts_all/20210216182623_groups_inheriting_ewm_active.yml
+++ b/config/metrics/counts_all/20210216182623_groups_inheriting_ewm_active.yml
@@ -4,7 +4,7 @@ key_path: counts.groups_inheriting_ewm_active
description: Count of active groups inheriting integrations for EWM
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
time_frame: all
diff --git a/config/metrics/counts_all/20230515153834_projects_inheriting_clickup_active.yml b/config/metrics/counts_all/20230515153834_projects_inheriting_clickup_active.yml
index 1e45317f2a5..1ac61351c50 100644
--- a/config/metrics/counts_all/20230515153834_projects_inheriting_clickup_active.yml
+++ b/config/metrics/counts_all/20230515153834_projects_inheriting_clickup_active.yml
@@ -3,7 +3,7 @@ key_path: counts.projects_inheriting_clickup_active
description: Count of active projects inheriting integrations for ClickUp
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
milestone: "16.1"
diff --git a/config/metrics/counts_all/20231205140200_groups_inheriting_diffblue_cover_active.yml b/config/metrics/counts_all/20231205140200_groups_inheriting_diffblue_cover_active.yml
index c8d5ceb8393..763a7365c5d 100644
--- a/config/metrics/counts_all/20231205140200_groups_inheriting_diffblue_cover_active.yml
+++ b/config/metrics/counts_all/20231205140200_groups_inheriting_diffblue_cover_active.yml
@@ -4,7 +4,7 @@ key_path: counts.groups_inheriting_diffblue_cover_active
description: Count of active groups inheriting integrations for Diffblue Cover
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: number
status: active
time_frame: all
diff --git a/config/metrics/settings/20210204124908_mattermost_enabled.yml b/config/metrics/settings/20210204124908_mattermost_enabled.yml
index 1e8560ae6fd..6663b0fd233 100644
--- a/config/metrics/settings/20210204124908_mattermost_enabled.yml
+++ b/config/metrics/settings/20210204124908_mattermost_enabled.yml
@@ -4,7 +4,7 @@ key_path: mattermost_enabled
description: Whether Mattermost is enabled
product_section: dev
product_stage: manage
-product_group: integrations
+product_group: import_and_integrate
value_type: boolean
status: active
time_frame: none
diff --git a/db/docs/p_ci_job_artifacts.yml b/db/docs/p_ci_job_artifacts.yml
index 5bb3dc580c7..adca9eca071 100644
--- a/db/docs/p_ci_job_artifacts.yml
+++ b/db/docs/p_ci_job_artifacts.yml
@@ -2,6 +2,7 @@
table_name: p_ci_job_artifacts
classes:
- Ci::JobArtifact
+- Ci::JobArtifact::Partitioned
feature_categories:
- continuous_integration
description: Routing table for ci_job_artifacts
diff --git a/db/migrate/20240212223930_add_arkose_client_api_settings.rb b/db/migrate/20240212223930_add_arkose_client_api_settings.rb
new file mode 100644
index 00000000000..31e9db553bc
--- /dev/null
+++ b/db/migrate/20240212223930_add_arkose_client_api_settings.rb
@@ -0,0 +1,23 @@
+# frozen_string_literal: true
+
+class AddArkoseClientApiSettings < Gitlab::Database::Migration[2.2]
+ milestone '16.10'
+
+ enable_lock_retries!
+
+ def up
+ add_column :application_settings, :encrypted_arkose_labs_client_xid, :binary
+ add_column :application_settings, :encrypted_arkose_labs_client_xid_iv, :binary
+
+ add_column :application_settings, :encrypted_arkose_labs_client_secret, :binary
+ add_column :application_settings, :encrypted_arkose_labs_client_secret_iv, :binary
+ end
+
+ def down
+ remove_column :application_settings, :encrypted_arkose_labs_client_xid, :binary
+ remove_column :application_settings, :encrypted_arkose_labs_client_xid_iv, :binary
+
+ remove_column :application_settings, :encrypted_arkose_labs_client_secret, :binary
+ remove_column :application_settings, :encrypted_arkose_labs_client_secret_iv, :binary
+ end
+end
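
The paired `*_iv` columns exist because the `attr_encrypted` options used in `ApplicationSetting` (`mode: :per_attribute_iv` with `encode: false, encode_iv: false`) store raw ciphertext in `encrypted_<name>` and a per-record initialization vector in `encrypted_<name>_iv`. Roughly, the model-side declaration expands to the sketch below; treat the exact option values as an approximation of `encryption_options_base_32_aes_256_gcm`:

```ruby
class ApplicationSetting < ActiveRecord::Base
  # Reads/writes encrypted_arkose_labs_client_xid and
  # encrypted_arkose_labs_client_xid_iv, the bytea columns added here.
  attr_encrypted :arkose_labs_client_xid,
    mode: :per_attribute_iv,   # one IV per record, stored alongside the value
    algorithm: 'aes-256-gcm',
    key: Settings.attr_encrypted_db_key_base_32, # GitLab's 32-byte derived key
    encode: false,             # raw bytes, no Base64: hence the :binary columns
    encode_iv: false
end
```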
diff --git a/db/schema_migrations/20240212223930 b/db/schema_migrations/20240212223930
new file mode 100644
index 00000000000..3b274d76a0b
--- /dev/null
+++ b/db/schema_migrations/20240212223930
@@ -0,0 +1 @@
+589d3135f638fa1222b03afa5d08d9be349d8c46c3c7f28fd6576cce43bff202
\ No newline at end of file
diff --git a/db/structure.sql b/db/structure.sql
index fd9027155d6..c4c0aff46c9 100644
--- a/db/structure.sql
+++ b/db/structure.sql
@@ -4079,6 +4079,10 @@ CREATE TABLE application_settings (
enable_member_promotion_management boolean DEFAULT false NOT NULL,
lock_math_rendering_limits_enabled boolean DEFAULT false NOT NULL,
security_approval_policies_limit integer DEFAULT 5 NOT NULL,
+ encrypted_arkose_labs_client_xid bytea,
+ encrypted_arkose_labs_client_xid_iv bytea,
+ encrypted_arkose_labs_client_secret bytea,
+ encrypted_arkose_labs_client_secret_iv bytea,
CONSTRAINT app_settings_container_reg_cleanup_tags_max_list_size_positive CHECK ((container_registry_cleanup_tags_service_max_list_size >= 0)),
CONSTRAINT app_settings_container_registry_pre_import_tags_rate_positive CHECK ((container_registry_pre_import_tags_rate >= (0)::numeric)),
CONSTRAINT app_settings_dep_proxy_ttl_policies_worker_capacity_positive CHECK ((dependency_proxy_ttl_group_policy_worker_capacity >= 0)),
diff --git a/doc/api/graphql/reference/index.md b/doc/api/graphql/reference/index.md
index e27e26cdbfe..54fe0a0284d 100644
--- a/doc/api/graphql/reference/index.md
+++ b/doc/api/graphql/reference/index.md
@@ -16443,50 +16443,6 @@ Machine type used for runner cloud provisioning.
| `name` | [`String`](#string) | Name of the machine type. |
| `zone` | [`String`](#string) | Zone of the machine type. |
-### `CiRunnerCloudProvisioningOptions`
-
-Options for runner cloud provisioning.
-
-#### Fields
-
-| Name | Type | Description |
-| ---- | ---- | ----------- |
-| `regions` | [`CiRunnerCloudProvisioningRegionConnection`](#cirunnercloudprovisioningregionconnection) | Regions available for provisioning a runner. (see [Connections](#connections)) |
-
-#### Fields with arguments
-
-##### `CiRunnerCloudProvisioningOptions.machineTypes`
-
-Machine types available for provisioning a runner.
-
-Returns [`CiRunnerCloudProvisioningMachineTypeConnection`](#cirunnercloudprovisioningmachinetypeconnection).
-
-This field returns a [connection](#connections). It accepts the
-four standard [pagination arguments](#connection-pagination-arguments):
-`before: String`, `after: String`, `first: Int`, and `last: Int`.
-
-###### Arguments
-
-| Name | Type | Description |
-| ---- | ---- | ----------- |
-| `zone` | [`String!`](#string) | Zone for which to retrieve machine types. |
-
-##### `CiRunnerCloudProvisioningOptions.zones`
-
-Zones available for provisioning a runner.
-
-Returns [`CiRunnerCloudProvisioningZoneConnection`](#cirunnercloudprovisioningzoneconnection).
-
-This field returns a [connection](#connections). It accepts the
-four standard [pagination arguments](#connection-pagination-arguments):
-`before: String`, `after: String`, `first: Int`, and `last: Int`.
-
-###### Arguments
-
-| Name | Type | Description |
-| ---- | ---- | ----------- |
-| `region` | [`String`](#string) | Region for which to retrieve zones. Returns all zones if not specified. |
-
### `CiRunnerCloudProvisioningRegion`
Region used for runner cloud provisioning.
@@ -16509,6 +16465,50 @@ Zone used for runner cloud provisioning.
| `description` | [`String`](#string) | Description of the zone. |
| `name` | [`String`](#string) | Name of the zone. |
+### `CiRunnerGoogleCloudProvisioningOptions`
+
+Options for runner Google Cloud provisioning.
+
+#### Fields
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| `regions` | [`CiRunnerCloudProvisioningRegionConnection`](#cirunnercloudprovisioningregionconnection) | Regions available for provisioning a runner. (see [Connections](#connections)) |
+
+#### Fields with arguments
+
+##### `CiRunnerGoogleCloudProvisioningOptions.machineTypes`
+
+Machine types available for provisioning a runner.
+
+Returns [`CiRunnerCloudProvisioningMachineTypeConnection`](#cirunnercloudprovisioningmachinetypeconnection).
+
+This field returns a [connection](#connections). It accepts the
+four standard [pagination arguments](#connection-pagination-arguments):
+`before: String`, `after: String`, `first: Int`, and `last: Int`.
+
+###### Arguments
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| `zone` | [`String!`](#string) | Zone to retrieve machine types for. |
+
+##### `CiRunnerGoogleCloudProvisioningOptions.zones`
+
+Zones available for provisioning a runner.
+
+Returns [`CiRunnerCloudProvisioningZoneConnection`](#cirunnercloudprovisioningzoneconnection).
+
+This field returns a [connection](#connections). It accepts the
+four standard [pagination arguments](#connection-pagination-arguments):
+`before: String`, `after: String`, `first: Int`, and `last: Int`.
+
+###### Arguments
+
+| Name | Type | Description |
+| ---- | ---- | ----------- |
+| `region` | [`String`](#string) | Region to retrieve zones for. Returns all zones if not specified. |
+
### `CiRunnerManager`
#### Fields
@@ -26273,7 +26273,7 @@ four standard [pagination arguments](#connection-pagination-arguments):
##### `Project.runnerCloudProvisioningOptions`
-Options for runner cloud provisioning by a specified cloud provider. Returns `null` if `:google_cloud_runner_provisioning` feature flag is disabled, or the GitLab instance is not a SaaS instance.
+Options for provisioning the runner on Google Cloud. Returns `null` if `:google_cloud_runner_provisioning` feature flag is disabled, or the GitLab instance is not a SaaS instance.
NOTE:
**Introduced** in 16.9.
@@ -26285,6 +26285,7 @@ Returns [`CiRunnerCloudProvisioningOptions`](#cirunnercloudprovisioningoptions).
| Name | Type | Description |
| ---- | ---- | ----------- |
+| `cloudProjectId` | [`String!`](#string) | Identifier of the cloud project. |
| `provider` | [`CiRunnerCloudProvider!`](#cirunnercloudprovider) | Identifier of the cloud provider. |
##### `Project.runners`
@@ -34014,6 +34015,14 @@ abstract types.
### Unions
+#### `CiRunnerCloudProvisioningOptions`
+
+Options for runner cloud provisioning.
+
+One of:
+
+- [`CiRunnerGoogleCloudProvisioningOptions`](#cirunnergooglecloudprovisioningoptions)
+
#### `DependencyLinkMetadata`
Represents metadata associated with a dependency link.
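
Turning `CiRunnerCloudProvisioningOptions` into a union keeps the field name stable while letting each provider expose its own fields; today Google Cloud is the only member. In graphql-ruby terms the shape is roughly the following sketch (class names assumed, not GitLab's actual ones):

```ruby
module Types
  module Ci
    # Sketch of the documented union; resolves every object to the sole member.
    class RunnerCloudProvisioningOptionsType < BaseUnion
      graphql_name 'CiRunnerCloudProvisioningOptions'
      description 'Options for runner cloud provisioning.'

      possible_types Types::Ci::RunnerGoogleCloudProvisioningOptionsType

      def self.resolve_type(_object, _context)
        Types::Ci::RunnerGoogleCloudProvisioningOptionsType
      end
    end
  end
end
```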
diff --git a/doc/architecture/blueprints/cells/application-deployment.md b/doc/architecture/blueprints/cells/application-deployment.md
index 8af7051f06d..763a4c53e77 100644
--- a/doc/architecture/blueprints/cells/application-deployment.md
+++ b/doc/architecture/blueprints/cells/application-deployment.md
@@ -1,467 +1,11 @@
---
-owning-stage: "~devops::platforms"
-group: Delivery
-description: 'Cells: Application Deployment'
-creation-date: "2024-01-09"
-authors: [ "@nolith", "@skarbek" ]
-coach:
-approvers: []
+redirect_to: 'infrastructure/deployments.md'
+remove_date: '2024-05-16'
---
-Disclaimer: This blueprint requires more cross-functional alignment - [Confidence Level] --> Low
+This document was moved to [another location](infrastructure/deployments.md).
-# Application Deployment with a Cellular Architecture
-
-This blueprint describes a deployment strategy that can support the new scaling dimension introduced by the Cell Architecture.
-
-The complexity of this transition will demand participation from many teams in the Platforms section to take ownership of the features necessary to reach a production-grade rating on this architecture.
-
-## Introduction
-
-### Preamble
-
-From a high-level perspective, a Cell Cluster is a system made of only 3 items:
-
-1. **Router** - An HA routing system deployed independently from the GitLab application.
-1. **Primary Cell** - The GitLab installation that is the leader for all the cluster wide data and services. This will be the legacy GitLab.com deployment.
-1. Zero or more **Secondary Cells** - GitLab installations authoritative for a limited number of Organizations. Those Cells are deployed using GitLab Dedicated tools.
-
-```plantuml
-@startuml
-
-actor User
-
-cloud router {
-component Router as R
-}
-
-component "Primary Cell" as Primary
-collections "Secondary Cells" as Secondary
-
-User ==> R
-R ==> Primary
-R ==> Secondary
-
-Secondary --> Primary : "Internal API"
-
-@enduml
-```
-
-As we can see from the diagram, users interact with the system through the router only. Secondary Cells communicate with the Primary Cell using an internal API and have a local copy of all the database rows necessary to operate.
-
-It is important to note that even though a Secondary Cell supports GitLab Geo out of the box, we will not be able to provide this feature to our users until the Router supports it.
-
-### Key Terms
-
-- Deployment - The GitLab application and its components being installed into infrastructure
-- `auto-deploy` version - The active version that creates a package viable for deployment
-- ring - A logical partition of the cell cluster. In order to deploy to the next ring, a package must be validated inside the current ring
-- `perimeter` - the ring marking the "definition of done" for Release Managers; a package validated inside the perimeter is allowed to roll out to the rest of the fleet
-- `graduated` version - The version deemed safe to deploy to cells outside of the perimeter
-- `.com` - refers to our old existing or currently running infrastructure
-- Primary Cell - The GitLab installation that is the leader for all the cluster wide data and services. Initially this will be the legacy GitLab.com deployment. This implicitly includes .com as our legacy infrastructure.
-- Secondary Cell(s) - GitLab installation(s) authoritative for a limited number of Organizations. Cell(s) are deployed using GitLab Dedicated tools.
-
-### Ring deployment
-
-The scale of the Cell project deployment, together with the strong user partitioning, maps well to a [ring deployment](https://configcat.com/ring-deployment/) approach.
-
-```plantuml
-@startuml
-
-skinparam frame {
- borderColor<<perimeter>> red
-}
-
-left to right direction
-
-frame "Ring 3" as r3 {
- component "Cell4" as c4
- component "Cell5" as c5
- component "Cell6" as c6
- component "Cell7" as c7
- component "Cell8" as c8
- component "Cell9" as c9
-
- frame "Ring 2" as r2 {
- component "Cell1" as c1
- component "Cell2" as c2
- component "Cell3" as c3
-
- frame "Ring 1" <> as r1 {
- frame "Ring 0" as r0 {
- component "Canary stage" <> as cny
- component "QA Cell" as QA
-
- note as ring0_note
- Ring 0 goes in parallel with canary
- QA tests executed on **canary and QA Cell**
- end note
- }
-
- component "Main stage\nPrimary Cell" <> as Primary
-
- note as perimeter_note
- The perimeter marks the definition of done for an auto-deploy package.
- When post-deployment migrations are executed inside the perimeter,
- the package is ready to be pulled by the outer rings
- **outside of the release managers' coordinator pipeline**
- end note
- }
-
- note as baking_areas
- A package cannot roll out to the next ring before it is successfully
- installed inside the current ring.
- end note
- }
-}
-
-@enduml
-```
-
-In the image above, we show a possible ring layout for a cluster made of the Primary Cell and 10 Secondary Cells, the upper bound of the Cell 1.0 milestone.
-
-The general rule is that:
-
-1. The deployment process progresses from Ring 0 to the outer rings
-1. Rings are a collection of Cells sharing the same risk factor associated to a deployment.
-1. Deployments can get halted at any stage and the package will not reach the outer rings.
-1. We define the "perimeter" ring that marks the "definition of done" for the Release Managers.
- - Crossing perimeter is the logical point in time of a given package lifecycle after the PDM has successfully run on the Main Stage. Effectively, between Ring 1 and Ring 2 as described throughout this document.
- - A successful run of the Post Deploy Migrations inside the perimeter marks a package as `graduated`.
- - A `graduated` package is a valid candidate for the monthly release.
- - A `graduated` package is rolled out to the rest of the rings automatically.
- - Deployments must be automated: inside the perimeter are responsibility of Release Managers, outside of it are responsibility of Team:Ops.
-
-### Reference materials
-
-- [Cell 1.0 blueprint](https://gitlab.com/gitlab-org/gitlab/-/blob/master/doc/architecture/blueprints/cells/iterations/cells-1.0.md)
-- [The merge request for this blueprint](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/141427)
-- [Delivery Point of View on Cells](https://gitlab.com/gitlab-com/Product/-/issues/12770)
-- [GitLab.com deployment process before Cells](https://gitlab.com/gitlab-com/content-sites/handbook/-/blob/21f6898110466b5c581a881db0ce343bf9cb1a72/content/handbook/engineering/deployments-and-releases/deployments/index.md)
-
-## Goals and Non-Goals
-
-### Goals
-
-- Limit the increase in cognitive load for the release manager; in doing so, we defined the perimeter as the clear handover point where a package is no longer a release manager's responsibility.
-- Limit the blast radius of failures by partitioning the cell cluster into rings, automated validation occurs between each ring.
-- Ensure deployments are reliably automated
-- Ensure automatic handling of failed deployments
-- Provide observability into package rollouts and deployments
-
-### Non-Goals
-
-- Extending `release-tools` to take ownership of the Cell Application Deployments. A smaller, more specific piece of software will allow us to keep the tooling focused on one job.
-- Introduce major changes related to Release Management
-- Lifecycle management of Cells
-- Management of routing traffic to/from Cells
-- Individual component deploys
-
-## Stakeholders
-
-We have several teams partaking in the operations of Cell, the first distinction is between teams implementing and maintaining the tools and teams utilizing those tools.
-
-| Areas | Features | Owners |
-|-----------------------------------|----------------------------------------------------------|---------------------------|
-| Integration with Dedicated tools* | | |
-| | integration with Release Managers' workflows | Delivery:Deployments |
-| | deployment mechanics using `Instrumentor` and `AMP` | Foundations |
-| SSOT for cluster state* | | |
-| | Investigate GitOps model | Delivery:Deployments |
-| | Investigate CRD + operator | Delivery:Deployments |
-| Ring-based deployment automation | | |
-| | propagating changes inside a ring perimeter | Delivery:Deployments |
-| | orchestrating changes propagation outside ring perimeter | Foundations |
-| | emergency brake: stopping a package rollout | Delivery:Deployments |
-| Rollback capabilities | | |
-| | rollback with downtime (for QA Cell in ring 0) | Delivery:Deployments |
-| | delayed PDM for rollback support | Environment Automation* |
-| Cell Lifecycle Automation | | |
-| | Cell reference architecture | Foundations |
-| | Cell dimensioning | Foundations |
-| | Cell provisioning | Foundations |
-| | Cell deprovisioning | Foundations |
-| Observability | | |
-| | Cell health metric | Scalability:Observability |
-| | Fleet health metric | Scalability:Observability |
-| | Paging EOC | Foundations |
-| | Package States | Delivery:Deployments |
-
-> \* These items may require contributions from Delivery:Deployments. This work should be heavily collaborated on as it'll help ensure appropriate alignment to meet the needs of the owning team and customer teams.
-
-The users of those features are the Release Managers, the Engineer On Call, and the Team:Ops.
-The following list define the tasks those groups can perform in the cell cluster:
-
-1. Release Managers
- - Command deployments inside the perimeter
- - Declare "graduated" packages
- - Rollback deployments inside the perimeter
-1. Engineer On Call
- - Receive alerts for failed deployments
- - Can pause a package rollout (not reaching the next ring)
- - Drive investigation for failed deployments
-1. Team:Ops
- - Cells Administration
- - Provisioning
- - Deprovisioning
- - Re-balancing
- - Cell-Ring association
-
-## Requirements
-
-Before we can integrate Secondary Cells to our deployment pipeline, we need a few items immediately:
-
-1. The router should exist, it must be HA, and have an independent deployment pipeline
- - This is required for appropriate testing. As noted below, we'll need a QA cell to direct a deployment to for which QA will execute tests against. A router will need to route QA tests to the appropriate Cell.
-1. Assets Deployment
- - This already exists today for .com. Today this is handled via HAProxy, but with Cells, the routing layer will become the responsible party to redirect assets in a similar fashion.
- - If assets are chosen to be managed differently, this changes both how Delivery need to deploy said assets in order to provide as close to Zero Downtime Upgrades as possible, and configuration to the Cell installation to support routing to assets properly.
-1. Feature Flags
- - We are assuming that the current Feature Flags workflows and tooling will just work on the Primary Cell and that Secondary Cells will not be affected.
- - The use of feature flags to mitigate incidents is limited to only the Primary Cell.
- - Tooling may need to mature to ensure that Cells do not drift for long periods of time with feature flags. This ensures that customers have a similar experience if their work expands across Cells and that we as operators of .com need not worry about version drift and the implications of code differing behind the feature flag.
- - Further guidance, documentation will need to be developed for this area. Engineers shouldn't care what cell an organization is a part of. Thus Feature Flag toggles abstract away the need for engineers to care.
-
-## Proposed plan of action
-
-From a delivery perspective not much changes between the 3 proposed Cells iterations (1.0, 1.5, and 2.0). The split is an iterative approach based on cutting the scope of the features available for Organizations bound to a given Cell. From a deployment point of view, it should be possible to have multiple Secondary Cells from the first iteration so we have to figure out a roadmap to get there that is independent from the Cell architecture version.
-
-### Iterations
-
-#### Cells 1.0
-
-The intent in this iteration is to focus our efforts on building and integrating our own tooling that builds and manages Cells. The following milestones, and their exit criterion, are a collaborative effort of the Platforms section and spans across many teams.
-
-1. The Dedicated technology stack expansion:
- - Instrumentor and AMP support GCP
- - A cell is defined as a reference architecture in Instrumentor
-1. Control Plane for Cells - Cell Cluster Coordinator
- - Switchboard is currently leveraged by Dedicated but is not an appropriate tool for Cells. We should evaluate the capabilities of other tooling created by Dedicated, `amp` and `instrumentor`, to determine how they could be integrated into a deployment workflow.
- - Implement Cell deployment converging the entire infrastructure of the cell (current dedicated capability)
- - Implement the concept of Rings: initially only Rings 0 and 2
-1. First Secondary Cell: the QA Cell in Ring 0
- - Build integration with our current tooling to perform deployments to the QA cell via the Coordinator
- - The QA Cell runs it's own QA smoke tests
- - The QA Cell is updated in parallel with the production canary stage: QA cell failures are considered soft and do not block auto_deploy
-1. Control Plane for Cells - Individual dashboards and alerting
- - observability is at least on par with the legacy infrastructure
- - alerting is at least on par with the legacy infrastructure
-1. First Customer Secondary Cell: Ring 2
- - release-tools can `graduate` a package after the PDM execution
- - the Coordinator can manage Ring 2 deployments
-1. Support for multiple Secondary Cells
- - the Coordinator can converge multiple cells in the same Ring to the desired version
-
-> - Limitations:
-> - all Secondary Cells will be in the same ring, Ring 2
-> - Rollbacks are possible but require downtime to achieve on all secondary cells
-
-#### Cells 1.5 and 2.0
-
-The following features can be distributed between Cell 1.5 and 2.0, they are all improving the operational aspects and we should prioritize them as we learn more about operating Cells.
-
-1. Control Plane for Cells - Additional rings
- - Secondary Cells can be spread over multiple rings
- - Deployment to the next ring starts automatically after the current ring converged
- - Emergency brake: ability to block package rollout to the next ring
-1. The QA Cell becomes a blocker for auto-deploy
-1. Control Plane for Cells - Cluster dashboards and alerting
- - A dashboard should indicate what package is expected for any given Cell and Ring deployment
- - Any cell not running the desired version should be easily visible and alert if not converged in a reasonable amount of time
- - Deployment health metrics to block package rollout inside a ring (z-score on the four golden signals?)
-1. The Post Deploy Migration (PDM) step of deployments needs to be segregated from the application deployment to ensure we have the ability to perform rollbacks on Cells.
- - Without this capability, a Cell must suffer downtime in order for a rollback to complete successfully. This is disruptive and should not be considered a wise solution.
- - The separation of the PDM on the primary Cell already functions as desired. Thus our Primary Cell will have rollbacks as an option to mitigate incidents.
-1. Modified tooling that enables us to target only Deploying the GitLab application. Currently the destined tooling to be leveraged employs a strategy where the entire installation is converged. This includes the infrastructure and the version of GitLab which creates a lengthy CI pipeline and long running jobs.
-1. Automated Rollbacks - if a deployment fails for any reason, a rollback procedure should be initiated automatically to minimize disruption to the affected Cell. We should be able to use a health metric for this.
-
-The focus here is productionalizing what has been built and cleaning up areas of tech debt incurred during the MVP stage of the first iteration.
-
-#### Mindmap
-
-```mermaid
-%%{init: {'theme':'default'}}%%
-mindmap
- root((Cells Deployment))
- Core concepts 📚
- Current .com infra is the Primary Cell
- It is possible to have multiple Cells in Cell 1.0
- Cell isn't an HA solution
- Secondary Cells talks to the Primary Cell using internal API
-
- Prerequisites 🏗️
- router
- HA solution
- independent deployment
- Dedicated
- Support GCP
- Cell reference architecture
-
- Decisions 📔
- Ring style deployment
- Ring 0: Current Canary + a new QA Cell
- Ring 1: Current Main stage
- Ring 2+: New Secondary Cells for customers
- The Perimenter
- Marks the handover point between Release Managers and Team:Ops
- Inside: Ring 0 and 1
- Outside: Ring 2+
- Running PDM inside the perimeter graduates a package
- A graduated pacage is a valid candidate for the monthly release
- We are not porting staging to Cell
- A new QA Cell will validate the package without affecting users
-
- Procedures needing upgrades 🦾
- Rollbacks
- auto-deploy rollout
- Post Deployment Migration
- Deployment health metrics
-
- Risk area ☢️
- There is no dogfooding in Cell 1.0 and 1.5
-```
-
-#### Deployment coordinator and Cell Cluster Coordinator
-
-In the context of `auto deploy` we have an external coordinator pipeline, inside the `release-tools` project, that takes care of orchestrating package generation and rollout invoking the specific tool for each job.
-
-In today's GitLab.com infrastructure, deployments are executed by specific tools (`deployer` and `gitlab-com/k8s-workloads`) that can be independently operated by SRE and Release Managers, with the introduction of the Cell cluster we will face new operational challenges like a simple cluster overview, package rollout status, feature flag configuration, provisioning and deprovisioning.
-
-The GitLab Dedicated stack features its own method of controlling installs of GitLab, primarily through a slew of tools, Switchboard, Amp, and Tenctl. The use of Switchboard is not geared towards Cells and thus cannot be leveraged. Other tooling such as Instrumentor and Amp may have a place or modifications to enable them to be more portable for usage between both the Dedicated team and Cells. We'll need to evaluate these tools, their interactions with Cells, and how we may leverage them. Pending how the work is scheduled, this may be a highly collaborative effort with team members working closely across team boundaries to ensure requirements are met during the initial period or MVP for Cells.
-
-In this paragraph we describe an ideal interaction where a data store is updated with a desired version to be deployed, and a Cell Cluster Coordinator is created to support Cell deployments.
-
-In Cell 1.0, inside the perimeter, we will have a single Secondary Cell, the QA Cell.
-We should expand release-tools to command some-tool to perform a Cell update on demand.
-
-```plantuml
-@startuml
-participant "release-tools" as rt
-participant "Cell Cluster Coordinator" as sb
-participant "AMP Cluster" as AMP
-collections "Secondary Cells" as cells
-participant QA
-
-rt -> sb: update QA Cell
-note right: In parallel with canary stage rollout
-sb -> AMP: schedule deployment job
-AMP -> cells: QA Cell: version rollout
-cells --> AMP
-AMP --> sb
-sb --> rt
-rt --> QA: test QA Cell
-note over rt,QA: It does not replace canary QA
-QA -> cells: run tests
-cells --> QA
-QA --> rt
-@enduml
-```
-
-As we mentioned before, when we run post-deployment migrations in Ring 1, release-tools will mark that version as `graduated` and thus be capable to rollout outside of the perimeter.
-
-Cell Cluster Coordinator will be leveraged to help coordinate automated version upgrades to further rings with automated checks before and after deployments to ensure we are deploying to the correct cells of a desired ring and validate instances are healthy before and after deployments, rolling back in the face of failure, and alerting the appropriate teams as necessary.
-
-```plantuml
-@startuml
-participant "Cell Cluster Coordinator" as sb
-participant "AMP Cluster" as AMP
-collections "Secondary Cells" as cells
-
-loop
-sb -> sb: pull ring-version mapping
-opt version missmatch
-sb -> AMP: schedule configure job
-AMP -> cells: CellX: version rollout
-cells --> AMP
-AMP --> sb
-opt the ring now run the same version
-sb -> sb: promote version to the next ring
-end
-end
-end
-@enduml
-```
-
-### Procedures
-
-#### Auto-Deploy
-
-Auto-deploy shall continue to work as it does today as our Primary Cell is equivalent to our legacy .com infrastructure. Thus our existing procedures related to auto-deploy can still be continued to be leveraged. Think hot-patching, rollbacks, auto-deploy picking, the PDM, the existing auto-deploy schedule, etc. A new procedure will be added to ensure that `release-tools` knows to trigger a deployment after a PDM is executed to the next Ring. Currently `release-tools` doesn't understand anything related to Ring Deployments, this is functionality that will need to be added.
-
-- Auto-deploy is limited to Rings 0 and 1:
- - Ring 0 contains a QA Cell plus the canary stage of the .com infra
- - Ring 1 contains main stage of the .com infra - this is the cut off for release tools
- - All cells will deploy the same way; this eliminates needing to deal with differing deployment technologies
- - `release-tools` will interact with the Coordinator to pilot the deployments to Ring 0 as part of its coordinator pipeline
-- Release-tools must be able to `graduate` a package:
- - A `graduate` version of GitLab is any `auto-deploy` version which has a successful deploy onto the Main Stage of Production and the [Post Deploy Migration (PDM)](https://gitlab.com/gitlab-org/release/docs/-/blob/master/general/post_deploy_migration/readme.md) has completed.
- - This could mean we expect to see a single package deploy each day to our Secondary Cells. Currently, the PDM is only run 1 time per day. Note that there are exceptions to this rule.
- - This will enable us to use our existing procedures to remediate high severity incidents where application code may be at fault.
- - We do not want to run official released versions of GitLab as these are produced far slower than auto-deploys thus we risk missing SLA's on incident response. In the cell architecture, most issues should be found in the Primary Cell and fixed prior to being deployed to any Secondary Cell.
- - We'll need new procedures, runbooks, and documentation such that when a problem is found through manual testing, we have some ability to halt deployments of what may be labeled a `graduated` package from actually being deployed.
- - It would be wise to track these failure cases as realistically, QA should be finding issues to enable us to run a automated deployments.
-
-Note that currently, some smaller components deploy themselves to the .com infrastructure. Notably, Zoekt, Container Registry, and Mailroom, have their own cadence of providing newer versions to .com. This aspect will not be carried over into secondary cells, as currently, the tooling we'll leverage does not allow a segregation of components to enable this functionality. Instead, we'll rely on the current defined versions as specified in the default branch which built the `auto-deploy` package. This mimics how our releases are accomplished and thus should carry over well with Cells.
-
-#### Rollbacks
-
-Long term, we should aim to modify the deployment tooling such that Cells are provided a grace period to enable each of them to be able to be safely rolled back in the event of a deployment failure, or mitigating a failure that is noticed post deployment. Currently for the legacy .com or the Primary Cell, we hold the PDM to execute 1 time per day at the discretion of Release Managers. The tooling that performs deployments to Cells currently do not have a way to NOT run the PDM, thus no there does not currently exist a way to rollback without inducing downtime on a particular Cell. Procedures and tooling updates will be required in this area.
-
-#### Hot patching
-
-Hot patching is one source of our ability to mitigate problems. If we rely on `graduate` versions, the hot patcher has no place for secondary cells. It could still be leveraged for our Primary Cell, however. Though, it would be wise if we can eliminate hot patching in favor of safer deployment methodologies.
-
-> For reference, we've only hot patched production 1 time for year 2023.
-
-#### Deployment Health Metrics
-
-Currently we do not automate a deployment to the Main stage of the .com legacy infrastructure, or the Primary Cell. In order to reduce operational overhead we should be able to rely on existing metrics which form a health indicator for a given installation and automatically trigger a deployment at the appropriate time. This deployment health indicator would also need to be carried into each of our cells. Tooling that triggers a deployment at various rings should be made aware to continue or halt a deploy given the status of earlier rings and the health state of the next target ring.
-
-#### Feature Flags
-
-Feature Flags are discussed in [data-stores#83](https://gitlab.com/gitlab-org/enablement-section/data-stores/-/issues/83).
-
-#### Package Rollout Policy
-
-We have an implicit procedure driven by our current use of auto-deploys. This will become more prominent with Cells. As implied in various formats above, auto-deploy shall operate relatively similarly to how it operates today. Cells becomes an addition to the existing `release-tools` pipeline with triggers in differing areas. When and what we trigger will need to be keenly defined. It is expected that Secondary Cells only receive `graduated` versions of GitLab. Thus, we'll leverage the use of our Post Deployment Migration pipeline as the gatekeeper for when a package is considered `graduated`. In an ideal world, when the PDM is executed successfully on the Primary Cell, that package is then considered `graduated` and can be deployed to any outer ring. This same concept is already leveraged when we build releases for self managed customers. This break point is already natural to Release Managers and thus is a good carry over for Cell deployments.
-
-We should aim to deploy to Cells as quickly as possible. For all Cells that exist in a single ring, we should have the ability to deploy in parallel. Doing so minimizes the version drift between Cells and reduces potential issues. If the version drifts too greatly, auto-deploy shall pause itself and an investigation into the reason why we are too far behind begins. Ideally we know about this situation ahead of time. We should aim to be no greater than 1 `graduate` package behind our PDM. Thus the expectation is that for every PDM, is a deployment to our Cells, every day. There are days which the PDM is skipped. We'll need to evaluate on a case-by-case basis why the PDM is halted to determine the detriment this will incur on our Cell deployments.
-
-Rings outside of the perimeter are self-managed by the orchestration engine. Once `release-tools` graduates a package it can forget about it. The orchestration engine will converge the desired GitLab version to all Cell in Ring 2, ther first ring outside of the perimeter, and move to next ring only when all Cells converged.
-
-### FAQ
-
-**Will Developers see indicators on MR's as they are deployed to various Cells?**
-
-No. Our current labeling schema is primarily to showcase that the commit landed in production, the PDM successfully executed, which signals to us that the observed commit is safe for being placed in a release for self-managed customers. Being that after we reach this point, issues with a package should be minimal, there's no need to update issues/MR's with the status as we move forward into our many Rings of deployments. Developers should not need to care what version is deployed to what Cell.
-
-**A P1/S1 issue exists, how do we mitigate this on Cells?**
-
-Cells are still a part of .com, thus our existing [bug](https://handbook.gitlab.com/handbook/engineering/infrastructure/engineering-productivity/issue-triage/#severity-slos) and [vulnerability](https://handbook.gitlab.com/handbook/security/threat-management/vulnerability-management/#remediation-slas) SLA's for remediation apply. We can deploy whatever we want to secondary cells so long as it's considered `graduated`. If a high priority issue comes about, we should be able to freely leverage our existing procedures to update our code base and any given auto-deploy branch for mitigation, and maybe after some extra rounds of testing, or perhaps a slower roll out, we can deploy that auto-deploy package into our cells. This provides us with the same mitigation methods that we leverage today. The problem that this causes is that there could exist some code that may not have been fully vetted. We can still rely on rollbacks in this case and revisit any necessary patch for the next round of auto-deployments and evaluate the fix for another attempt to remediate our cells.
-
-**What changes are expected from a Developers perspective**
-
-Release and Auto-Deploy procedures should largely remain the same. We're shifting where code lands. Any changes in this realm would increase the most the closer we are to Iteration 2.0 when various environments or stages to GitLab begin to change.
-
-**All tiers but one have a failed deploy, what triggers a rollback of that package for all cells?**
-
-This depends on various characteristics that we'll probably want to iterate on and develop processes for. Example, if we fail on the very first cell on the first Tier, we should investigate that cell, but also ensure that this is not systemic to all cells. This can only be handled on a case-by-case basis. If we reach the last tier and last cell and some failure would occur, there should be no reason to rollback any other cell as enough time should have passed by for us to catch application failures.
-
-**What happens with self-managed releases?**
-
-Theoretically not much changes. Currently we use Production, or .com's Main Stage as our proving grounds for changes that are destined to be releasable for self-managed. This does not change as in the Cellular architecture, this notion for this exists in the same place. The vocabulary changes, in this case, a `graduated` package is now considered safe for a release.
-
-**What happens to PreProd**
-
-This instance specifically tests the hybrid installation of a GitLab package and Helm chart when we create release candidates. It's our last step prior to a release being tagged. This is not impacted by the Cells work. Though we may change how preprod is managed.
-
-**What happens with Staging**
-
-Staging is crucial for long term instance testing of a deployment alongside QA. Hypothetically staging could completely go away in favor of a deployment to Tier 0. Reference the above Iteration 3 {+TODO add proper link+}
-
-**What happens to Ops**
-
-No need to change. But if Cell management becomes easy, it would be prudent to make this installation operate as similar as possible to avoid overloading operations teams with unique knowledge for our many instances.
-
-This same answer could be provided for the Dev instance.
+
+
+
+
diff --git a/doc/architecture/blueprints/cells/deployment-architecture.md b/doc/architecture/blueprints/cells/deployment-architecture.md
index b090106d5d7..45f8e7f1128 100644
--- a/doc/architecture/blueprints/cells/deployment-architecture.md
+++ b/doc/architecture/blueprints/cells/deployment-architecture.md
@@ -1,155 +1,11 @@
---
-stage: enablement
-group: Tenant Scale
-description: 'Cells: Deployment Architecture'
+redirect_to: 'rejected/deployment-architecture.md'
+remove_date: '2024-05-16'
---
-# Cells: Deployment Architecture
+This document was moved to [another location](rejected/deployment-architecture.md).
-This section describes the existing deployment architecture
-of GitLab.com and contrasts it with the expected Cells architecture.
-
-## 1. Before Cells - Monolithic architecture
-
-
-
-The diagram represents simplified GitLab.com deployment components before the introduction of a Cells architecture.
-This diagram intentionally misses some services that are not relevant for the architecture overview (Cloudflare, Consul, PgBouncers, ...).
-Those services are considered to be Cell-local, with the exception of Cloudflare.
-
-The component blocks are:
-
-- Separate components that can be deployed independently.
-- Components that are independent from other components and offer a wide range of version compatibility.
-
-The application layer services are:
-
-- Strongly interconnected and require to run the same version of the application.
- Read more in [!131657](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/131657#note_1563513431).
-- Each service is run across many nodes and scaled horizontally to provide sufficient throughput.
-- Services that interact with other services using an API (REST, gRPC), Redis or DB.
-
-The dependent services are:
-
-- Updated infrequently and selectively.
-- Might use cloud managed services.
-- Each service is clustered and might be run across different availability zones to provide high availability.
-- Object storage is also accessible directly to users if a pre-signed URL is provided.
-
-## 2. Development Cells - Adapting application to Cellular architecture
-
-
-
-The purpose of **Development Cells** is to model a production-like architecture for the purpose of testing and validating the changes introduced.
-This could be achieved with testing Cells on top of the [Reference Architectures](../../../administration/reference_architectures/index.md).
-Read more in [#425197](https://gitlab.com/gitlab-org/gitlab/-/issues/425197).
-
-The differences compared to [Before Cells](#1-before-cells---monolithic-architecture) are:
-
-- A Routing Service is developed by Cells.
-- Development Cells are meant to be run using a development environment only to allow prototyping of Cells without the overhead of managing all auxiliary services.
-- Development Cells represent a simplified GitLab.com architecture by focusing only on essential services required to be split.
-- Development Cells are not meant to be used in production.
-- Cluster-wide data sharing is done with a read-write connection to the main database of Cell 1: PostgreSQL main database, and Redis user-sessions database.
-
-## 3. Initial Cells deployment - Transforming monolithic architecture to Cells architecture
-
-
-
-The differences compared to [Development Cells](#2-development-cells---adapting-application-to-cellular-architecture) are:
-
-- A Cluster-wide Data Provider is introduced by Cells.
-- The Cluster-wide Data Provider is deployed with Cell 1 to be able to access cluster-wide data directly.
-- The cluster-wide database is isolated from the main PostgreSQL database.
-- A Cluster-wide Data Provider is responsible for storing and sharing user data,
- user sessions (currently stored in Redis sessions cluster), routing information
- and cluster-wide settings across all Cells.
-- Access to the cluster-wide database is done asynchronously:
- - Read access always uses a database replica.
- - A database replica might be deployed with the Cell.
- - Write access uses the dedicated Cluster-wide Data Provider service.
-- Additional Cells are deployed, upgraded and maintained via a [GitLab Dedicated-like](../../../subscriptions/gitlab_dedicated/index.md) control plane.
-- Each Cell aims to run as many services as possible in isolation.
-- A Cell can run its own Gitaly cluster, or can use a shared Gitaly cluster, or both.
- Read more in [!131657](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/131657#note_1569151454).
-- Shared Runners provided by GitLab are expected to be run locally on the Cell.
-- Infrastructure components might be shared across the cluster and be used by different Cells.
-- It is undefined whether Elasticsearch service would be better run cluster-wide or Cell-local.
-- Delay the decision how to scale the **GitLab Pages - `gitlab.io`** component.
-- Delay the decision how to scale the **Registry - `registry.gitlab.com`** component.
-
-## 4. Hybrid Cells deployment - Initial complete Cells architecture
-
-
-
-The differences compared to [Initial Cells deployment](#3-initial-cells-deployment---transforming-monolithic-architecture-to-cells-architecture) are:
-
-- Removes coupling of Cell N to Cell 1.
-- The Cluster-wide Data Provider is isolated from Cell 1.
-- The cluster-wide databases (PostgreSQL, Redis) are moved to be run with the Cluster-wide Data Provider.
-- All application data access paths to cluster-wide data use the Cluster-wide Data Provider.
-- Some services are shared across Cells.
-
-## 5. Target Cells - Fully isolated Cells architecture
-
-
-
-The differences compared to [Hybrid Cells deployment](#4-hybrid-cells-deployment---initial-complete-cells-architecture) are:
-
-- The Routing Service is expanded to support [GitLab Pages](../../../user/project/pages/index.md) and [GitLab container registry](../../../user/packages/container_registry/index.md).
-- Each Cell has all services isolated.
-- It is allowed that some Cells will follow a [hybrid architecture](#4-hybrid-cells-deployment---initial-complete-cells-architecture).
-
-## Isolation of Services
-
-Each service can be considered individually regarding its requirements, the risks associated
-with scaling the service, its location (cluster-wide or Cell-local), and impact on our ability to migrate data between Cells.
-
-### Cluster-wide services
-
-| Service | Type | Uses | Description |
-| ------------------------------ | ------------ | ------------------------------- | --------------------------------------------------------------------------------------------------- |
-| **Routing Service** | GitLab-built | Cluster-wide Data Provider | A general purpose routing service that can redirect requests from all GitLab SaaS domains to the Cell |
-| **Cluster-wide Data Provider** | GitLab-built | PostgreSQL, Redis, Event Queue? | Provide user profile and routing information to all clustered services |
-
-As per the architecture, the above services are required to be run cluster-wide:
-
-- Those are additional services that are introduced by the Cells architecture.
-
-### Cell-local services
-
-| Service | Type | Uses | Description |
-| ------------------------------ | ------------ | ------------------------------- | --------------------------------------------------------------------------------------------------- |
-| **Redis Cluster** | Managed service | Disk storage | No problem | Redis is used to hold user sessions, application caches, or Sidekiq queues. Most of that data is only applicable to Cells. |
-| **GitLab Runners Manager** | Managed service | API, uses Google Cloud VM Instances | No problem | Significant changes required to API and execution of CI jobs |
-
-As per the architecture, the above services are required to be run Cell-local:
-
-- The consumer data held by the Cell-local services needs to be migratable to another Cell.
-- The compute generated by the service is substational and is strongly desired to reduce impact of [single Cell failure](goals.md#high-resilience-to-a-single-cell-failure).
-- It is complex to run the service cluster-wide from the Cells architecture perspective.
-
-### Hybrid Services
-
-| Service | | Uses | Migrate from cluster-wide to Cell | Description |
-| ------------------- | --------------- | ------------------------------- | ----------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- |
-| **GitLab Pages** | GitLab-built | Routing Service, Rails API | No problem | Serving CI generated pages under `.gitlab.io` or custom domains |
-| **GitLab Registry** | GitLab-built | Object Storage, PostgreSQL | Non-trivial data migration in case of split | Service to provide GitLab container registry |
-| **Gitaly Cluster** | GitLab-built | Disk storage, PostgreSQL | No problem: Built-in migration routines to balance Gitaly nodes | Gitaly holds Git repository data. Many Gitaly clusters can be configured in application. |
-| **Elasticsearch** | Managed service | Many nodes required by sharding | Time consuming: Rebuild cluster from scratch | Search across all projects |
-| **Object Storage** | Managed service | | Not straightforward: Rather hard to selectively migrate between buckets | Holds all user and CI uploaded files that is served by GitLab |
-
-As per the architecture, the above services are allowed to be run either cluster-wide or Cell-local:
-
-- The ability to run hybrid services cluster-wide might reduce the amount of work to migrate data between Cells due to some services being shared.
-- The hybrid services that are run cluster-wide might negatively impact Cell availability and resiliency due to increased impact caused by [single Cell failure](goals.md#high-resilience-to-a-single-cell-failure).
-
-| Service | Type | Uses | Description |
-| ------------------------------ | ------------ | ------------------------------- | --------------------------------------------------------------------------------------------------- |
-| **Elasticsearch** | Managed service | Many nodes requires by sharding | Time consuming: Rebuild cluster from scratch | Search across all projects |
-| **Object Storage** | Managed service | | Not straightforward: Rather hard to selectively migrate between buckets | Holds all user and CI uploaded files that is served by GitLab |
-
-As per the architecture, the above services are allowed to be run either cluster-wide or Cell-local:
-
-- The ability to run above services cluster-wide might reduce the amount of work to migrate data between Cells due to some services being shared.
-- The hybrid services that are run cluster-wide might negatively impact Cell availability and resiliency due to increased impact caused by [single Cell failure](goals.md#high-resilience-to-a-single-cell-failure).
+
+
+
+
diff --git a/doc/architecture/blueprints/cells/index.md b/doc/architecture/blueprints/cells/index.md
index 2f0bdcb33f3..43dcdc85741 100644
--- a/doc/architecture/blueprints/cells/index.md
+++ b/doc/architecture/blueprints/cells/index.md
@@ -32,10 +32,6 @@ For more information about Cells, see also:
See [Goals, Glossary and Requirements](goals.md).
-## Deployment Architecture
-
-See [Deployment Architecture](deployment-architecture.md).
-
## Work streams
We can't ship the entire Cells architecture in one go - it is too large.
@@ -192,9 +188,9 @@ flowchart TD
See [Cells: Routing Service](routing-service.md).
-### 4. Cell deployment
+### 4. Infrastructure
-See [Cell: Application deployment](application-deployment.md).
+See [Cell: Infrastructure](infrastructure/index.md).
### 5. Migration
@@ -222,7 +218,7 @@ We are following the [Support for Experiment, Beta, and Generally Available feat
Expectations:
-- We can deploy a Cell on staging or another testing environment by using a separate domain (for example `cell2.staging.gitlab.com`) using [Cell deployment](#4-cell-deployment) tooling.
+- We can deploy a Cell on staging or another testing environment by using a separate domain (for example `cell2.staging.gitlab.com`) using [infrastructure](#4-infrastructure) tooling.
- User can create Organization, Group and Project, and run some of the [workflows](#2-workflows).
- It is not expected to be able to run a router to serve all requests under a single domain.
- We expect data loss of data stored on additional Cells.
diff --git a/doc/architecture/blueprints/cells/infrastructure/deployments.md b/doc/architecture/blueprints/cells/infrastructure/deployments.md
new file mode 100644
index 00000000000..9df84bcc368
--- /dev/null
+++ b/doc/architecture/blueprints/cells/infrastructure/deployments.md
@@ -0,0 +1,417 @@
+---
+owning-stage: "~devops::platforms"
+group: Delivery
+description: 'Cells: Application Deployment'
+creation-date: "2024-01-09"
+authors: [ "@nolith", "@skarbek" ]
+coach:
+approvers: []
+---
+
+Disclaimer: This blueprint requires more cross-functional alignment. Confidence level: low.
+
+# Application Deployment with a Cellular Architecture
+
+This blueprint describes a deployment strategy that can support the new scaling dimension introduced by the Cell architecture.
+
+The complexity of this transition will demand participation from many teams in the Platforms section, each taking ownership of the features necessary to reach a production-grade rating for this architecture.
+
+## Introduction
+
+### Preamble
+
+From a high-level perspective, a Cell Cluster is a system made of only three items:
+
+1. **Router** - An HA routing system deployed independently from the GitLab application.
+1. **Primary Cell** - The GitLab installation that is the leader for all the cluster wide data and services. This will be the legacy GitLab.com deployment.
+1. Zero or more **Secondary Cells** - GitLab installations authoritative for a limited number of Organizations. Those Cells are deployed using GitLab Dedicated tools.
+
+```plantuml
+@startuml
+
+actor User
+
+cloud router {
+component Router as R
+}
+
+component "Primary Cell" as Primary
+collections "Secondary Cells" as Secondary
+
+User ==> R
+R ==> Primary
+R ==> Secondary
+
+Secondary --> Primary : "Internal API"
+
+@enduml
+```
+
+As we can see from the diagram, users interact with the system only through the router. Secondary Cells communicate with the Primary Cell using an internal API and keep a local copy of all the database rows necessary to operate.
+
+It is important to note that even though Secondary Cells support GitLab Geo out of the box, we will not be able to offer this feature to our users until the Router supports it.
+
+### Key Terms
+
+- Deployment - The GitLab application and its components being installed into infrastructure
+- `auto-deploy` version - The active version that creates a package viable for deployment
+- ring - A logical partition of the cell cluster. In order to deploy to the next ring a package must be validated inside the current ring
+- `perimeter` - the ring marking the "definition of done" for Release Managers; a package validated inside the perimeter is allowed to roll out to the rest of the fleet
+- `graduated` version - The version deemed safe to deploy to cells outside of the perimeter
+- `.com` - refers to our existing, currently running legacy infrastructure
+- Primary Cell - The GitLab installation that is the leader for all the cluster wide data and services. Initially this will be the legacy GitLab.com deployment. This implicitly includes .com as our legacy infrastructure.
+- Secondary Cell(s) - GitLab installation(s) authoritative for a limited number of Organizations. Cell(s) are deployed using GitLab Dedicated tools.
+
+### Ring deployment
+
+The scale of the Cell project deployment, together with the strong user partitioning, maps well to a [ring deployment](https://configcat.com/ring-deployment/) approach.
+
+```plantuml
+@startuml
+
+skinparam frame {
+  borderColor<<perimeter>> red
+}
+
+left to right direction
+
+frame "Ring 3" as r3 {
+ component "Cell4" as c4
+ component "Cell5" as c5
+ component "Cell6" as c6
+ component "Cell7" as c7
+ component "Cell8" as c8
+ component "Cell9" as c9
+
+ frame "Ring 2" as r2 {
+ component "Cell1" as c1
+ component "Cell2" as c2
+ component "Cell3" as c3
+
+ frame "Ring 1" <> as r1 {
+ frame "Ring 0" as r0 {
+ component "Canary stage" <> as cny
+ component "QA Cell" as QA
+
+ note as ring0_note
+ Ring 0 goes in parallel with canary
+ QA tests executed on **canary and QA Cell**
+ end note
+ }
+
+ component "Main stage\nPrimary Cell" <> as Primary
+
+ note as perimeter_note
+    The perimeter marks the definition of done for an auto-deploy package.
+ When post-deployment migrations are executed inside the perimeter,
+ the package is ready to be pulled by the outer rings
+ **outside of the release managers coordinator pipeline**
+ end note
+ }
+
+ note as baking_areas
+ A package cannot rollout to the next ring before it is successfully
+ installed inside the current ring.
+ end note
+ }
+}
+
+@enduml
+```
+
+The image above shows a possible ring layout for a cluster made of the Primary Cell and 10 Secondary Cells, the upper bound of the Cells 1.0 milestone.
+
+The general rule is that:
+
+1. The deployment process progresses from Ring 0 to the outer rings.
+1. Rings are collections of Cells sharing the same risk factor associated with a deployment.
+1. Deployments can be halted at any stage, and a halted package will not reach the outer rings.
+1. We define a "perimeter" ring that marks the "definition of done" for the Release Managers.
+   - Crossing the perimeter is the logical point in a given package's lifecycle after the PDM has successfully run on the Main Stage. Effectively, this sits between Ring 1 and Ring 2 as described throughout this document.
+   - A successful run of the Post Deploy Migrations inside the perimeter marks a package as `graduated`.
+   - A `graduated` package is a valid candidate for the monthly release.
+   - A `graduated` package is rolled out to the rest of the rings automatically.
+   - Deployments must be automated: inside the perimeter they are the responsibility of Release Managers, outside of it they are the responsibility of Team:Ops. A minimal sketch of this rollout gate follows this list.
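+
+Concretely, assuming a hypothetical `Package` record (none of these names are existing `release-tools` or Coordinator APIs), the gate could look like:
+
+```python
+from dataclasses import dataclass, field
+
+PERIMETER = 1  # the outermost ring that Release Managers are responsible for
+
+
+@dataclass
+class Package:
+    version: str
+    graduated: bool = False  # set after a successful PDM inside the perimeter
+    converged_rings: set[int] = field(default_factory=set)
+
+
+def can_roll_out(package: Package, target_ring: int) -> bool:
+    """A package may leave the perimeter only once graduated, and may
+    enter a ring only after it converged in the previous ring."""
+    if target_ring > PERIMETER and not package.graduated:
+        return False
+    return target_ring == 0 or (target_ring - 1) in package.converged_rings
+```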
+
+### Reference materials
+
+- [Cell 1.0 blueprint](https://gitlab.com/gitlab-org/gitlab/-/blob/master/doc/architecture/blueprints/cells/iterations/cells-1.0.md)
+- [The merge request for this blueprint](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/141427)
+- [Delivery Point of View on Cells](https://gitlab.com/gitlab-com/Product/-/issues/12770)
+- [GitLab.com deployment process before Cells](https://gitlab.com/gitlab-com/content-sites/handbook/-/blob/21f6898110466b5c581a881db0ce343bf9cb1a72/content/handbook/engineering/deployments-and-releases/deployments/index.md)
+
+## Goals and Non-Goals
+
+### Goals
+
+- Limit the increase in cognitive load for the release manager; in doing so, we defined the perimeter as the clear handover point where a package is no longer a release manager's responsibility.
+- Limit the blast radius of failures by partitioning the cell cluster into rings, with automated validation between rings
+- Ensure deployments are reliably automated
+- Ensure automatic handling of failed deployments
+- Provide observability into package rollouts and deployments
+
+### Non-Goals
+
+- Extending `release-tools` to take ownership of the Cell Application Deployments. A smaller, more specific piece of software will allow us to keep the tooling focused on one job.
+- Introduce major changes related to Release Management
+- Lifecycle management of Cells
+- Management of routing traffic to/from Cells
+- Individual component deploys
+
+## Requirements
+
+Before we can integrate Secondary Cells into our deployment pipeline, we need a few items immediately:
+
+1. The router must exist, be HA, and have an independent deployment pipeline
+   - This is required for appropriate testing. As noted below, we'll need a QA Cell to direct deployments to, against which QA will execute tests. The router will need to route QA tests to the appropriate Cell.
+1. Assets Deployment
+ - This already exists today for .com. Today this is handled via HAProxy, but with Cells, the routing layer will become the responsible party to redirect assets in a similar fashion.
+   - If assets are chosen to be managed differently, this changes both how Delivery needs to deploy said assets in order to get as close to Zero Downtime Upgrades as possible, and the configuration of the Cell installation to support routing to assets properly.
+1. Feature Flags
+ - We are assuming that the current Feature Flags workflows and tooling will just work on the Primary Cell and that Secondary Cells will not be affected.
+ - The use of feature flags to mitigate incidents is limited to only the Primary Cell.
+ - Tooling may need to mature to ensure that Cells do not drift for long periods of time with feature flags. This ensures that customers have a similar experience if their work expands across Cells and that we as operators of .com need not worry about version drift and the implications of code differing behind the feature flag.
+   - Further guidance and documentation will need to be developed for this area. Engineers shouldn't need to care which Cell an organization is a part of; Feature Flag toggles must abstract that detail away.
+
+## Proposed plan of action
+
+From a delivery perspective, not much changes between the three proposed Cells iterations (1.0, 1.5, and 2.0). The split is an iterative approach based on cutting the scope of the features available for Organizations bound to a given Cell. From a deployment point of view, it should be possible to have multiple Secondary Cells from the first iteration, so we have to figure out a roadmap to get there that is independent of the Cell architecture version.
+
+### Iterations
+
+#### Cells 1.0
+
+The intent in this iteration is to focus our efforts on building and integrating our own tooling that builds and manages Cells. The following milestones, and their exit criteria, are a collaborative effort of the Platforms section and span many teams.
+
+1. The Dedicated technology stack expansion:
+ - Instrumentor and AMP support GCP
+ - A cell is defined as a reference architecture in Instrumentor
+1. Control Plane for Cells - Cell Cluster Coordinator
+ - Switchboard is currently leveraged by Dedicated but is not an appropriate tool for Cells. We should evaluate the capabilities of other tooling created by Dedicated, `amp` and `instrumentor`, to determine how they could be integrated into a deployment workflow.
+ - Implement Cell deployment converging the entire infrastructure of the cell (current dedicated capability)
+ - Implement the concept of Rings: initially only Rings 0 and 2
+1. First Secondary Cell: the QA Cell in Ring 0
+ - Build integration with our current tooling to perform deployments to the QA cell via the Coordinator
+   - The QA Cell runs its own QA smoke tests
+   - The QA Cell is updated in parallel with the production canary stage: QA Cell failures are considered soft and do not block auto-deploy
+1. Control Plane for Cells - Individual dashboards and alerting
+ - observability is at least on par with the legacy infrastructure
+ - alerting is at least on par with the legacy infrastructure
+1. First Customer Secondary Cell: Ring 2
+ - release-tools can `graduate` a package after the PDM execution
+ - the Coordinator can manage Ring 2 deployments
+1. Support for multiple Secondary Cells
+ - the Coordinator can converge multiple cells in the same Ring to the desired version
+
+> - Limitations:
+> - all Secondary Cells will be in the same ring, Ring 2
+> - Rollbacks are possible but require downtime to achieve on all secondary cells
+
+#### Cells 1.5 and 2.0
+
+The following features can be distributed between Cells 1.5 and 2.0. They all improve operational aspects, and we should prioritize them as we learn more about operating Cells.
+
+1. Control Plane for Cells - Additional rings
+ - Secondary Cells can be spread over multiple rings
+   - Deployment to the next ring starts automatically after the current ring has converged
+ - Emergency brake: ability to block package rollout to the next ring
+1. The QA Cell becomes a blocker for auto-deploy
+1. Control Plane for Cells - Cluster dashboards and alerting
+ - A dashboard should indicate what package is expected for any given Cell and Ring deployment
+   - Any cell not running the desired version should be easily visible, and an alert should fire if it does not converge in a reasonable amount of time
+ - Deployment health metrics to block package rollout inside a ring (z-score on the four golden signals?)
+1. The Post Deploy Migration (PDM) step of deployments needs to be segregated from the application deployment to ensure we have the ability to perform rollbacks on Cells.
+ - Without this capability, a Cell must suffer downtime in order for a rollback to complete successfully. This is disruptive and should not be considered a wise solution.
+ - The separation of the PDM on the primary Cell already functions as desired. Thus our Primary Cell will have rollbacks as an option to mitigate incidents.
+1. Modified tooling that enables us to target deploying only the GitLab application. Currently, the tooling destined to be leveraged converges the entire installation, including both the infrastructure and the version of GitLab, which creates a lengthy CI pipeline and long-running jobs.
+1. Automated Rollbacks - if a deployment fails for any reason, a rollback procedure should be initiated automatically to minimize disruption to the affected Cell. We should be able to use a health metric for this.
+
+The focus here is productionalizing what has been built and cleaning up areas of tech debt incurred during the MVP stage of the first iteration.
+
+#### Mindmap
+
+```mermaid
+%%{init: {'theme':'default'}}%%
+mindmap
+ root((Cells Deployment))
+ Core concepts 📚
+ Current .com infra is the Primary Cell
+ It is possible to have multiple Cells in Cell 1.0
+ Cell isn't an HA solution
+    Secondary Cells talk to the Primary Cell using an internal API
+
+ Prerequisites 🏗️
+ router
+ HA solution
+ independent deployment
+ Dedicated
+ Support GCP
+ Cell reference architecture
+
+ Decisions 📔
+ Ring style deployment
+ Ring 0: Current Canary + a new QA Cell
+ Ring 1: Current Main stage
+ Ring 2+: New Secondary Cells for customers
+    The Perimeter
+ Marks the handover point between Release Managers and Team:Ops
+ Inside: Ring 0 and 1
+ Outside: Ring 2+
+ Running PDM inside the perimeter graduates a package
+      A graduated package is a valid candidate for the monthly release
+ We are not porting staging to Cell
+ A new QA Cell will validate the package without affecting users
+
+ Procedures needing upgrades 🦾
+ Rollbacks
+ auto-deploy rollout
+ Post Deployment Migration
+ Deployment health metrics
+
+ Risk area ☢️
+ There is no dogfooding in Cell 1.0 and 1.5
+```
+
+#### Deployment coordinator and Cell Cluster Coordinator
+
+In the context of `auto-deploy` we have an external coordinator pipeline, inside the `release-tools` project, that takes care of orchestrating package generation and rollout, invoking the specific tool for each job.
+
+In today's GitLab.com infrastructure, deployments are executed by specific tools (`deployer` and `gitlab-com/k8s-workloads`) that can be independently operated by SREs and Release Managers. With the introduction of the Cell cluster, we will face new operational challenges such as providing a simple cluster overview, package rollout status, feature flag configuration, and provisioning and deprovisioning.
+
+The GitLab Dedicated stack features its own method of controlling installs of GitLab, primarily through a slew of tools: Switchboard, Amp, and Tenctl. Switchboard is not geared towards Cells and thus cannot be leveraged. Other tooling, such as Instrumentor and Amp, may have a place, or may need modifications to become portable enough for usage by both the Dedicated team and Cells. We'll need to evaluate these tools, their interactions with Cells, and how we may leverage them. Depending on how the work is scheduled, this may be a highly collaborative effort with team members working closely across team boundaries to ensure requirements are met during the initial period or MVP for Cells.
+
+In this section we describe an ideal interaction where a data store is updated with the desired version to be deployed, and a Cell Cluster Coordinator is created to support Cell deployments.
+
+In Cell 1.0, inside the perimeter, we will have a single Secondary Cell, the QA Cell.
+We should expand release-tools to command some-tool to perform a Cell update on demand, as shown in the diagram and the code-level sketch below.
+
+```plantuml
+@startuml
+participant "release-tools" as rt
+participant "Cell Cluster Coordinator" as sb
+participant "AMP Cluster" as AMP
+collections "Secondary Cells" as cells
+participant QA
+
+rt -> sb: update QA Cell
+note right: In parallel with canary stage rollout
+sb -> AMP: schedule deployment job
+AMP -> cells: QA Cell: version rollout
+cells --> AMP
+AMP --> sb
+sb --> rt
+rt --> QA: test QA Cell
+note over rt,QA: It does not replace canary QA
+QA -> cells: run tests
+cells --> QA
+QA --> rt
+@enduml
+```
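+
+In code, that interaction could look like the following minimal sketch. The `COORDINATOR_URL`, endpoint path, and function name are hypothetical placeholders for the yet-to-be-chosen tool, not an existing API:
+
+```python
+import requests  # assumes the Coordinator exposes an HTTP API
+
+COORDINATOR_URL = "https://coordinator.example.internal"  # hypothetical endpoint
+
+
+def update_qa_cell(version: str) -> None:
+    """Ask the Coordinator to roll the QA Cell to `version`,
+    in parallel with the canary stage rollout."""
+    response = requests.post(
+        f"{COORDINATOR_URL}/cells/qa/deployments",
+        json={"version": version},
+        timeout=30,
+    )
+    response.raise_for_status()
+```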
+
+As we mentioned before, when we run post-deployment migrations in Ring 1, release-tools will mark that version as `graduated`, making it eligible to roll out outside of the perimeter.
+
+The Cell Cluster Coordinator will be leveraged to coordinate automated version upgrades to further rings, with automated checks before and after deployments to ensure we are deploying to the correct cells of a desired ring, validate that instances are healthy, roll back in the face of failure, and alert the appropriate teams as necessary. The diagram and the sketch that follows illustrate this convergence loop.
+
+```plantuml
+@startuml
+participant "Cell Cluster Coordinator" as sb
+participant "AMP Cluster" as AMP
+collections "Secondary Cells" as cells
+
+loop
+sb -> sb: pull ring-version mapping
+opt version mismatch
+sb -> AMP: schedule configure job
+AMP -> cells: CellX: version rollout
+cells --> AMP
+AMP --> sb
+opt the ring now runs the same version
+sb -> sb: promote version to the next ring
+end
+end
+end
+@enduml
+```
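+
+A minimal sketch of that convergence loop, with `coordinator` and `amp` as hypothetical clients rather than real APIs, could look like this:
+
+```python
+import time
+
+
+def converge_rings(coordinator, amp, poll_seconds: int = 60) -> None:
+    """Continuously pull the ring-version mapping and schedule configure
+    jobs for any cell running the wrong version; once every cell in a
+    ring runs the desired version, promote it to the next ring.
+    `coordinator` and `amp` are hypothetical clients."""
+    while True:
+        for ring, desired in coordinator.pull_ring_version_mapping().items():
+            cells = coordinator.cells_in_ring(ring)
+            for cell in cells:
+                if cell.version != desired:  # version mismatch
+                    amp.schedule_configure_job(cell, desired)
+            if all(cell.version == desired for cell in cells):
+                coordinator.promote_version_to_next_ring(ring, desired)
+        time.sleep(poll_seconds)  # polling interval is an arbitrary placeholder
+```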
+
+### Procedures
+
+#### Auto-Deploy
+
+Auto-deploy shall continue to work as it does today, as our Primary Cell is equivalent to our legacy .com infrastructure. Thus our existing procedures related to auto-deploy can continue to be leveraged: think hot-patching, rollbacks, auto-deploy picking, the PDM, the existing auto-deploy schedule, and so on. A new procedure will be added to ensure that `release-tools` knows to trigger a deployment to the next Ring after a PDM is executed. Currently `release-tools` doesn't understand anything related to Ring Deployments; this functionality will need to be added.
+
+- Auto-deploy is limited to Rings 0 and 1:
+  - Ring 0 contains a QA Cell plus the canary stage of the .com infrastructure
+  - Ring 1 contains the main stage of the .com infrastructure - this is the cut-off for release-tools
+  - All cells will deploy the same way; this eliminates the need to deal with differing deployment technologies
+  - `release-tools` will interact with the Coordinator to pilot the deployments to Ring 0 as part of its coordinator pipeline
+- Release-tools must be able to `graduate` a package:
+  - A `graduated` version of GitLab is any `auto-deploy` version which has had a successful deploy onto the Main Stage of Production and for which the [Post Deploy Migration (PDM)](https://gitlab.com/gitlab-org/release/docs/-/blob/master/general/post_deploy_migration/readme.md) has completed (see the sketch after this list).
+  - This could mean we expect to see a single package deployed each day to our Secondary Cells. Currently, the PDM is only run once per day. Note that there are exceptions to this rule.
+  - This will enable us to use our existing procedures to remediate high severity incidents where application code may be at fault.
+  - We do not want to run officially released versions of GitLab, as these are produced far more slowly than auto-deploys and we would risk missing SLAs on incident response. In the Cell architecture, most issues should be found in the Primary Cell and fixed prior to being deployed to any Secondary Cell.
+  - We'll need new procedures, runbooks, and documentation such that when a problem is found through manual testing, we have the ability to stop a package labeled `graduated` from actually being deployed.
+  - It would be wise to track these failure cases; realistically, QA should be finding issues to enable us to run automated deployments.
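+
+A minimal sketch of the graduation rule above; the `GraduationStatus` record and its fields are illustrative assumptions standing in for real release-tools state.
+
+```ruby
+# Hypothetical graduation check: a package graduates only after a successful
+# Main Stage deploy *and* a completed Post Deploy Migration (PDM).
+GraduationStatus = Struct.new(:version, :main_stage_deployed, :pdm_completed) do
+  def graduated?
+    main_stage_deployed && pdm_completed
+  end
+end
+
+package = GraduationStatus.new('16.9.202402140800', true, false)
+package.graduated? # => false: the PDM has not completed yet
+
+package.pdm_completed = true
+package.graduated? # => true: eligible for rollout beyond the perimeter
+```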
+
+Note that currently, some smaller components deploy themselves to the .com infrastructure. Notably, Zoekt, Container Registry, and Mailroom have their own cadence of providing newer versions to .com. This aspect will not be carried over into Secondary Cells, as the tooling we'll leverage does not currently allow segregating components to enable this functionality. Instead, we'll rely on the component versions as specified in the default branch from which the `auto-deploy` package was built. This mimics how our releases are accomplished and thus should carry over well to Cells.
+
+#### Rollbacks
+
+Long term, we should aim to modify the deployment tooling such that Cells are provided a grace period which enables each of them to be safely rolled back in the event of a deployment failure, or to mitigate a failure that is noticed post-deployment. Currently, for the legacy .com or the Primary Cell, we hold the PDM to execute once per day at the discretion of Release Managers. The tooling that performs deployments to Cells currently has no way to NOT run the PDM, so there is currently no way to roll back without inducing downtime on a particular Cell. Procedures and tooling updates will be required in this area.
+
+#### Hot patching
+
+Hot patching is one source of our ability to mitigate problems. If we rely on `graduated` versions, the hot patcher has no place for Secondary Cells. It could still be leveraged for our Primary Cell. Even so, it would be wise to eliminate hot patching in favor of safer deployment methodologies.
+
+> For reference, we only hot patched production once in 2023.
+
+#### Deployment Health Metrics
+
+Currently we do not automate deployments to the Main stage of the legacy .com infrastructure, or the Primary Cell. To reduce operational overhead, we should be able to rely on existing metrics that form a health indicator for a given installation and automatically trigger a deployment at the appropriate time. This deployment health indicator would also need to be carried into each of our Cells. Tooling that triggers a deployment at various rings should be made aware of whether to continue or halt a deploy, given the status of earlier rings and the health state of the next target ring.
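+
+A minimal sketch of such a health gate, assuming hypothetical per-Cell Apdex and error-rate feeds; the metric names and thresholds are illustrative placeholders, not agreed SLOs.
+
+```ruby
+# Hypothetical deployment health gate: continue to the next ring only if the
+# previous ring deployed successfully and every target cell looks healthy.
+CellHealth = Struct.new(:name, :apdex, :error_rate) do
+  def healthy?
+    apdex >= 0.995 && error_rate <= 0.001
+  end
+end
+
+def proceed_to_ring?(previous_ring_succeeded, target_ring_cells)
+  previous_ring_succeeded && target_ring_cells.all?(&:healthy?)
+end
+
+cells = [CellHealth.new('cell-1', 0.999, 0.0002), CellHealth.new('cell-2', 0.991, 0.0004)]
+proceed_to_ring?(true, cells) # => false: cell-2's Apdex is below the threshold
+```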
+
+#### Feature Flags
+
+Feature Flags are discussed in [data-stores#83](https://gitlab.com/gitlab-org/enablement-section/data-stores/-/issues/83).
+
+#### Package Rollout Policy
+
+We have an implicit procedure driven by our current use of auto-deploys. This will become more prominent with Cells. As implied above, auto-deploy shall operate much as it does today; Cells become an addition to the existing `release-tools` pipeline, with triggers in differing areas. When and what we trigger will need to be keenly defined. It is expected that Secondary Cells only receive `graduated` versions of GitLab. Thus, we'll leverage our Post Deployment Migration pipeline as the gatekeeper for when a package is considered `graduated`. Ideally, when the PDM is executed successfully on the Primary Cell, that package is then considered `graduated` and can be deployed to any outer ring. This same concept is already leveraged when we build releases for self-managed customers. This break point is already natural to Release Managers and thus carries over well to Cell deployments.
+
+We should aim to deploy to Cells as quickly as possible. For all Cells that exist in a single ring, we should have the ability to deploy in parallel. Doing so minimizes the version drift between Cells and reduces potential issues. If the version drifts too greatly, auto-deploy shall pause itself (see the sketch below) and an investigation begins into why we are so far behind. Ideally we know about this situation ahead of time. We should aim to be no more than one `graduated` package behind our PDM. Thus the expectation is that every PDM is followed by a deployment to our Cells, every day. There are days on which the PDM is skipped. We'll need to evaluate on a case-by-case basis why the PDM was halted, to determine the detriment this incurs on our Cell deployments.
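+
+A minimal sketch of the drift rule, assuming we can list `graduated` versions and the versions currently running on Secondary Cells; the one-package threshold comes from the paragraph above.
+
+```ruby
+# Hypothetical drift check: pause auto-deploy when the Secondary Cells are
+# more than one graduated package behind the latest PDM.
+MAX_GRADUATED_PACKAGE_LAG = 1
+
+def pause_auto_deploy?(graduated_versions, cell_versions)
+  # Graduated packages that no Secondary Cell is running yet.
+  pending = graduated_versions - cell_versions
+  pending.size > MAX_GRADUATED_PACKAGE_LAG
+end
+
+pause_auto_deploy?(%w[16.9.1 16.9.2], %w[16.9.1])        # => false: one behind is acceptable
+pause_auto_deploy?(%w[16.9.1 16.9.2 16.9.3], %w[16.9.1]) # => true: pause and investigate
+```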
+
+Rings outside of the perimeter are self-managed by the orchestration engine. Once `release-tools` graduates a package, it can forget about it. The orchestration engine will converge the desired GitLab version to all Cells in Ring 2, the first ring outside of the perimeter, and move to the next ring only when all Cells have converged.
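+
+A minimal sketch of the convergence loop the orchestration engine could run, mirroring the ring-rollout diagram above; the `Ring`/`Cell` records and the in-place version assignment (standing in for an AMP configure job) are illustrative assumptions.
+
+```ruby
+# Hypothetical reconcile loop: converge every cell in a ring to the ring's
+# desired version, and promote that version to the next ring only once all
+# cells in the current ring have converged.
+Ring = Struct.new(:number, :desired_version, :cells)
+Cell = Struct.new(:name, :version)
+
+def reconcile(rings)
+  rings.each_cons(2) do |ring, next_ring|
+    ring.cells.each do |cell|
+      # Stand-in for scheduling an AMP configure job against the cell.
+      cell.version = ring.desired_version if cell.version != ring.desired_version
+    end
+
+    converged = ring.cells.all? { |cell| cell.version == ring.desired_version }
+    next_ring.desired_version = ring.desired_version if converged
+  end
+end
+
+ring2 = Ring.new(2, '16.9.2', [Cell.new('cell-1', '16.9.1')])
+ring3 = Ring.new(3, '16.9.1', [Cell.new('cell-2', '16.9.1')])
+reconcile([ring2, ring3])
+ring3.desired_version # => "16.9.2": Ring 2 converged, so the version was promoted
+```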
+
+### FAQ
+
+**Will Developers see indicators on MRs as they are deployed to various Cells?**
+
+No. Our current labeling schema primarily showcases that a commit landed in production and the PDM successfully executed, which signals to us that the observed commit is safe to include in a release for self-managed customers. Because issues with a package should be minimal after we reach this point, there's no need to update issues/MRs with the status as we move forward into our many Rings of deployments. Developers should not need to care what version is deployed to what Cell.
+
+**A P1/S1 issue exists, how do we mitigate this on Cells?**
+
+Cells are still a part of .com, thus our existing [bug](https://handbook.gitlab.com/handbook/engineering/infrastructure/engineering-productivity/issue-triage/#severity-slos) and [vulnerability](https://handbook.gitlab.com/handbook/security/threat-management/vulnerability-management/#remediation-slas) SLAs for remediation apply. We can deploy whatever we want to Secondary Cells so long as it's considered `graduated`. If a high priority issue comes about, we should be able to freely leverage our existing procedures to update our code base and any given auto-deploy branch for mitigation. Perhaps after some extra rounds of testing, or a slower rollout, we can deploy that auto-deploy package to our Cells. This provides us with the same mitigation methods that we leverage today. The risk is that some of that code may not have been fully vetted. We can still rely on rollbacks in this case, revisit any necessary patch in the next round of auto-deployments, and evaluate the fix for another attempt to remediate our Cells.
+
+**What changes are expected from a Developer's perspective?**
+
+Release and Auto-Deploy procedures should largely remain the same; we're shifting where code lands. Changes in this realm will grow the closer we get to Iteration 2.0, when various environments or stages of GitLab begin to change.
+
+**All tiers but one have a failed deploy; what triggers a rollback of that package for all cells?**
+
+This depends on various characteristics that we'll probably want to iterate on and develop processes for. For example, if we fail on the very first Cell of the first Tier, we should investigate that Cell, but also ensure that the failure is not systemic to all Cells. This can only be handled on a case-by-case basis. If a failure occurs on the last Cell of the last Tier, there should be no reason to roll back any other Cell, as enough time should have passed for us to catch application failures.
+
+**What happens with self-managed releases?**
+
+Theoretically, not much changes. Currently we use Production, or .com's Main Stage, as our proving ground for changes that are destined to be releasable for self-managed. In the Cellular architecture this does not change; the notion exists in the same place. Only the vocabulary changes: a `graduated` package is now considered safe for a release.
+
+**What happens to PreProd?**
+
+This instance specifically tests the hybrid installation of a GitLab package and Helm chart when we create release candidates. It's our last step prior to a release being tagged. This is not impacted by the Cells work, though we may change how PreProd is managed.
+
+**What happens with Staging?**
+
+Staging is crucial for long-term instance testing of a deployment alongside QA. Hypothetically, Staging could go away completely in favor of a deployment to Tier 0. Reference the above Iteration 3 {+TODO add proper link+}
+
+**What happens to Ops?**
+
+No changes are needed. However, if Cell management becomes easy, it would be prudent to make this installation operate as similarly as possible, to avoid overloading operations teams with unique knowledge for our many instances.
+
+This same answer could be provided for the Dev instance.
diff --git a/doc/architecture/blueprints/cells/infrastructure/index.md b/doc/architecture/blueprints/cells/infrastructure/index.md
new file mode 100644
index 00000000000..55233096020
--- /dev/null
+++ b/doc/architecture/blueprints/cells/infrastructure/index.md
@@ -0,0 +1,318 @@
+---
+stage: core platform
+group: Tenant Scale
+description: 'Cells: Infrastructure'
+authors: [ "@sxuereb" ]
+coach: [ "@andrewn" ]
+status: proposed
+---
+
+# Cells: Infrastructure
+
+## Pre-reads
+
+1. [Cells Iteration](../index.md#cells-iterations), specifically `Cells 1.0`
+1. [GitLab Dedicated](https://about.gitlab.com/dedicated/)
+1. [GitLab Dedicated Architecture](https://gitlab-com.gitlab.io/gl-infra/gitlab-dedicated/team/architecture/Architecture.html)
+
+## Philosophy
+
+- **Cell local by default**: All services should be cell local, and not global, unless there are good, documented reasons why they aren't.
+  If we keep things cell local, communication between the cell and the service stays internal, the service runs at a smaller scale, and the blast radius is much smaller.
+  For example, Gitaly and GitLab Registry are cell local.
+- **Homogeneous environments**: For now, every GitLab cell should look the same. Bootstrapping and provisioning should be done in an automated way.
+  For the first iteration all Cells are the same size; there are benefits to running different sizes, but this adds complexity and scope.
+- **Fresh start, but not so much**: Brand new GitLab instances are created, so it's tempting to redo everything. We have to balance the existing infrastructure, the Dedicated tooling, and time.
+- **All operations get rolled out the same way**: Configuration changes, Feature Flags, Deployments, and operational tasks ideally go through the same process of rolling out a change.
+  Having one way of doing things brings efficiencies and a single source of truth for automation.
+- **Centralize Tooling**: We have a lot of tooling to manage GitLab.com and separate tooling for GitLab Dedicated,
+  which creates silos, duplication of effort, and less portability.
+  Because we have to provision multiple Cells for GitLab.com, we need new tooling; GitLab Dedicated built tooling for exactly this reason.
+  We should use this tooling as much as possible, and if there are things we don't agree with we should [disagree, commit, and disagree](https://handbook.gitlab.com/handbook/values/#disagree-commit-and-disagree) to improve a single tool.
+  It is OK to start with tooling that has shortcomings; an iterative approach leads to _one_ mature product instead of two.
+
+## Glossary/[Ubiquitous Language](https://martinfowler.com/bliki/UbiquitousLanguage.html)
+
+- `Provision`: When we create a new Cell. Example: we _provisioned_ Cell 5, which is a brand new Cell.
+- `Deploy`: When we change the running code inside of an existing Cell. Example: we _deployed_ the new auto-deploy version on GitLab.com.
+  - [Blueprint](deployments.md)
+- `Configuration change`: When we change any configuration on the application or infrastructure. Example: we made a _configuration change_ to the labels added to VMs.
+- `Cell`: A single unit and instance of GitLab. Not used to refer to Dedicated, where an instance of GitLab is called a Tenant.
+- `Cluster`: A collection of Cells, plus the existing GitLab.com infrastructure. Example: we need to change the version of Registry in the Cluster.
+- `Fleet`: The collection of all SaaS environments, both single-tenant and multi-tenant, that collectively form our production environments.
+  This includes the existing GitLab.com infrastructure, Cells, and Dedicated.
+
+## Architecture
+
+Below is the Cell architecture. You can find the current GitLab.com architecture (pre-Cells) in the [deployment architecture blueprint](../rejected/deployment-architecture.md).
+
+```plantuml
+@startuml
+skinparam actorStyle awesome
+skinparam frame {
+ borderColor<> #4285F4
+}
+skinparam frame {
+ borderColor<> #F4B400
+}
+skinparam frame {
+ borderColor<> #0F9D58
+}
+skinparam frame {
+ borderColor<> #DB4437
+}
+skinparam cloud {
+ borderColor<> #F48120
+}
+
+:User:
+cloud gitlab.com <> {
+ [DoS Protection]-->[WAF]
+ [WAF]-->[RoutingService]
+}
+cloud "cloud.gitlab.com" <> {
+ [Cloud Connector]-->[AI Gateway]
+}
+:User:->gitlab.com
+:User:->cloud.gitlab.com
+
+frame "Google Cloud Platform" <> {
+ frame "Cell Cluster" <> {
+ frame "gitlab-production" <>{
+ frame "gprd (Shared VPC Network)" <> as gprdVPC {
+ rectangle "Frontend" as primaryFrontend {
+ node "HAProxy"
+ }
+
+ rectangle "Compute" as primaryCompute {
+ node zonal [
+ zonal cluster x3
+ ===
+ api
+ ---
+ web
+ ---
+ git
+ ---
+ gitlab-shell
+ ---
+ websockets
+ ---
+ registry
+ ]
+
+ node regional [
+ regional x1
+ ===
+ sidekiq
+ ---
+ kas
+ ---
+ zoekt
+ ---
+ cny
+ ---
+ pages
+ ]
+ }
+
+ rectangle "Storage" as primaryStorage {
+ database "patroni-main"
+ database "patroni-ci"
+ database "patroni-registry"
+ database "redis (multiples)"
+ file "object storage" as primaryObjectStorage
+ }
+
+ primaryFrontend <--> primaryCompute
+ primaryCompute <--> primaryStorage
+ }
+ }
+
+ frame "gitlab-ci" <> {
+ node "runner managers" as runnerManager
+
+ runnerManager --> "HAProxy"
+ }
+ frame "gitlab-ci-*" <> {
+ node "ephemeral VMs"
+ }
+ runnerManager --> "gitlab-ci-*"
+
+ frame "gitlab-gitaly-gprd-*" <> {
+ file "gitaly-[1,9]" as primaryGitaly
+ }
+ primaryCompute <--> primaryGitaly
+ primaryGitaly .r[#F4B400].* gprdVPC
+
+ frame "gitlab-gprd-cell-1" <> {
+ node cell1gke [
+ GKE
+ ===
+ webservice
+ ---
+ gitlab-shell
+ ---
+ registry
+ ---
+ sidekiq
+ ]
+
+ rectangle "Storage" as cell1Storage {
+ database "Postgres" as cell1Postgres
+ database "Redis" as cell1Redis
+ file "object storage" as cell1ObjectStorage
+ file "gitaly" as cell1Gitaly
+ }
+
+ cell1gke <--> cell1Storage
+ }
+
+ frame "gitlab-gprd-cell-2" <> {
+ node cell2gke [
+ GKE
+ ===
+ webservice
+ ---
+ gitlab-shell
+ ---
+ registry
+ ---
+ sidekiq
+ ]
+
+ rectangle "Storage" as cell2Storage {
+ database "Postgres" as cell2Postgres
+ database "Redis" as cell2Redis
+ file "object storage" as cell2ObjectStorage
+ file "gitaly" as cell2Gitaly
+ }
+
+ cell2gke <--> cell2Storage
+ }
+
+ "gitlab-gprd-cell-2" .r[#F4B400].* gprdVPC
+ "gitlab-gprd-cell-1" .r[#F4B400].* gprdVPC
+ }
+
+ "Cell Cluster" -u-> cloud.gitlab.com
+}
+
+[RoutingService]-[thickness=3]->primaryFrontend
+[RoutingService]-[thickness=3]->cell1gke
+[RoutingService]-[thickness=3]->cell2gke
+@enduml
+```
+
+## Large Domains
+
+The infrastructure is multifaceted and all teams have a role in setting up the cell infrastructure.
+
+The `Confidence` column gives a sense of how confident we are in the specific domain and its path forward for Cells.
+When a blueprint is merged, the confidence should ideally move to 👍, because the blueprint provides direction for that domain.
+
+| Domain | Owner | Blueprint | Confidence |
+|----------------------------------|-----------------------------------|--------------------------------------|------------|
+| Routing | group::tenant scale | [Blueprint](../routing-service.md) | 👍 |
+| Cell Control Plane | group::Delivery/team::Foundations | To-Do | 👎 |
+| Cell Sizing | team::Scalability-Observability | To-Do | 👎 |
+| CI Runners | team::Scalability-Practices | To-Do | 👎 |
+| Databases | team::Database Reliability | To-Do | 👎 |
+| Deployments | group::Delivery | [Blueprint](deployments.md) | 👍 |
+| Observability | team::Scalability-Observability | To-Do | 👎 |
+| Cell Architecture and Tooling | team::Foundations | To-Do | 👎 |
+| Provisioning | team::Foundations | To-Do | 👎 |
+| Configuration Management/Rollout | team::Foundations | To-Do | 👎 |
+
+```plantuml
+@startuml
+skinparam component {
+ BackgroundColor White
+ BorderColor Black
+}
+
+rectangle "Domains as Downstream Dependencies" #line.dashed {
+ component "Control Plane"
+ component "Cell Sizing"
+ component "Databases"
+ component "Routing"
+ component "CI Runners"
+ component "Cell Architecture and tooling"
+}
+
+component "Deployments"
+component "Observability"
+component "Provisioning"
+component "Configuration Management"
+
+"Deployments" -d-> "Control Plane" : Ring Definition
+"Deployments" -d-> "Provisioning": Cell needs to exist to deploy
+"Configuration Management" -d-> "Control Plane": Ring Definition
+"Provisioning" -d-> "Cell Sizing": Size we are going to Provision
+"Provisioning" -d-> "Databases": Database to provision
+"Provisioning" -d-> "Observability": Observability infrastructure part of provisioning
+"Provisioning" -d-> "CI Runners": How to provision CI Runners
+"Provisioning" -d-> "Cell Architecture and tooling": What to provision
+"Observability" -d-> "Provisioning": Cell needs to exist to observe
+"Configuration Management" -d-> "Provisioning": Cell needs to exist to configure
+
+@enduml
+```
+
+## Stakeholders
+
+We have several teams partaking in the operations of Cells.
+The first distinction is between the teams implementing and maintaining the tools, and the teams using those tools.
+
+| Areas | Features | Owners |
+|---------------------------------------------------|-----------------------------------------------------------|---------------------------------|
+| Integration with Dedicated tools* | | |
+| | Integration with Release Managers' workflows | team::Delivery-Deployments |
+| | Deployment mechanics using `Instrumentor` and `AMP` | team::Foundations |
+| | Cell application reference architectures and overlays | team::Ops |
+| | Cell bootstrapping, tooling and supporting infrastructure | team::Ops |
+| | Cell deprovisioning | team::Ops |
+| Control Plane for cluster state** | | |
+| | Investigate GitOps model | team::Delivery-Deployments |
+| | Investigate `CRD` + operator | team::Delivery-Deployments |
+| Ring-based deployment automation | | |
+| | Propagating changes inside a ring perimeter | team::Delivery-Deployments |
+| | Orchestrating changes propagation outside ring perimeter | team::Foundations |
+| | Emergency brake: stopping a package rollout | team::Delivery-Deployments |
+| Rollback capabilities | | |
+| | Rollback with downtime (for QA Cell in ring 0) | team::Delivery-Deployments |
+| | Delayed Post Deploy Migrations for rollback support | team::Environment Automation |
+| Observability | | |
+| | Cell health metric | team::Scalability-Observability |
+| | Fleet health metric | team::Scalability-Observability |
+| | Package States | team::Delivery-Deployments |
+| Incident Lifecycle Management | | |
+| | Paging Engineer On Call | team::Ops |
+| | Incident tooling | team::Ops |
+| Network Edge | | |
+| | Web Application Firewall | team::Foundations |
+| | CDN | team::Foundations |
+| | Load Balancing and networking | team::Foundations |
+| | Rate Limiting | team::Foundations |
+
+> \* These items may require contributions from various stakeholders in SaaS Platforms and Core Platform. This work should be heavily collaborated on to help ensure appropriate alignment between the needs of the owning team and the customer teams.
+>
+> \*\* These items are for consideration after the Cells 2.0 iteration.
+
+The users of those features are the Release Managers, the Engineer On Call, and Team::Ops.
+The following list defines the tasks those groups can perform in the cell cluster:
+
+1. Release Managers
+ - Command deployments inside the perimeter
+ - Declare "graduated" packages
+ - Rollback deployments inside the perimeter
+1. Engineer On Call
+ - Receive alerts for failed deployments
+ - Can pause a package rollout (not reaching the next ring)
+ - Drive investigation for failed deployments
+1. Team::Ops
+ - Cells Bootstrapping
+ - Provisioning
+ - Deprovisioning
+ - Re-balancing
+ - Cell-Ring association
diff --git a/doc/architecture/blueprints/cells/iterations/cells-1.0.md b/doc/architecture/blueprints/cells/iterations/cells-1.0.md
index df0924e3e2b..d3661467b27 100644
--- a/doc/architecture/blueprints/cells/iterations/cells-1.0.md
+++ b/doc/architecture/blueprints/cells/iterations/cells-1.0.md
@@ -536,7 +536,7 @@ We would have to ensure that the JWT token signed by GitLab is in a form that ca
The Primary Cell in fact serves as a cluster-wide service. Depending on our intent it could be named the following:
- Primary Cell: To clearly state that the Primary Cell has a special purpose today, but we rename it later.
- - Cluster-wide Data Provider: This is the current name used in the [Deployment Architecture](../deployment-architecture.md).
+ - Cluster-wide Data Provider
- Global Service: Alternative name to Cluster-wide Data Provider, indicating that the Primary Cell would implement a Global Service today.
1. How are secrets are generated?
diff --git a/doc/architecture/blueprints/cells/rejected/deployment-architecture.md b/doc/architecture/blueprints/cells/rejected/deployment-architecture.md
new file mode 100644
index 00000000000..dd18382bd56
--- /dev/null
+++ b/doc/architecture/blueprints/cells/rejected/deployment-architecture.md
@@ -0,0 +1,158 @@
+---
+stage: enablement
+group: Tenant Scale
+description: 'Cells: Deployment Architecture'
+status: rejected
+---
+
+_This blueprint was surpassed by the [infrastructure blueprint](../infrastructure/index.md)_
+
+# Cells: Deployment Architecture
+
+This section describes the existing deployment architecture
+of GitLab.com and contrasts it with the expected Cells architecture.
+
+## 1. Before Cells - Monolithic architecture
+
+
+
+The diagram represents simplified GitLab.com deployment components before the introduction of a Cells architecture.
+This diagram intentionally omits some services that are not relevant to the architecture overview (Cloudflare, Consul, PgBouncers, ...).
+Those services are considered to be Cell-local, with the exception of Cloudflare.
+
+The component blocks are:
+
+- Separate components that can be deployed independently.
+- Components that are independent from other components and offer a wide range of version compatibility.
+
+The application layer services are:
+
+- Strongly interconnected and require running the same version of the application.
+ Read more in [!131657](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/131657#note_1563513431).
+- Each service is run across many nodes and scaled horizontally to provide sufficient throughput.
+- Services that interact with other services using an API (REST, gRPC), Redis or DB.
+
+The dependent services are:
+
+- Updated infrequently and selectively.
+- Might use cloud managed services.
+- Each service is clustered and might be run across different availability zones to provide high availability.
+- Object storage is also accessible directly to users if a pre-signed URL is provided.
+
+## 2. Development Cells - Adapting application to Cellular architecture
+
+
+
+The purpose of **Development Cells** is to model a production-like architecture for testing and validating the changes introduced.
+This could be achieved by testing Cells on top of the [Reference Architectures](../../../../administration/reference_architectures/index.md).
+Read more in [#425197](https://gitlab.com/gitlab-org/gitlab/-/issues/425197).
+
+The differences compared to [Before Cells](#1-before-cells---monolithic-architecture) are:
+
+- A Routing Service is developed by Cells.
+- Development Cells are meant to be run using a development environment only to allow prototyping of Cells without the overhead of managing all auxiliary services.
+- Development Cells represent a simplified GitLab.com architecture by focusing only on essential services required to be split.
+- Development Cells are not meant to be used in production.
+- Cluster-wide data sharing is done with a read-write connection to the main database of Cell 1: PostgreSQL main database, and Redis user-sessions database.
+
+## 3. Initial Cells deployment - Transforming monolithic architecture to Cells architecture
+
+
+
+The differences compared to [Development Cells](#2-development-cells---adapting-application-to-cellular-architecture) are:
+
+- A Cluster-wide Data Provider is introduced by Cells.
+- The Cluster-wide Data Provider is deployed with Cell 1 to be able to access cluster-wide data directly.
+- The cluster-wide database is isolated from the main PostgreSQL database.
+- A Cluster-wide Data Provider is responsible for storing and sharing user data,
+ user sessions (currently stored in Redis sessions cluster), routing information
+ and cluster-wide settings across all Cells.
+- Access to the cluster-wide database is done asynchronously:
+ - Read access always uses a database replica.
+ - A database replica might be deployed with the Cell.
+ - Write access uses the dedicated Cluster-wide Data Provider service.
+- Additional Cells are deployed, upgraded and maintained via a [GitLab Dedicated-like](../../../../subscriptions/gitlab_dedicated/index.md) control plane.
+- Each Cell aims to run as many services as possible in isolation.
+- A Cell can run its own Gitaly cluster, or can use a shared Gitaly cluster, or both.
+ Read more in [!131657](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/131657#note_1569151454).
+- Shared Runners provided by GitLab are expected to be run locally on the Cell.
+- Infrastructure components might be shared across the cluster and be used by different Cells.
+- It is undefined whether Elasticsearch service would be better run cluster-wide or Cell-local.
+- Delay the decision on how to scale the **GitLab Pages - `gitlab.io`** component.
+- Delay the decision on how to scale the **Registry - `registry.gitlab.com`** component.
+
+## 4. Hybrid Cells deployment - Initial complete Cells architecture
+
+
+
+The differences compared to [Initial Cells deployment](#3-initial-cells-deployment---transforming-monolithic-architecture-to-cells-architecture) are:
+
+- Removes coupling of Cell N to Cell 1.
+- The Cluster-wide Data Provider is isolated from Cell 1.
+- The cluster-wide databases (PostgreSQL, Redis) are moved to be run with the Cluster-wide Data Provider.
+- All application data access paths to cluster-wide data use the Cluster-wide Data Provider.
+- Some services are shared across Cells.
+
+## 5. Target Cells - Fully isolated Cells architecture
+
+
+
+The differences compared to [Hybrid Cells deployment](#4-hybrid-cells-deployment---initial-complete-cells-architecture) are:
+
+- The Routing Service is expanded to support [GitLab Pages](../../../../user/project/pages/index.md) and [GitLab container registry](../../../../user/packages/container_registry/index.md).
+- Each Cell has all services isolated.
+- It is allowed that some Cells will follow a [hybrid architecture](#4-hybrid-cells-deployment---initial-complete-cells-architecture).
+
+## Isolation of Services
+
+Each service can be considered individually regarding its requirements, the risks associated
+with scaling the service, its location (cluster-wide or Cell-local), and impact on our ability to migrate data between Cells.
+
+### Cluster-wide services
+
+| Service | Type | Uses | Description |
+| ------------------------------ | ------------ | ------------------------------- | --------------------------------------------------------------------------------------------------- |
+| **Routing Service** | GitLab-built | Cluster-wide Data Provider | A general purpose routing service that can redirect requests from all GitLab SaaS domains to the Cell |
+| **Cluster-wide Data Provider** | GitLab-built | PostgreSQL, Redis, Event Queue? | Provide user profile and routing information to all clustered services |
+
+As per the architecture, the above services are required to be run cluster-wide:
+
+- Those are additional services that are introduced by the Cells architecture.
+
+### Cell-local services
+
+| Service | Type | Uses | Migrate from cluster-wide to Cell | Description |
+| -------------------------- | --------------- | ----------------------------------- | --------------------------------- | --------------------------------------------------------------------------------------------------------------------------- |
+| **Redis Cluster** | Managed service | Disk storage | No problem | Redis is used to hold user sessions, application caches, or Sidekiq queues. Most of that data is only applicable to Cells. |
+| **GitLab Runners Manager** | Managed service | API, uses Google Cloud VM Instances | No problem | Significant changes required to API and execution of CI jobs |
+
+As per the architecture, the above services are required to be run Cell-local:
+
+- The consumer data held by the Cell-local services needs to be migratable to another Cell.
+- The compute generated by the service is substantial, and it is strongly desired to reduce the impact of a [single Cell failure](../goals.md#high-resilience-to-a-single-cell-failure).
+- It is complex to run the service cluster-wide from the Cells architecture perspective.
+
+### Hybrid Services
+
+| Service | Type | Uses | Migrate from cluster-wide to Cell | Description |
+| ------------------- | --------------- | ------------------------------- | ----------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- |
+| **GitLab Pages** | GitLab-built | Routing Service, Rails API | No problem | Serving CI generated pages under `.gitlab.io` or custom domains |
+| **GitLab Registry** | GitLab-built | Object Storage, PostgreSQL | Non-trivial data migration in case of split | Service to provide GitLab container registry |
+| **Gitaly Cluster** | GitLab-built | Disk storage, PostgreSQL | No problem: Built-in migration routines to balance Gitaly nodes | Gitaly holds Git repository data. Many Gitaly clusters can be configured in application. |
+| **Elasticsearch** | Managed service | Many nodes required by sharding | Time consuming: Rebuild cluster from scratch | Search across all projects |
+| **Object Storage** | Managed service | | Not straightforward: Rather hard to selectively migrate between buckets | Holds all user and CI uploaded files that are served by GitLab |
+
+As per the architecture, the above services are allowed to be run either cluster-wide or Cell-local:
+
+- The ability to run hybrid services cluster-wide might reduce the amount of work to migrate data between Cells due to some services being shared.
+- The hybrid services that are run cluster-wide might negatively impact Cell availability and resiliency due to increased impact caused by [single Cell failure](../goals.md#high-resilience-to-a-single-cell-failure).
diff --git a/doc/architecture/blueprints/cells/diagrams/deployment-before-cells.drawio.png b/doc/architecture/blueprints/cells/rejected/diagrams/deployment-before-cells.drawio.png
similarity index 100%
rename from doc/architecture/blueprints/cells/diagrams/deployment-before-cells.drawio.png
rename to doc/architecture/blueprints/cells/rejected/diagrams/deployment-before-cells.drawio.png
diff --git a/doc/architecture/blueprints/cells/diagrams/deployment-development-cells.drawio.png b/doc/architecture/blueprints/cells/rejected/diagrams/deployment-development-cells.drawio.png
similarity index 100%
rename from doc/architecture/blueprints/cells/diagrams/deployment-development-cells.drawio.png
rename to doc/architecture/blueprints/cells/rejected/diagrams/deployment-development-cells.drawio.png
diff --git a/doc/architecture/blueprints/cells/diagrams/deployment-hybrid-cells.drawio.png b/doc/architecture/blueprints/cells/rejected/diagrams/deployment-hybrid-cells.drawio.png
similarity index 100%
rename from doc/architecture/blueprints/cells/diagrams/deployment-hybrid-cells.drawio.png
rename to doc/architecture/blueprints/cells/rejected/diagrams/deployment-hybrid-cells.drawio.png
diff --git a/doc/architecture/blueprints/cells/diagrams/deployment-initial-cells.drawio.png b/doc/architecture/blueprints/cells/rejected/diagrams/deployment-initial-cells.drawio.png
similarity index 100%
rename from doc/architecture/blueprints/cells/diagrams/deployment-initial-cells.drawio.png
rename to doc/architecture/blueprints/cells/rejected/diagrams/deployment-initial-cells.drawio.png
diff --git a/doc/architecture/blueprints/cells/diagrams/deployment-target-cells.drawio.png b/doc/architecture/blueprints/cells/rejected/diagrams/deployment-target-cells.drawio.png
similarity index 100%
rename from doc/architecture/blueprints/cells/diagrams/deployment-target-cells.drawio.png
rename to doc/architecture/blueprints/cells/rejected/diagrams/deployment-target-cells.drawio.png
diff --git a/doc/architecture/blueprints/cells/routing-service.md b/doc/architecture/blueprints/cells/routing-service.md
index d310e744ca5..6dc7fb8876d 100644
--- a/doc/architecture/blueprints/cells/routing-service.md
+++ b/doc/architecture/blueprints/cells/routing-service.md
@@ -9,7 +9,7 @@ status: accepted
This document describes design goals and architecture of Routing Service
used by Cells. To better understand where the Routing Service fits
-into architecture take a look at [Deployment Architecture](deployment-architecture.md).
+into architecture take a look at [Infrastructure Architecture](infrastructure/index.md#architecture).
## Goals
diff --git a/lib/generators/batched_background_migration/templates/batched_background_migration_job_spec.template b/lib/generators/batched_background_migration/templates/batched_background_migration_job_spec.template
index b05695ffa3b..857636a182a 100644
--- a/lib/generators/batched_background_migration/templates/batched_background_migration_job_spec.template
+++ b/lib/generators/batched_background_migration/templates/batched_background_migration_job_spec.template
@@ -2,6 +2,6 @@
require 'spec_helper'
-RSpec.describe Gitlab::BackgroundMigration::<%= class_name %>, feature_category: :<%= feature_category %> do # rubocop:disable Layout/LineLength
+RSpec.describe Gitlab::BackgroundMigration::<%= class_name %>, feature_category: :<%= feature_category %> do
# Tests go here
end
diff --git a/locale/gitlab.pot b/locale/gitlab.pot
index 098964d19a8..eb1f9c3c53d 100644
--- a/locale/gitlab.pot
+++ b/locale/gitlab.pot
@@ -19364,6 +19364,9 @@ msgstr ""
msgid "Environments|Kubernetes namespace (optional)"
msgstr ""
+msgid "Environments|Kubernetes overview"
+msgstr ""
+
msgid "Environments|Kustomizations"
msgstr ""
@@ -19499,12 +19502,18 @@ msgstr ""
msgid "Environment|Forbidden to access the cluster agent from this environment."
msgstr ""
+msgid "Environment|Get started"
+msgstr ""
+
msgid "Environment|Healthy"
msgstr ""
msgid "Environment|Kubernetes overview"
msgstr ""
+msgid "Environment|No Kubernetes clusters configured"
+msgstr ""
+
msgid "Environment|Pods"
msgstr ""
@@ -19526,6 +19535,9 @@ msgstr ""
msgid "Environment|Sync status"
msgstr ""
+msgid "Environment|There are no Kubernetes cluster connections configured for this environment. Connect a cluster to add the status of your workloads, resources, and the Flux reconciliation state to the dashboard. %{linkStart}Learn more about Kubernetes integration.%{linkEnd}"
+msgstr ""
+
msgid "Environment|There was an error connecting to the cluster agent."
msgstr ""
diff --git a/package.json b/package.json
index 3e935430f36..4721b2d0887 100644
--- a/package.json
+++ b/package.json
@@ -125,7 +125,7 @@
"clipboard": "^2.0.8",
"compression-webpack-plugin": "^5.0.2",
"copy-webpack-plugin": "^6.4.1",
- "core-js": "^3.35.1",
+ "core-js": "^3.36.0",
"cron-validator": "^1.1.1",
"cronstrue": "^1.122.0",
"cropperjs": "^1.6.1",
diff --git a/spec/controllers/projects/environments_controller_spec.rb b/spec/controllers/projects/environments_controller_spec.rb
index c421aee88f8..e9d724f2bec 100644
--- a/spec/controllers/projects/environments_controller_spec.rb
+++ b/spec/controllers/projects/environments_controller_spec.rb
@@ -280,6 +280,24 @@ RSpec.describe Projects::EnvironmentsController, feature_category: :continuous_d
let(:request_params) { environment_params }
let(:target_id) { 'users_visiting_environments_pages' }
end
+
+ it 'sets the kas cookie if the request format is html' do
+ allow(::Gitlab::Kas::UserAccess).to receive(:enabled?).and_return(true)
+ get :show, params: environment_params
+
+ expect(
+ request.env['action_dispatch.cookies'][Gitlab::Kas::COOKIE_KEY]
+ ).to be_present
+ end
+
+ it 'does not set the kas_cookie if the request format is not html' do
+ allow(::Gitlab::Kas::UserAccess).to receive(:enabled?).and_return(true)
+ get :show, params: environment_params(format: :json)
+
+ expect(
+ request.env['action_dispatch.cookies'][Gitlab::Kas::COOKIE_KEY]
+ ).to be_nil
+ end
end
context 'with invalid id' do
diff --git a/spec/db/schema_spec.rb b/spec/db/schema_spec.rb
index 898926ec51f..9a0a4587e6e 100644
--- a/spec/db/schema_spec.rb
+++ b/spec/db/schema_spec.rb
@@ -49,6 +49,7 @@ RSpec.describe 'Database schema', feature_category: :database do
chat_names: %w[chat_id team_id user_id],
chat_teams: %w[team_id],
ci_builds: %w[project_id runner_id user_id erased_by_id trigger_request_id partition_id auto_canceled_by_partition_id],
+ ci_job_artifacts: %w[partition_id project_id job_id],
ci_namespace_monthly_usages: %w[namespace_id],
ci_pipeline_artifacts: %w[partition_id],
ci_pipeline_chat_data: %w[partition_id],
diff --git a/spec/features/projects/environments/environment_spec.rb b/spec/features/projects/environments/environment_spec.rb
index 699de6b0784..61878a05fdf 100644
--- a/spec/features/projects/environments/environment_spec.rb
+++ b/spec/features/projects/environments/environment_spec.rb
@@ -50,6 +50,7 @@ RSpec.describe 'Environment', feature_category: :environment_management do
context 'without deployments' do
before do
visit_environment(environment)
+ click_link s_('Environments|Deployment history')
end
it 'does not show deployments' do
@@ -60,6 +61,7 @@ RSpec.describe 'Environment', feature_category: :environment_management do
context 'with deployments' do
before do
visit_environment(environment)
+ click_link s_('Environments|Deployment history')
end
context 'when there is no related deployable' do
@@ -124,6 +126,7 @@ RSpec.describe 'Environment', feature_category: :environment_management do
before do
visit_environment(environment)
+ click_link s_('Environments|Deployment history')
end
# This ordering is unexpected and to be fixed.
@@ -155,6 +158,7 @@ RSpec.describe 'Environment', feature_category: :environment_management do
before do
visit_environment(environment)
+ click_link s_('Environments|Deployment history')
end
it 'shows deployment information and buttons', :js do
diff --git a/spec/features/projects/feature_flags/user_sees_feature_flag_list_spec.rb b/spec/features/projects/feature_flags/user_sees_feature_flag_list_spec.rb
index e2448887531..8832251db08 100644
--- a/spec/features/projects/feature_flags/user_sees_feature_flag_list_spec.rb
+++ b/spec/features/projects/feature_flags/user_sees_feature_flag_list_spec.rb
@@ -42,7 +42,7 @@ RSpec.describe 'User sees feature flag list', :js, feature_category: :feature_fl
expect_status_toggle_button_not_to_be_checked
within_feature_flag_scopes do
- expect(page.find('[data-testid="strategy-label"]')).to have_content('All Users: All Environments, review/*')
+ expect(find_by_testid('strategy-label')).to have_content('All Users: All Environments, review/*')
end
end
end
@@ -66,7 +66,7 @@ RSpec.describe 'User sees feature flag list', :js, feature_category: :feature_fl
expect_status_toggle_button_to_be_checked
within_feature_flag_scopes do
- expect(page.find('[data-testid="strategy-label"]')).to have_content('All Users: production')
+ expect(find_by_testid('strategy-label')).to have_content('All Users: production')
end
end
end
diff --git a/spec/features/projects/fork_spec.rb b/spec/features/projects/fork_spec.rb
index e849f110014..6821c09a6cd 100644
--- a/spec/features/projects/fork_spec.rb
+++ b/spec/features/projects/fork_spec.rb
@@ -247,7 +247,7 @@ RSpec.describe 'Project fork', feature_category: :source_code_management do
visit project_path(project)
- forks_count_button = find('[data-testid="forks-count"]')
+ forks_count_button = find_by_testid('forks-count')
expect(forks_count_button).to have_content("2")
end
end
diff --git a/spec/features/projects/integrations/user_activates_jira_spec.rb b/spec/features/projects/integrations/user_activates_jira_spec.rb
index 942327cdfe8..9d5959b86bd 100644
--- a/spec/features/projects/integrations/user_activates_jira_spec.rb
+++ b/spec/features/projects/integrations/user_activates_jira_spec.rb
@@ -98,7 +98,7 @@ RSpec.describe 'User activates Jira', :js, feature_category: :integrations do
choose 'Use custom transitions'
click_save_integration
- within '[data-testid="issue-transition-mode"]' do
+ within_testid 'issue-transition-mode' do
expect(page).to have_content('This field is required.')
end
diff --git a/spec/features/projects/issues/design_management/user_views_designs_with_svg_xss_spec.rb b/spec/features/projects/issues/design_management/user_views_designs_with_svg_xss_spec.rb
index bbc54382ae6..c8c8e166db0 100644
--- a/spec/features/projects/issues/design_management/user_views_designs_with_svg_xss_spec.rb
+++ b/spec/features/projects/issues/design_management/user_views_designs_with_svg_xss_spec.rb
@@ -29,7 +29,7 @@ RSpec.describe 'User views an SVG design that contains XSS', :js, feature_catego
end
it 'displays the SVG', quarantine: 'https://gitlab.com/gitlab-org/gitlab/-/issues/381115' do
- find("[data-testid='close-design']").click
+ find_by_testid('close-design').click
expect(page).to have_selector("img.design-img[alt='xss.svg']", count: 1, visible: false)
end
diff --git a/spec/features/projects/jobs/permissions_spec.rb b/spec/features/projects/jobs/permissions_spec.rb
index 7759875c2a5..e183ad98882 100644
--- a/spec/features/projects/jobs/permissions_spec.rb
+++ b/spec/features/projects/jobs/permissions_spec.rb
@@ -98,7 +98,7 @@ RSpec.describe 'Project Jobs Permissions', feature_category: :groups_and_project
it_behaves_like 'project jobs page responds with status', 200 do
it 'renders job', :js do
- page.within('[data-testid="jobs-table"]') do
+ within_testid('jobs-table') do
expect(page).to have_content("##{job.id}")
.and have_content(job.sha[0..7])
.and have_content(job.ref)
diff --git a/spec/features/projects/jobs/user_browses_job_spec.rb b/spec/features/projects/jobs/user_browses_job_spec.rb
index 1be1a58d212..6dc0d71d6ba 100644
--- a/spec/features/projects/jobs/user_browses_job_spec.rb
+++ b/spec/features/projects/jobs/user_browses_job_spec.rb
@@ -28,7 +28,7 @@ RSpec.describe 'User browses a job', :js, feature_category: :continuous_integrat
# scroll to the top of the page first
execute_script "window.scrollTo(0,0)"
accept_gl_confirm(button_text: 'Erase job log') do
- find('[data-testid="job-log-erase-link"]').click
+ find_by_testid('job-log-erase-link').click
end
wait_for_requests
@@ -96,7 +96,7 @@ RSpec.describe 'User browses a job', :js, feature_category: :continuous_integrat
it 'searches for supplied substring' do
find('[data-testid="job-log-search-box"] input').set('GroupsHelper')
- find('[data-testid="search-button"]').click
+ find_by_testid('search-button').click
expect(page).to have_content('26 results found for GroupsHelper')
end
@@ -104,7 +104,7 @@ RSpec.describe 'User browses a job', :js, feature_category: :continuous_integrat
it 'shows no results for supplied substring' do
find('[data-testid="job-log-search-box"] input').set('YouWontFindMe')
- find('[data-testid="search-button"]').click
+ find_by_testid('search-button').click
expect(page).to have_content('No search results found')
end
diff --git a/spec/features/projects/jobs/user_browses_jobs_spec.rb b/spec/features/projects/jobs/user_browses_jobs_spec.rb
index 5c1dc36a31c..545bfee4910 100644
--- a/spec/features/projects/jobs/user_browses_jobs_spec.rb
+++ b/spec/features/projects/jobs/user_browses_jobs_spec.rb
@@ -26,16 +26,18 @@ RSpec.describe 'User browses jobs', feature_category: :continuous_integration do
end
it 'shows a tab for All jobs and count' do
- expect(page.find('[data-testid="jobs-all-tab"]').text).to include('All')
- expect(page.find('[data-testid="jobs-all-tab"] .badge').text).to include('0')
+ expect(find_by_testid('jobs-all-tab').text).to include('All')
+ within_testid('jobs-all-tab') do
+ expect(page.find('.badge').text).to include('0')
+ end
end
it 'shows a tab for Finished jobs and count' do
- expect(page.find('[data-testid="jobs-finished-tab"]').text).to include('Finished')
+ expect(find_by_testid('jobs-finished-tab').text).to include('Finished')
end
it 'updates the content when tab is clicked' do
- page.find('[data-testid="jobs-finished-tab"]').click
+ find_by_testid('jobs-finished-tab').click
wait_for_requests
expect(page).to have_content('No jobs to show')
@@ -68,7 +70,7 @@ RSpec.describe 'User browses jobs', feature_category: :continuous_integration do
end
it 'cancels a job successfully' do
- page.find('[data-testid="cancel-button"]').click
+ find_by_testid('cancel-button').click
wait_for_requests
@@ -89,7 +91,7 @@ RSpec.describe 'User browses jobs', feature_category: :continuous_integration do
end
it 'retries a job successfully' do
- page.find('[data-testid="retry"]').click
+ find_by_testid('retry').click
wait_for_requests
@@ -111,7 +113,7 @@ RSpec.describe 'User browses jobs', feature_category: :continuous_integration do
end
it 'shows the coverage' do
- page.within('[data-testid="job-coverage"]') do
+ within_testid('job-coverage') do
expect(page).to have_content('99.9%')
end
end
@@ -125,7 +127,7 @@ RSpec.describe 'User browses jobs', feature_category: :continuous_integration do
end
it 'plays a job successfully' do
- page.find('[data-testid="play-scheduled"]').click
+ find_by_testid('play-scheduled').click
page.within '#play-job-modal' do
page.find_button('OK').click
@@ -137,7 +139,7 @@ RSpec.describe 'User browses jobs', feature_category: :continuous_integration do
end
it 'unschedules a job successfully' do
- page.find('[data-testid="unschedule"]').click
+ find_by_testid('unschedule').click
wait_for_requests
@@ -191,19 +193,19 @@ RSpec.describe 'User browses jobs', feature_category: :continuous_integration do
end
it 'contains a link to the pipeline' do
- expect(page.find('[data-testid="pipeline-id"]')).to have_content "##{pipeline.id}"
+ expect(find_by_testid('pipeline-id')).to have_content "##{pipeline.id}"
end
it 'contains a link to the job sha' do
- expect(page.find('[data-testid="job-sha"]')).to have_content job.sha[0..7].to_s
+ expect(find_by_testid('job-sha')).to have_content job.sha[0..7].to_s
end
it 'contains a link to the job id' do
- expect(page.find('[data-testid="job-id-link"]')).to have_content job.id.to_s
+ expect(find_by_testid('job-id-link')).to have_content job.id.to_s
end
it 'contains a link to the job ref' do
- expect(page.find('[data-testid="job-ref"]')).to have_content job.ref.to_s
+ expect(find_by_testid('job-ref')).to have_content job.ref.to_s
end
end
end
diff --git a/spec/features/projects/jobs/user_triggers_manual_job_with_variables_spec.rb b/spec/features/projects/jobs/user_triggers_manual_job_with_variables_spec.rb
index 21274572352..3beea5bf269 100644
--- a/spec/features/projects/jobs/user_triggers_manual_job_with_variables_spec.rb
+++ b/spec/features/projects/jobs/user_triggers_manual_job_with_variables_spec.rb
@@ -19,12 +19,12 @@ RSpec.describe 'User triggers manual job with variables', :js, feature_category:
end
it 'passes values correctly' do
- page.within(find("[data-testid='ci-variable-row']")) do
- find("[data-testid='ci-variable-key']").set('key_name')
- find("[data-testid='ci-variable-value']").set('key_value')
+ within_testid('ci-variable-row') do
+ find_by_testid('ci-variable-key').set('key_name')
+ find_by_testid('ci-variable-value').set('key_value')
end
- find("[data-testid='run-manual-job-btn']").click
+ find_by_testid('run-manual-job-btn').click
wait_for_requests
diff --git a/spec/features/projects/jobs_spec.rb b/spec/features/projects/jobs_spec.rb
index 050ed4e0e4c..7a45ddce3d7 100644
--- a/spec/features/projects/jobs_spec.rb
+++ b/spec/features/projects/jobs_spec.rb
@@ -203,7 +203,7 @@ RSpec.describe 'Jobs', :clean_gitlab_redis_shared_state, feature_category: :grou
end
it 'renders escaped tooltip name' do
- page.find('[data-testid="active-job"]').hover
+ find_by_testid('active-job').hover
expect(page).to have_content('
- passed')
end
end
@@ -240,7 +240,7 @@ RSpec.describe 'Jobs', :clean_gitlab_redis_shared_state, feature_category: :grou
href = new_project_issue_path(project, options)
page.within('aside.right-sidebar') do
- expect(find('[data-testid="job-new-issue"]')['href']).to include(href)
+ expect(find_by_testid('job-new-issue')['href']).to include(href)
end
end
end
@@ -257,7 +257,7 @@ RSpec.describe 'Jobs', :clean_gitlab_redis_shared_state, feature_category: :grou
context 'job is cancelable' do
it 'shows cancel button' do
- find('[data-testid="cancel-button"]').click
+ find_by_testid('cancel-button').click
expect(page).to have_current_path(job_url, ignore_query: true)
end
@@ -544,7 +544,7 @@ RSpec.describe 'Jobs', :clean_gitlab_redis_shared_state, feature_category: :grou
it 'shows deployment message' do
expect(page).to have_content 'This job is deployed to production'
- expect(find('[data-testid="job-environment-link"]')['href']).to match("environments/#{environment.id}")
+ expect(find_by_testid('job-environment-link')['href']).to match("environments/#{environment.id}")
end
context 'when there is a cluster used for the deployment' do
@@ -576,7 +576,7 @@ RSpec.describe 'Jobs', :clean_gitlab_redis_shared_state, feature_category: :grou
it 'shows a link for the job' do
expect(page).to have_link environment.name
- expect(find('[data-testid="job-environment-link"]')['href']).to match("environments/#{environment.id}")
+ expect(find_by_testid('job-environment-link')['href']).to match("environments/#{environment.id}")
end
end
@@ -586,7 +586,7 @@ RSpec.describe 'Jobs', :clean_gitlab_redis_shared_state, feature_category: :grou
it 'shows a link to latest deployment' do
expect(page).to have_link environment.name
expect(page).to have_content 'This job is creating a deployment'
- expect(find('[data-testid="job-environment-link"]')['href']).to match("environments/#{environment.id}")
+ expect(find_by_testid('job-environment-link')['href']).to match("environments/#{environment.id}")
end
end
end
@@ -638,15 +638,15 @@ RSpec.describe 'Jobs', :clean_gitlab_redis_shared_state, feature_category: :grou
end
it 'renders a link to the most recent deployment' do
- expect(find('[data-testid="job-environment-link"]')['href']).to match("environments/#{environment.id}")
- expect(find('[data-testid="job-deployment-link"]')['href']).to include(second_deployment.deployable.project.path, second_deployment.deployable_id.to_s)
+ expect(find_by_testid('job-environment-link')['href']).to match("environments/#{environment.id}")
+ expect(find_by_testid('job-deployment-link')['href']).to include(second_deployment.deployable.project.path, second_deployment.deployable_id.to_s)
end
context 'when deployment does not have a deployable' do
let!(:second_deployment) { create(:deployment, :success, environment: environment, deployable: nil) }
it 'has an empty href' do
- expect(find('[data-testid="job-deployment-link"]')['href']).to be_empty
+ expect(find_by_testid('job-deployment-link')['href']).to be_empty
end
end
end
@@ -672,7 +672,7 @@ RSpec.describe 'Jobs', :clean_gitlab_redis_shared_state, feature_category: :grou
expected_text = 'This job is creating a deployment to staging'
expect(page).to have_css('.environment-information', text: expected_text)
- expect(find('[data-testid="job-environment-link"]')['href']).to match("environments/#{environment.id}")
+ expect(find_by_testid('job-environment-link')['href']).to match("environments/#{environment.id}")
end
context 'when it has deployment' do
@@ -683,7 +683,7 @@ RSpec.describe 'Jobs', :clean_gitlab_redis_shared_state, feature_category: :grou
expect(page).to have_css('.environment-information', text: expected_text)
expect(page).to have_css('.environment-information', text: 'latest deployment')
- expect(find('[data-testid="job-environment-link"]')['href']).to match("environments/#{environment.id}")
+ expect(find_by_testid('job-environment-link')['href']).to match("environments/#{environment.id}")
end
end
end
@@ -698,7 +698,7 @@ RSpec.describe 'Jobs', :clean_gitlab_redis_shared_state, feature_category: :grou
'.environment-information', text: expected_text)
expect(page).not_to have_css(
'.environment-information', text: 'latest deployment')
- expect(find('[data-testid="job-environment-link"]')['href']).to match("environments/#{environment.id}")
+ expect(find_by_testid('job-environment-link')['href']).to match("environments/#{environment.id}")
end
end
end
@@ -878,7 +878,7 @@ RSpec.describe 'Jobs', :clean_gitlab_redis_shared_state, feature_category: :grou
visit project_job_path(project, job)
wait_for_requests
- page.within('[data-testid="job-erased-block"]') do
+ within_testid('job-erased-block') do
expect(page).to have_content('Job has been erased')
end
end
@@ -987,7 +987,7 @@ RSpec.describe 'Jobs', :clean_gitlab_redis_shared_state, feature_category: :grou
before do
job.run!
visit project_job_path(project, job)
- find('[data-testid="cancel-button"]').click
+ find_by_testid('cancel-button').click
end
it 'loads the page and shows all needed controls' do
@@ -1004,7 +1004,7 @@ RSpec.describe 'Jobs', :clean_gitlab_redis_shared_state, feature_category: :grou
visit project_job_path(project, job)
wait_for_requests
- find('[data-testid="retry-button"]').click
+ find_by_testid('retry-button').click
end
it 'shows the right status and buttons' do
@@ -1039,7 +1039,7 @@ RSpec.describe 'Jobs', :clean_gitlab_redis_shared_state, feature_category: :grou
visit project_job_path(project, job)
wait_for_requests
- find('[data-testid="retry-button"]').click
+ find_by_testid('retry-button').click
end
it 'shows a modal to warn the user' do
@@ -1049,9 +1049,9 @@ RSpec.describe 'Jobs', :clean_gitlab_redis_shared_state, feature_category: :grou
end
it 'retries the job' do
- find('[data-testid="retry-button-modal"]').click
+ find_by_testid('retry-button-modal').click
- within '[data-testid="job-header-content"]' do
+ within_testid('job-header-content') do
expect(page).to have_content('Pending')
end
end
diff --git a/spec/features/projects/members/group_member_cannot_leave_group_project_spec.rb b/spec/features/projects/members/group_member_cannot_leave_group_project_spec.rb
index 97b29ee6c91..8fbf08e87e9 100644
--- a/spec/features/projects/members/group_member_cannot_leave_group_project_spec.rb
+++ b/spec/features/projects/members/group_member_cannot_leave_group_project_spec.rb
@@ -21,6 +21,6 @@ RSpec.describe 'Projects > Members > Group member cannot leave group project', f
it 'renders a flash message if attempting to leave by url', :js do
visit project_path(project, leave: 1)
- expect(find('[data-testid="alert-danger"]')).to have_content 'You do not have permission to leave this project'
+ expect(find_by_testid('alert-danger')).to have_content 'You do not have permission to leave this project'
end
end
diff --git a/spec/features/projects/members/groups_with_access_list_spec.rb b/spec/features/projects/members/groups_with_access_list_spec.rb
index a2a04ada627..439c2f1a28a 100644
--- a/spec/features/projects/members/groups_with_access_list_spec.rb
+++ b/spec/features/projects/members/groups_with_access_list_spec.rb
@@ -56,7 +56,7 @@ RSpec.describe 'Projects > Members > Groups with access list', :js, feature_cate
page.within find_group_row(group) do
expect(page).to have_field('Expiration date', with: expiration_date)
- find('[data-testid="clear-button"]').click
+ find_by_testid('clear-button').click
wait_for_requests
diff --git a/spec/features/projects/members/master_adds_member_with_expiration_date_spec.rb b/spec/features/projects/members/master_adds_member_with_expiration_date_spec.rb
index b51259bea23..8b720659a39 100644
--- a/spec/features/projects/members/master_adds_member_with_expiration_date_spec.rb
+++ b/spec/features/projects/members/master_adds_member_with_expiration_date_spec.rb
@@ -52,7 +52,7 @@ RSpec.describe 'Projects > Members > Maintainer adds member with expiration date
page.within find_member_row(new_member) do
expect(page).to have_field('Expiration date', with: five_days_from_now)
- find('[data-testid="clear-button"]').click
+ find_by_testid('clear-button').click
wait_for_requests
diff --git a/spec/features/projects/members/sorting_spec.rb b/spec/features/projects/members/sorting_spec.rb
index 7457fbc6989..f8ef1c0ab46 100644
--- a/spec/features/projects/members/sorting_spec.rb
+++ b/spec/features/projects/members/sorting_spec.rb
@@ -147,7 +147,7 @@ RSpec.describe 'Projects > Members > Sorting', :js, feature_category: :groups_an
end
def expect_sort_by(text, sort_direction)
- within('[data-testid="members-sort-dropdown"]') do
+ within_testid('members-sort-dropdown') do
expect(page).to have_css('button[aria-haspopup="listbox"]', text: text)
expect(page).to have_button("Sort direction: #{sort_direction == :asc ? 'Ascending' : 'Descending'}")
end
diff --git a/spec/features/projects/packages_spec.rb b/spec/features/projects/packages_spec.rb
index 4e222a67b87..d69687fb053 100644
--- a/spec/features/projects/packages_spec.rb
+++ b/spec/features/projects/packages_spec.rb
@@ -49,8 +49,8 @@ RSpec.describe 'Packages', feature_category: :package_registry do
let_it_be(:package) { create(:package, project: project) }
it 'allows you to delete a package' do
- find('[data-testid="delete-dropdown"]').click
- find('[data-testid="action-delete"]').click
+ find_by_testid('delete-dropdown').click
+ find_by_testid('action-delete').click
click_button('Permanently delete')
expect(page).to have_content 'Package deleted successfully'
diff --git a/spec/features/projects/pipeline_schedules_spec.rb b/spec/features/projects/pipeline_schedules_spec.rb
index d481d90792d..7f3069d97a7 100644
--- a/spec/features/projects/pipeline_schedules_spec.rb
+++ b/spec/features/projects/pipeline_schedules_spec.rb
@@ -25,7 +25,7 @@ RSpec.describe 'Pipeline Schedules', :js, feature_category: :continuous_integrat
end
it 'edits the pipeline' do
- page.find('[data-testid="edit-pipeline-schedule-btn"]').click
+ find_by_testid('edit-pipeline-schedule-btn').click
expect(page).to have_content(s_('PipelineSchedules|Edit pipeline schedule'))
end
@@ -108,12 +108,18 @@ RSpec.describe 'Pipeline Schedules', :js, feature_category: :continuous_integrat
describe 'the view' do
it 'displays the required information description' do
- page.within('[data-testid="pipeline-schedule-table-row"]') do
+ within_testid('pipeline-schedule-table-row') do
expect(page).to have_content('pipeline schedule')
- expect(find('[data-testid="next-run-cell"] time')['title'])
- .to include(pipeline_schedule.real_next_run.strftime('%B %-d, %Y'))
+
+ within_testid('next-run-cell') do
+ expect(find('time')['title']).to include(pipeline_schedule.real_next_run.strftime('%B %-d, %Y'))
+ end
+
expect(page).to have_link('master')
- expect(find("[data-testid='last-pipeline-status'] a")['href']).to include(pipeline.id.to_s)
+
+ within_testid('last-pipeline-status') do
+ expect(find("a")['href']).to include(pipeline.id.to_s)
+ end
end
end
@@ -124,7 +130,7 @@ RSpec.describe 'Pipeline Schedules', :js, feature_category: :continuous_integrat
end
it 'changes ownership of the pipeline' do
- find("[data-testid='take-ownership-pipeline-schedule-btn']").click
+ find_by_testid('take-ownership-pipeline-schedule-btn').click
page.within('#pipeline-take-ownership-modal') do
click_button s_('PipelineSchedules|Take ownership')
@@ -132,14 +138,14 @@ RSpec.describe 'Pipeline Schedules', :js, feature_category: :continuous_integrat
wait_for_requests
end
- page.within('[data-testid="pipeline-schedule-table-row"]') do
+ within_testid('pipeline-schedule-table-row') do
expect(page).not_to have_content('No owner')
expect(page).to have_link('Sidney Jones')
end
end
it 'deletes the pipeline' do
- page.within('[data-testid="pipeline-schedule-table-row"]') do
+ within_testid('pipeline-schedule-table-row') do
click_button s_('PipelineSchedules|Delete pipeline schedule')
end
@@ -157,9 +163,9 @@ RSpec.describe 'Pipeline Schedules', :js, feature_category: :continuous_integrat
end
it 'shows a list of the pipeline schedules with empty ref column' do
- target = find('[data-testid="pipeline-schedule-target"]')
+ target = find_by_testid('pipeline-schedule-target')
- page.within('[data-testid="pipeline-schedule-table-row"]') do
+ within_testid('pipeline-schedule-table-row') do
expect(target.text).to eq(s_('PipelineSchedules|None'))
end
end
@@ -173,7 +179,7 @@ RSpec.describe 'Pipeline Schedules', :js, feature_category: :continuous_integrat
end
it 'shows a list of the pipeline schedules with empty ref column' do
- target = find('[data-testid="pipeline-schedule-target"]')
+ target = find_by_testid('pipeline-schedule-target')
expect(target.text).to eq(s_('PipelineSchedules|None'))
end
@@ -261,7 +267,7 @@ RSpec.describe 'Pipeline Schedules', :js, feature_category: :continuous_integrat
visit_pipelines_schedules
first('[data-testid="edit-pipeline-schedule-btn"]').click
- find('[data-testid="remove-ci-variable-row"]').click
+ find_by_testid('remove-ci-variable-row').click
save_pipeline_schedule
end
diff --git a/spec/frontend/environments/environment_details/components/kubernetes_overview_spec.js b/spec/frontend/environments/environment_details/components/kubernetes_overview_spec.js
new file mode 100644
index 00000000000..82a21e3239e
--- /dev/null
+++ b/spec/frontend/environments/environment_details/components/kubernetes_overview_spec.js
@@ -0,0 +1,200 @@
+import Vue, { nextTick } from 'vue';
+import VueApollo from 'vue-apollo';
+import { GlLoadingIcon, GlEmptyState, GlAlert } from '@gitlab/ui';
+import { shallowMount } from '@vue/test-utils';
+import KubernetesOverview from '~/environments/environment_details/components/kubernetes_overview.vue';
+import KubernetesStatusBar from '~/environments/components/kubernetes_status_bar.vue';
+import KubernetesAgentInfo from '~/environments/components/kubernetes_agent_info.vue';
+import KubernetesTabs from '~/environments/components/kubernetes_tabs.vue';
+import environmentClusterAgentQuery from '~/environments/graphql/queries/environment_cluster_agent.query.graphql';
+import createMockApollo from 'helpers/mock_apollo_helper';
+import waitForPromises from 'helpers/wait_for_promises';
+import { agent, kubernetesNamespace, fluxResourcePathMock } from '../../graphql/mock_data';
+import { mockKasTunnelUrl } from '../../mock_data';
+
+describe('~/environments/environment_details/components/kubernetes_overview.vue', () => {
+ Vue.use(VueApollo);
+
+ let wrapper;
+
+ const propsData = {
+ environmentName: 'production',
+ projectFullPath: 'gitlab-group/test-project',
+ };
+
+ const provide = {
+ kasTunnelUrl: mockKasTunnelUrl,
+ };
+
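+ // the K8s access configuration the child components are expected to receive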
+ const configuration = {
+ basePath: provide.kasTunnelUrl.replace(/\/$/, ''),
+ headers: {
+ 'GitLab-Agent-Id': '1',
+ 'Content-Type': 'application/json',
+ Accept: 'application/json',
+ },
+ credentials: 'include',
+ };
+
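+ // mounts the component with a mocked Apollo provider; pass null to simulate a missing cluster agent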
+ const createWrapper = (clusterAgent = agent) => {
+ const defaultEnvironmentData = {
+ data: {
+ project: {
+ id: '1',
+ environment: {
+ id: '1',
+ clusterAgent,
+ kubernetesNamespace,
+ fluxResourcePath: fluxResourcePathMock,
+ },
+ },
+ },
+ };
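+ // resolve the environment's cluster agent query with the data above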
+ const mockApollo = createMockApollo(
+ [[environmentClusterAgentQuery, jest.fn().mockResolvedValue(defaultEnvironmentData)]],
+ [],
+ );
+
+ return shallowMount(KubernetesOverview, {
+ apolloProvider: mockApollo,
+ provide,
+ propsData,
+ });
+ };
+
+ const findLoadingIcon = () => wrapper.findComponent(GlLoadingIcon);
+ const findAgentInfo = () => wrapper.findComponent(KubernetesAgentInfo);
+ const findKubernetesStatusBar = () => wrapper.findComponent(KubernetesStatusBar);
+ const findKubernetesTabs = () => wrapper.findComponent(KubernetesTabs);
+ const findEmptyState = () => wrapper.findComponent(GlEmptyState);
+
+ const findAlert = () => wrapper.findComponent(GlAlert);
+
+ describe('when fetching data', () => {
+ beforeEach(() => {
+ wrapper = createWrapper();
+ });
+
+ it('renders loading indicator', () => {
+ expect(findLoadingIcon().exists()).toBe(true);
+ });
+
+ it("doesn't render Kubernetes related components", () => {
+ expect(findAgentInfo().exists()).toBe(false);
+ expect(findKubernetesStatusBar().exists()).toBe(false);
+ expect(findKubernetesTabs().exists()).toBe(false);
+ });
+
+ it("doesn't render empty state", () => {
+ expect(findEmptyState().exists()).toBe(false);
+ });
+ });
+
+ describe('when data is fetched', () => {
+ it('hides loading indicator', async () => {
+ wrapper = createWrapper();
+ await waitForPromises();
+
+ expect(findLoadingIcon().exists()).toBe(false);
+ });
+
+ describe('and there is cluster agent data', () => {
+ beforeEach(async () => {
+ wrapper = createWrapper();
+ await waitForPromises();
+ });
+
+ it('renders kubernetes agent info', () => {
+ expect(findAgentInfo().props('clusterAgent')).toEqual(agent);
+ });
+
+ it('renders kubernetes tabs', () => {
+ expect(findKubernetesTabs().props()).toEqual({
+ namespace: kubernetesNamespace,
+ configuration,
+ });
+ });
+
+ it('renders kubernetes status bar', () => {
+ expect(findKubernetesStatusBar().props()).toEqual({
+ clusterHealthStatus: 'success',
+ configuration,
+ environmentName: propsData.environmentName,
+ fluxResourcePath: fluxResourcePathMock,
+ });
+ });
+
+ describe('Kubernetes health status', () => {
+ beforeEach(async () => {
+ wrapper = createWrapper();
+ await waitForPromises();
+ });
+
+ it("doesn't set `clusterHealthStatus` when pods are still loading", async () => {
+ findKubernetesTabs().vm.$emit('loading', true);
+ await nextTick();
+
+ expect(findKubernetesStatusBar().props('clusterHealthStatus')).toBe('');
+ });
+
+ it('sets `clusterHealthStatus` as error when pods emitted a failure', async () => {
+ findKubernetesTabs().vm.$emit('update-failed-state', { pods: true });
+ await nextTick();
+
+ expect(findKubernetesStatusBar().props('clusterHealthStatus')).toBe('error');
+ });
+
+ it('sets `clusterHealthStatus` as success when data is loaded and no failures were emitted', () => {
+ expect(findKubernetesStatusBar().props('clusterHealthStatus')).toBe('success');
+ });
+
+ it('sets `clusterHealthStatus` as success after state update if there are no failures', async () => {
+ findKubernetesTabs().vm.$emit('update-failed-state', { pods: true });
+ await nextTick();
+ expect(findKubernetesStatusBar().props('clusterHealthStatus')).toBe('error');
+
+ findKubernetesTabs().vm.$emit('update-failed-state', { pods: false });
+ await nextTick();
+ expect(findKubernetesStatusBar().props('clusterHealthStatus')).toBe('success');
+ });
+ });
+
+ describe('on cluster error', () => {
+ beforeEach(async () => {
+ wrapper = createWrapper();
+ await waitForPromises();
+ });
+
+ it('shows alert with the error message', async () => {
+ const error = 'Error message from pods';
+
+ findKubernetesTabs().vm.$emit('cluster-error', error);
+ await nextTick();
+
+ expect(findAlert().text()).toBe(error);
+ });
+ });
+ });
+
+ describe('and there is no cluster agent data', () => {
+ beforeEach(async () => {
+ wrapper = createWrapper(null);
+ await waitForPromises();
+ });
+
+ it('renders empty state component', () => {
+ expect(findEmptyState().props()).toMatchObject({
+ title: 'No Kubernetes clusters configured',
+ primaryButtonText: 'Get started',
+ primaryButtonLink: '/help/ci/environments/kubernetes_dashboard',
+ });
+ });
+
+ it("doesn't render Kubernetes related components", () => {
+ expect(findAgentInfo().exists()).toBe(false);
+ expect(findKubernetesStatusBar().exists()).toBe(false);
+ expect(findKubernetesTabs().exists()).toBe(false);
+ });
+ });
+ });
+});
diff --git a/spec/frontend/environments/environment_details/index_spec.js b/spec/frontend/environments/environment_details/index_spec.js
index 90ce05a09b0..b352079ac2e 100644
--- a/spec/frontend/environments/environment_details/index_spec.js
+++ b/spec/frontend/environments/environment_details/index_spec.js
@@ -1,7 +1,9 @@
import { GlTabs, GlTab } from '@gitlab/ui';
import { shallowMount } from '@vue/test-utils';
+import { nextTick } from 'vue';
import EnvironmentsDetailPage from '~/environments/environment_details/index.vue';
import DeploymentsHistory from '~/environments/environment_details/components/deployment_history.vue';
+import KubernetesOverview from '~/environments/environment_details/components/kubernetes_overview.vue';
const projectFullPath = 'gitlab-group/test-project';
const environmentName = 'test-environment-name';
@@ -24,8 +26,10 @@ describe('~/environments/environment_details/index.vue', () => {
};
const findTabs = () => wrapper.findComponent(GlTabs);
- const findTab = () => wrapper.findComponent(GlTab);
+ const findAllTabs = () => wrapper.findAllComponents(GlTab);
+ const findTabByIndex = (index) => findAllTabs().at(index);
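+ // tab order: index 0 is Kubernetes overview, index 1 is Deployment history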
const findDeploymentHistory = () => wrapper.findComponent(DeploymentsHistory);
+ const findKubernetesOverview = () => wrapper.findComponent(KubernetesOverview);
beforeEach(() => {
wrapper = createWrapper();
@@ -35,13 +39,43 @@ describe('~/environments/environment_details/index.vue', () => {
expect(findTabs().props('syncActiveTabWithQueryParams')).toBe(true);
});
- describe('deployment history tab', () => {
+ it('sets proper CSS class to the active tab', () => {
+ expect(findTabByIndex(0).props('titleLinkClass')).toBe('gl-inset-border-b-2-theme-accent');
+ expect(findTabByIndex(1).props('titleLinkClass')).toBe('');
+ });
+
+ it('updates the CSS class when the active tab changes', async () => {
+ findTabs().vm.$emit('input', 1);
+ await nextTick();
+
+ expect(findTabByIndex(0).props('titleLinkClass')).toBe('');
+ expect(findTabByIndex(1).props('titleLinkClass')).toBe('gl-inset-border-b-2-theme-accent');
+ });
+
+ describe('kubernetes overview tab', () => {
it('renders correct title', () => {
- expect(findTab().attributes('title')).toBe('Deployment history');
+ expect(findTabByIndex(0).attributes('title')).toBe('Kubernetes overview');
});
it('renders correct query param value', () => {
- expect(findTab().attributes('query-param-value')).toBe('deployment-history');
+ expect(findTabByIndex(0).attributes('query-param-value')).toBe('kubernetes-overview');
+ });
+
+ it('renders kubernetes_overview component with correct props', () => {
+ expect(findKubernetesOverview().props()).toEqual({
+ projectFullPath,
+ environmentName,
+ });
+ });
+ });
+
+ describe('deployment history tab', () => {
+ it('renders correct title', () => {
+ expect(findTabByIndex(1).attributes('title')).toBe('Deployment history');
+ });
+
+ it('renders correct query param value', () => {
+ expect(findTabByIndex(1).attributes('query-param-value')).toBe('deployment-history');
});
it('renders deployment_history component with correct props', () => {
diff --git a/spec/frontend/environments/helpers/k8s_integration_helper_spec.js b/spec/frontend/environments/helpers/k8s_integration_helper_spec.js
index fdfbddf0d94..7db466dd4a5 100644
--- a/spec/frontend/environments/helpers/k8s_integration_helper_spec.js
+++ b/spec/frontend/environments/helpers/k8s_integration_helper_spec.js
@@ -1,7 +1,11 @@
-import { humanizeClusterErrors } from '~/environments/helpers/k8s_integration_helper';
-
+import {
+ humanizeClusterErrors,
+ createK8sAccessConfiguration,
+} from '~/environments/helpers/k8s_integration_helper';
import { CLUSTER_AGENT_ERROR_MESSAGES } from '~/environments/constants';
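+// stub the CSRF helper so the generated headers can be asserted deterministically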
+jest.mock('~/lib/utils/csrf', () => ({ headers: { token: 'mock-csrf-token' } }));
+
describe('k8s_integration_helper', () => {
describe('humanizeClusterErrors', () => {
it.each(['unauthorized', 'forbidden', 'not found', 'other'])(
@@ -11,4 +15,42 @@ describe('k8s_integration_helper', () => {
},
);
});
+
+ describe('createK8sAccessConfiguration', () => {
+ const kasTunnelUrl = '//kas-tunnel-url';
+ const gitlabAgentId = 1;
+
+ const subject = createK8sAccessConfiguration({ kasTunnelUrl, gitlabAgentId });
+
+ it('receives kasTunnelUrl and sets it as a basePath', () => {
+ expect(subject).toMatchObject({
+ basePath: kasTunnelUrl,
+ });
+ });
+
+ it('receives gitlabAgentId and sets it as part of headers', () => {
+ expect(subject.headers).toMatchObject({
+ 'GitLab-Agent-Id': gitlabAgentId,
+ });
+ });
+
+ it('merges the csrf headers into the headers', () => {
+ expect(subject.headers).toMatchObject({
+ token: 'mock-csrf-token',
+ });
+ });
+
+ it('provides proper content type to the headers', () => {
+ expect(subject.headers).toMatchObject({
+ 'Content-Type': 'application/json',
+ Accept: 'application/json',
+ });
+ });
+
+ it('includes credentials', () => {
+ expect(subject).toMatchObject({
+ credentials: 'include',
+ });
+ });
+ });
});
diff --git a/spec/frontend/environments/kubernetes_status_bar_spec.js b/spec/frontend/environments/kubernetes_status_bar_spec.js
index 9c729c8da20..21f8f75f068 100644
--- a/spec/frontend/environments/kubernetes_status_bar_spec.js
+++ b/spec/frontend/environments/kubernetes_status_bar_spec.js
@@ -4,6 +4,8 @@ import { GlLoadingIcon, GlPopover, GlSprintf } from '@gitlab/ui';
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
import KubernetesStatusBar from '~/environments/components/kubernetes_status_bar.vue';
import {
+ CLUSTER_HEALTH_SUCCESS,
+ CLUSTER_HEALTH_ERROR,
CLUSTER_STATUS_HEALTHY_TEXT,
CLUSTER_STATUS_UNHEALTHY_TEXT,
SYNC_STATUS_BADGES,
@@ -72,8 +74,8 @@ describe('~/environments/components/kubernetes_status_bar.vue', () => {
});
it.each([
- ['success', 'success', 'status-success', CLUSTER_STATUS_HEALTHY_TEXT],
- ['error', 'danger', 'status-alert', CLUSTER_STATUS_UNHEALTHY_TEXT],
+ [CLUSTER_HEALTH_SUCCESS, 'success', 'status-success', CLUSTER_STATUS_HEALTHY_TEXT],
+ [CLUSTER_HEALTH_ERROR, 'danger', 'status-alert', CLUSTER_STATUS_UNHEALTHY_TEXT],
])(
'when clusterHealthStatus is %s shows health badge with variant %s, icon %s and text %s',
(status, variant, icon, text) => {
diff --git a/spec/lib/generators/batched_background_migration/batched_background_migration_generator_spec.rb b/spec/lib/generators/batched_background_migration/batched_background_migration_generator_spec.rb
index 893cf976074..e14813746cb 100644
--- a/spec/lib/generators/batched_background_migration/batched_background_migration_generator_spec.rb
+++ b/spec/lib/generators/batched_background_migration/batched_background_migration_generator_spec.rb
@@ -68,8 +68,9 @@ RSpec.describe BatchedBackgroundMigration::BatchedBackgroundMigrationGenerator,
expect(migration_job_file).to eq(expected_ee_migration_job_file)
end
- assert_file('ee/spec/lib/ee/gitlab/background_migration/my_batched_migration_spec.rb') do |migration_job_spec_file| # rubocop:disable Layout/LineLength
- expect(migration_job_spec_file).to match(/#{expected_migration_job_spec_file}/)
+ migration_job_spec_file = 'ee/spec/lib/ee/gitlab/background_migration/my_batched_migration_spec.rb'
+ assert_file(migration_job_spec_file) do |spec_file|
+ expect(spec_file).to match(/#{expected_migration_job_spec_file}/)
end
end
end
diff --git a/spec/lib/generators/batched_background_migration/expected_files/my_batched_migration_spec.txt b/spec/lib/generators/batched_background_migration/expected_files/my_batched_migration_spec.txt
index 185f6deeade..989d40cd8be 100644
--- a/spec/lib/generators/batched_background_migration/expected_files/my_batched_migration_spec.txt
+++ b/spec/lib/generators/batched_background_migration/expected_files/my_batched_migration_spec.txt
@@ -2,6 +2,6 @@
require 'spec_helper'
-RSpec.describe Gitlab::BackgroundMigration::MyBatchedMigration, feature_category: :database do # rubocop:disable Layout/LineLength
+RSpec.describe Gitlab::BackgroundMigration::MyBatchedMigration, feature_category: :database do
# Tests go here
end
diff --git a/spec/lib/gitlab/database/sharding_key_spec.rb b/spec/lib/gitlab/database/sharding_key_spec.rb
index 2a3f2a3155c..9891914a38c 100644
--- a/spec/lib/gitlab/database/sharding_key_spec.rb
+++ b/spec/lib/gitlab/database/sharding_key_spec.rb
@@ -50,7 +50,8 @@ RSpec.describe 'new tables missing sharding_key', feature_category: :cell do
'value_stream_dashboard_counts.namespace_id', # https://gitlab.com/gitlab-org/gitlab/-/issues/439555
'zoekt_indices.namespace_id',
'zoekt_repositories.project_identifier',
- 'ci_namespace_monthly_usages.namespace_id' # https://gitlab.com/gitlab-org/gitlab/-/issues/321400
+ 'ci_namespace_monthly_usages.namespace_id', # https://gitlab.com/gitlab-org/gitlab/-/issues/321400
+ 'ci_job_artifacts.project_id'
]
end
diff --git a/spec/models/ci/job_artifact_spec.rb b/spec/models/ci/job_artifact_spec.rb
index e65c1e2f577..69ab2692562 100644
--- a/spec/models/ci/job_artifact_spec.rb
+++ b/spec/models/ci/job_artifact_spec.rb
@@ -892,4 +892,26 @@ RSpec.describe Ci::JobArtifact, feature_category: :build_artifacts do
it_behaves_like 'returning attributes for object deletion'
end
end
+
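+ # the ci_partitioning_use_ci_job_artifacts_routing_table flag switches reads to the partitioned routing table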
+ describe 'routing table switch' do
+ context 'with ff disabled' do
+ before do
+ stub_feature_flags(ci_partitioning_use_ci_job_artifacts_routing_table: false)
+ end
+
+ it 'uses the legacy table' do
+ expect(described_class.table_name).to eq('ci_job_artifacts')
+ end
+ end
+
+ context 'with ff enabled' do
+ before do
+ stub_feature_flags(ci_partitioning_use_ci_job_artifacts_routing_table: true)
+ end
+
+ it 'uses the routing table' do
+ expect(described_class.table_name).to eq('p_ci_job_artifacts')
+ end
+ end
+ end
end
diff --git a/spec/services/ci/unlock_artifacts_service_spec.rb b/spec/services/ci/unlock_artifacts_service_spec.rb
index c149eaf41e5..2219ee3bddb 100644
--- a/spec/services/ci/unlock_artifacts_service_spec.rb
+++ b/spec/services/ci/unlock_artifacts_service_spec.rb
@@ -208,6 +208,7 @@ RSpec.describe Ci::UnlockArtifactsService, feature_category: :continuous_integra
subject { described_class.new(pipeline.project, pipeline.user).unlock_job_artifacts_query(pipeline_ids) }
let(:builds_table) { Ci::Build.quoted_table_name }
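+ # quoted name resolves to "ci_job_artifacts" or "p_ci_job_artifacts" depending on the routing-table flag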
+ let(:job_artifacts_table) { Ci::JobArtifact.quoted_table_name }
context 'when given a single pipeline ID' do
let(:pipeline_ids) { [older_pipeline.id] }
@@ -215,11 +216,11 @@ RSpec.describe Ci::UnlockArtifactsService, feature_category: :continuous_integra
it 'produces the expected SQL string' do
expect(subject.squish).to eq <<~SQL.squish
UPDATE
- "ci_job_artifacts"
+ #{job_artifacts_table}
SET
"locked" = 0
WHERE
- "ci_job_artifacts"."job_id" IN
+ #{job_artifacts_table}."job_id" IN
(SELECT
#{builds_table}."id"
FROM
@@ -228,7 +229,7 @@ RSpec.describe Ci::UnlockArtifactsService, feature_category: :continuous_integra
#{builds_table}."type" = 'Ci::Build'
AND #{builds_table}."commit_id" = #{older_pipeline.id})
RETURNING
- ("ci_job_artifacts"."id")
+ (#{job_artifacts_table}."id")
SQL
end
end
@@ -239,11 +240,11 @@ RSpec.describe Ci::UnlockArtifactsService, feature_category: :continuous_integra
it 'produces the expected SQL string' do
expect(subject.squish).to eq <<~SQL.squish
UPDATE
- "ci_job_artifacts"
+ #{job_artifacts_table}
SET
"locked" = 0
WHERE
- "ci_job_artifacts"."job_id" IN
+ #{job_artifacts_table}."job_id" IN
(SELECT
#{builds_table}."id"
FROM
@@ -252,7 +253,7 @@ RSpec.describe Ci::UnlockArtifactsService, feature_category: :continuous_integra
#{builds_table}."type" = 'Ci::Build'
AND #{builds_table}."commit_id" IN (#{pipeline_ids.join(', ')}))
RETURNING
- ("ci_job_artifacts"."id")
+ (#{job_artifacts_table}."id")
SQL
end
end
diff --git a/yarn.lock b/yarn.lock
index 70e8694aefb..7a0ac6d4dff 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -4636,10 +4636,10 @@ core-js-pure@^3.30.2:
resolved "https://registry.yarnpkg.com/core-js-pure/-/core-js-pure-3.35.0.tgz#4660033304a050215ae82e476bd2513a419fbb34"
integrity sha512-f+eRYmkou59uh7BPcyJ8MC76DiGhspj1KMxVIcF24tzP8NA9HVa1uC7BTW2tgx7E1QVCzDzsgp7kArrzhlz8Ew==
-core-js@^3.29.1, core-js@^3.35.1, core-js@^3.6.5:
- version "3.35.1"
- resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.35.1.tgz#9c28f8b7ccee482796f8590cc8d15739eaaf980c"
- integrity sha512-IgdsbxNyMskrTFxa9lWHyMwAJU5gXOPP+1yO+K59d50VLVAIDAbs7gIv705KzALModfK3ZrSZTPNpC0PQgIZuw==
+core-js@^3.29.1, core-js@^3.36.0, core-js@^3.6.5:
+ version "3.36.0"
+ resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.36.0.tgz#e752fa0b0b462a0787d56e9d73f80b0f7c0dde68"
+ integrity sha512-mt7+TUBbTFg5+GngsAxeKBTl5/VS0guFeJacYge9OmHb+m058UwwIm41SE9T4Den7ClatV57B6TYTuJ0CX1MAw==
core-util-is@~1.0.0:
version "1.0.3"