Add latest changes from gitlab-org/gitlab@master

GitLab Bot 2024-05-07 12:10:36 +00:00
parent 265ec5be60
commit 201937191f
137 changed files with 2356 additions and 1236 deletions

View File

@ -2,22 +2,6 @@
# Cop supports --autocorrect.
Layout/ArgumentAlignment:
Exclude:
- 'app/graphql/mutations/ci/job/base.rb'
- 'app/graphql/mutations/ci/job/cancel.rb'
- 'app/graphql/mutations/ci/job/play.rb'
- 'app/graphql/mutations/ci/job/retry.rb'
- 'app/graphql/mutations/ci/job/unschedule.rb'
- 'app/graphql/mutations/ci/job_artifact/destroy.rb'
- 'app/graphql/mutations/ci/job_token_scope/add_project.rb'
- 'app/graphql/mutations/ci/job_token_scope/remove_project.rb'
- 'app/graphql/mutations/ci/pipeline/base.rb'
- 'app/graphql/mutations/ci/pipeline/retry.rb'
- 'app/graphql/mutations/ci/pipeline_schedule/base.rb'
- 'app/graphql/mutations/ci/pipeline_schedule/create.rb'
- 'app/graphql/mutations/ci/pipeline_schedule/play.rb'
- 'app/graphql/mutations/ci/pipeline_schedule/take_ownership.rb'
- 'app/graphql/mutations/ci/pipeline_schedule/update.rb'
- 'app/graphql/mutations/ci/runner/bulk_delete.rb'
- 'app/graphql/mutations/customer_relations/contacts/base.rb'
- 'app/graphql/mutations/customer_relations/contacts/create.rb'
- 'app/graphql/mutations/customer_relations/contacts/update.rb'
@ -173,7 +157,6 @@ Layout/ArgumentAlignment:
- 'ee/app/graphql/mutations/iterations/cadences/destroy.rb'
- 'ee/app/graphql/mutations/iterations/create.rb'
- 'ee/app/graphql/mutations/iterations/update.rb'
- 'ee/app/graphql/mutations/namespaces/base.rb'
- 'ee/app/graphql/mutations/projects/initialize_product_analytics.rb'
- 'ee/app/graphql/mutations/projects/set_compliance_framework.rb'
- 'ee/app/graphql/mutations/quality_management/test_cases/create.rb'

View File

@ -172,21 +172,6 @@ Layout/SpaceInLambdaLiteral:
- 'ee/app/models/ee/release.rb'
- 'ee/app/models/ee/vulnerability.rb'
- 'ee/app/models/elasticsearch_indexed_namespace.rb'
- 'ee/app/models/geo_node.rb'
- 'ee/app/models/gitlab_subscription.rb'
- 'ee/app/models/incident_management/escalation_policy.rb'
- 'ee/app/models/incident_management/escalation_rule.rb'
- 'ee/app/models/incident_management/oncall_participant.rb'
- 'ee/app/models/incident_management/oncall_rotation.rb'
- 'ee/app/models/incident_management/oncall_schedule.rb'
- 'ee/app/models/incident_management/oncall_shift.rb'
- 'ee/app/models/iteration.rb'
- 'ee/app/models/iterations/cadence.rb'
- 'ee/app/models/merge_request_block.rb'
- 'ee/app/models/merge_requests/compliance_violation.rb'
- 'ee/app/models/namespaces/namespace_ban.rb'
- 'ee/app/models/requirements_management/requirement.rb'
- 'ee/app/models/resource_iteration_event.rb'
- 'ee/app/models/saml_group_link.rb'
- 'ee/app/models/sca/license_compliance.rb'
- 'ee/app/models/security/finding.rb'

View File

@ -11,17 +11,6 @@ Lint/AmbiguousOperatorPrecedence:
- 'app/helpers/timeboxes_helper.rb'
- 'app/helpers/tree_helper.rb'
- 'app/helpers/users_helper.rb'
- 'app/models/concerns/relative_positioning.rb'
- 'app/models/design_management/version.rb'
- 'app/models/integrations/chat_message/pipeline_message.rb'
- 'app/models/merge_request_diff.rb'
- 'app/models/namespace.rb'
- 'app/models/network/graph.rb'
- 'app/models/note.rb'
- 'app/models/notification_reason.rb'
- 'app/models/project_feature.rb'
- 'app/models/terraform/state.rb'
- 'app/models/webauthn_registration.rb'
- 'app/presenters/project_presenter.rb'
- 'app/services/cohorts_service.rb'
- 'app/services/concerns/validates_classification_label.rb'

View File

@ -91,19 +91,3 @@ Lint/SymbolConversion:
- 'spec/requests/api/terraform/state_spec.rb'
- 'spec/requests/mailgun/webhooks_controller_spec.rb'
- 'spec/requests/users_controller_spec.rb'
- 'spec/scripts/pipeline_test_report_builder_spec.rb'
- 'spec/serializers/integrations/harbor_serializers/artifact_entity_spec.rb'
- 'spec/serializers/integrations/harbor_serializers/tag_entity_spec.rb'
- 'spec/services/bulk_imports/get_importable_data_service_spec.rb'
- 'spec/services/ci/pipeline_processing/atomic_processing_service_spec.rb'
- 'spec/services/git/base_hooks_service_spec.rb'
- 'spec/services/incident_management/timeline_event_tags/create_service_spec.rb'
- 'spec/services/jira_connect/sync_service_spec.rb'
- 'spec/services/ml/experiment_tracking/candidate_repository_spec.rb'
- 'spec/support/helpers/kubernetes_helpers.rb'
- 'spec/support/helpers/prometheus_helpers.rb'
- 'spec/support/shared_examples/harbor/artifacts_controller_shared_examples.rb'
- 'spec/support/shared_examples/harbor/repositories_controller_shared_examples.rb'
- 'spec/support/shared_examples/harbor/tags_controller_shared_examples.rb'
- 'spec/support/shared_examples/models/diff_positionable_note_shared_examples.rb'
- 'spec/workers/gitlab/github_gists_import/import_gist_worker_spec.rb'

View File

@ -176,7 +176,6 @@ RSpec/FeatureCategory:
- 'ee/spec/graphql/mutations/merge_requests/accept_spec.rb'
- 'ee/spec/graphql/mutations/merge_requests/set_assignees_spec.rb'
- 'ee/spec/graphql/mutations/merge_requests/set_reviewers_spec.rb'
- 'ee/spec/graphql/mutations/namespaces/increase_storage_temporarily_spec.rb'
- 'ee/spec/graphql/mutations/projects/set_compliance_framework_spec.rb'
- 'ee/spec/graphql/mutations/projects/set_locked_spec.rb'
- 'ee/spec/graphql/mutations/releases/update_spec.rb'

View File

@ -140,7 +140,6 @@ RSpec/NamedSubject:
- 'ee/spec/graphql/mutations/issues/update_spec.rb'
- 'ee/spec/graphql/mutations/merge_requests/set_reviewers_spec.rb'
- 'ee/spec/graphql/mutations/merge_requests/update_approval_rules_spec.rb'
- 'ee/spec/graphql/mutations/namespaces/increase_storage_temporarily_spec.rb'
- 'ee/spec/graphql/mutations/projects/set_locked_spec.rb'
- 'ee/spec/graphql/mutations/releases/update_spec.rb'
- 'ee/spec/graphql/mutations/requirements_management/create_requirement_spec.rb'
@ -261,7 +260,6 @@ RSpec/NamedSubject:
- 'ee/spec/lib/bulk_imports/groups/pipelines/iterations_cadences_pipeline_spec.rb'
- 'ee/spec/lib/bulk_imports/groups/pipelines/iterations_pipeline_spec.rb'
- 'ee/spec/lib/code_suggestions/prompts/code_completion/vertex_ai_spec.rb'
- 'ee/spec/lib/code_suggestions/prompts/code_generation/anthropic_spec.rb'
- 'ee/spec/lib/code_suggestions/task_factory_spec.rb'
- 'ee/spec/lib/code_suggestions/tasks/base_spec.rb'
- 'ee/spec/lib/ee/api/entities/dependency_list_export_spec.rb'

View File

@ -1 +1 @@
613fa6169f5750468bdd4a15b1f453ea4f17c45c
86baea59fb2ce0379b40ee571a168cfe8b134079

View File

@ -83,7 +83,6 @@ gem 'omniauth-google-oauth2', '~> 1.1' # rubocop:todo Gemfile/MissingFeatureCate
gem 'omniauth-oauth2-generic', '~> 0.2.2' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'omniauth-saml', '~> 2.1.0' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'omniauth-shibboleth-redux', '~> 2.0', require: 'omniauth-shibboleth' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'omniauth-twitter', '~> 1.4' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'omniauth_crowd', '~> 2.4.0', path: 'vendor/gems/omniauth_crowd' # See vendor/gems/omniauth_crowd/README.md # rubocop:todo Gemfile/MissingFeatureCategory
gem 'omniauth_openid_connect', '~> 0.6.1' # rubocop:todo Gemfile/MissingFeatureCategory
# Locked until Ruby 3.0 upgrade since upgrading will pull in an updated net-smtp gem.

View File

@ -436,12 +436,10 @@
{"name":"omniauth-facebook","version":"4.0.0","platform":"ruby","checksum":"05ae3565c8fdb38df8dab04eb8ca854ea6c18e81591d3e6598ce101293a2f20f"},
{"name":"omniauth-github","version":"2.0.1","platform":"ruby","checksum":"8ff8e70ac6d6db9d52485eef52cfa894938c941496e66b52b5e2773ade3ccad4"},
{"name":"omniauth-google-oauth2","version":"1.1.1","platform":"ruby","checksum":"4496f126e84eaf760f9c6a5c69e5e7511f98092d7f25ad79fd2c0ae5e09b5039"},
{"name":"omniauth-oauth","version":"1.2.0","platform":"ruby","checksum":"e7a78658498dc83aa3f3be1a776425c0f06a60d45d9236dbe5e98e61fadf827b"},
{"name":"omniauth-oauth2","version":"1.8.0","platform":"ruby","checksum":"b2f8e9559cc7e2d4efba57607691d6d2b634b879fc5b5b6ccfefa3da85089e78"},
{"name":"omniauth-oauth2-generic","version":"0.2.8","platform":"ruby","checksum":"ce6e8539019d5ebf2f48867072b9f248f148bb4cbe7166dee655865abfae7613"},
{"name":"omniauth-saml","version":"2.1.0","platform":"ruby","checksum":"969cb7ba271891d09dfa57b206fc274f43203c52727492517decda93decc6906"},
{"name":"omniauth-shibboleth-redux","version":"2.0.0","platform":"ruby","checksum":"e9b353fd103405fcc8549e8510b9cad857acf0b286d764fac5dba8a93ab8ffe1"},
{"name":"omniauth-twitter","version":"1.4.0","platform":"ruby","checksum":"c5cc6c77cd767745ffa9ebbd5fbd694a3fa99d1d2d82a4d7def0bf3b6131b264"},
{"name":"omniauth_openid_connect","version":"0.6.1","platform":"ruby","checksum":"5f1318f5b19b05e339ff494def060b57a503b1e3ea83c3a0ced6cc014407d423"},
{"name":"open4","version":"1.3.4","platform":"ruby","checksum":"a1df037310624ecc1ea1d81264b11c83e96d0c3c1c6043108d37d396dcd0f4b1"},
{"name":"openid_connect","version":"1.3.0","platform":"ruby","checksum":"a796855096850cc01140e37ea6ae9fd14f2be818b9b5bc698418063dfe228770"},

View File

@ -1195,9 +1195,6 @@ GEM
oauth2 (~> 2.0.6)
omniauth (~> 2.0)
omniauth-oauth2 (~> 1.8.0)
omniauth-oauth (1.2.0)
oauth
omniauth (>= 1.0, < 3)
omniauth-oauth2 (1.8.0)
oauth2 (>= 1.4, < 3)
omniauth (~> 2.0)
@ -1209,9 +1206,6 @@ GEM
ruby-saml (~> 1.12)
omniauth-shibboleth-redux (2.0.0)
omniauth (>= 2.0.0)
omniauth-twitter (1.4.0)
omniauth-oauth (~> 1.1)
rack
omniauth_openid_connect (0.6.1)
omniauth (>= 1.9, < 3)
openid_connect (~> 1.1)
@ -2124,7 +2118,6 @@ DEPENDENCIES
omniauth-salesforce (~> 1.0.5)!
omniauth-saml (~> 2.1.0)
omniauth-shibboleth-redux (~> 2.0)
omniauth-twitter (~> 1.4)
omniauth_crowd (~> 2.4.0)!
omniauth_openid_connect (~> 0.6.1)
openid_connect (= 1.3.0)

Binary file not shown (image, previously 3.6 KiB).

View File

@ -416,8 +416,8 @@ export default {
label-for="ci-variable-type"
class="gl-border-none"
:class="{
'gl-mb-n5': !hideEnvironmentScope,
'gl-mb-n1': hideEnvironmentScope,
'-gl-mb-5': !hideEnvironmentScope,
'-gl-mb-1': hideEnvironmentScope,
}"
>
<gl-form-select
@ -428,7 +428,7 @@ export default {
</gl-form-group>
<gl-form-group
v-if="!hideEnvironmentScope"
class="gl-border-none gl-mb-n5"
class="gl-border-none -gl-mb-5"
label-for="ci-variable-env"
data-testid="environment-scope"
>
@ -464,9 +464,9 @@ export default {
readonly
/>
</gl-form-group>
<gl-form-group class="gl-border-none gl-mb-n8">
<gl-form-group class="gl-border-none -gl-mb-8">
<template #label>
<div class="gl-display-flex gl-align-items-center gl-mb-n3">
<div class="gl-display-flex gl-align-items-center -gl-mb-3">
<span class="gl-mr-2">
{{ $options.i18n.flags }}
</span>
@ -509,7 +509,7 @@ export default {
<gl-form-group
label-for="ci-variable-description"
:label="$options.i18n.description"
class="gl-border-none gl-mb-n5"
class="gl-border-none -gl-mb-5"
data-testid="ci-variable-description-label"
:description="$options.i18n.descriptionHelpText"
optional
@ -525,7 +525,7 @@ export default {
v-model="variable.key"
:token-list="$options.awsTokenList"
:label-text="$options.i18n.key"
class="gl-border-none gl-pb-0! gl-mb-n5"
class="gl-border-none gl-pb-0! -gl-mb-5"
data-testid="ci-variable-key"
/>
<p
@ -548,7 +548,7 @@ export default {
<gl-form-group
:label="$options.i18n.value"
label-for="ci-variable-value"
class="gl-border-none gl-mb-n2"
class="gl-border-none -gl-mb-2"
data-testid="ci-variable-value-label"
:invalid-feedback="maskedValidationIssuesText"
:state="isValueValid"

View File

@ -279,7 +279,7 @@ export default {
<template #cell(key)="{ item }">
<div data-testid="ci-variable-table-row-variable">
<div
class="gl-display-flex gl-align-items-flex-start gl-justify-content-end gl-md-justify-content-start gl-mr-n3"
class="gl-display-flex gl-align-items-flex-start gl-justify-content-end gl-md-justify-content-start -gl-mr-3"
>
<span
:id="`ci-variable-key-${item.id}`"
@ -315,7 +315,7 @@ export default {
</template>
<template v-if="!isInheritedGroupVars" #cell(value)="{ item }">
<div
class="gl-display-flex gl-align-items-flex-start gl-justify-content-end gl-md-justify-content-start gl-mr-n3"
class="gl-display-flex gl-align-items-flex-start gl-justify-content-end gl-md-justify-content-start -gl-mr-3"
>
<span v-if="areValuesHidden" data-testid="hiddenValue">*****</span>
<span
@ -339,7 +339,7 @@ export default {
</template>
<template #cell(environmentScope)="{ item }">
<div
class="gl-display-flex gl-align-items-flex-start gl-justify-content-end gl-md-justify-content-start gl-mr-n3"
class="gl-display-flex gl-align-items-flex-start gl-justify-content-end gl-md-justify-content-start -gl-mr-3"
>
<span
:id="`ci-variable-env-${item.id}`"
@ -360,7 +360,7 @@ export default {
</template>
<template v-if="isInheritedGroupVars" #cell(group)="{ item }">
<div
class="gl-display-flex gl-align-items-flex-start gl-justify-content-end gl-md-justify-content-start gl-mr-n3"
class="gl-display-flex gl-align-items-flex-start gl-justify-content-end gl-md-justify-content-start -gl-mr-3"
>
<gl-link
:id="`ci-variable-group-${item.id}`"
@ -373,7 +373,7 @@ export default {
</div>
</template>
<template v-if="!isInheritedGroupVars" #cell(actions)="{ item }">
<div class="gl-display-flex gl-justify-content-end gl-mt-n2 gl-mb-n2">
<div class="gl-display-flex gl-justify-content-end -gl-mt-2 -gl-mb-2">
<gl-button
icon="pencil"
size="small"

View File

@ -214,7 +214,7 @@ export default {
:pipeline-iid="item.iid"
:pipeline-path="item.path"
:project-path="getProjectPath(item)"
class="gl-ml-n4 gl-mt-n3 gl-mb-n1"
class="-gl-ml-4 -gl-mt-3 -gl-mb-1"
/>
</template>
</gl-table-lite>

View File

@ -100,7 +100,7 @@ export default {
{{ name }}
</h1>
<div class="gl-display-flex gl-align-self-start gl-mt-n2">
<div class="gl-display-flex gl-align-self-start -gl-mt-2">
<div class="gl-flex-grow-1 gl-flex-shrink-0 gl-text-right">
<gl-button
:aria-label="__('Toggle sidebar')"

View File

@ -68,7 +68,7 @@ export default {
<template>
<div>
<div class="gl-text-truncate gl-p-3 gl-mt-n3 gl-mx-n3 gl-mb-n2">
<div class="gl-text-truncate gl-p-3 -gl-mt-3 gl-mx-n3 -gl-mb-2">
<gl-icon
v-if="jobStuck"
v-gl-tooltip="$options.i18n.stuckText"

View File

@ -40,7 +40,7 @@ export default {
<template>
<div>
<div class="gl-p-3 gl-mt-n3 gl-mx-n3">
<div class="gl-p-3 -gl-mt-3 gl-mx-n3">
<gl-link class="gl-text-truncate" :href="pipelinePath" data-testid="pipeline-id">
{{ pipelineId }}
</gl-link>

View File

@ -126,7 +126,7 @@ export default {
hide-tooltip
/>
<div class="gl-font-weight-100 gl-font-size-lg gl-ml-n4 gl-align-self-center">
<div class="gl-font-weight-100 gl-font-size-lg -gl-ml-4 gl-align-self-center">
{{ group.size }}
</div>
</div>

View File

@ -130,7 +130,7 @@ export default {
<template #stages>
<div
data-testid="stage-column-title"
class="stage-column-title gl-display-flex gl-justify-content-space-between gl-relative gl-font-weight-bold gl-pipeline-job-width gl-text-truncate gl-line-height-36 gl-pl-4 gl-mb-n2"
class="stage-column-title gl-display-flex gl-justify-content-space-between gl-relative gl-font-weight-bold gl-pipeline-job-width gl-text-truncate gl-line-height-36 gl-pl-4 -gl-mb-2"
>
<span :title="name" class="gl-text-truncate gl-pr-3 gl-w-17/20">
{{ name }}

View File

@ -140,7 +140,7 @@ export default {
>
<template #list-item>
<div
class="gl-display-flex gl-align-items-center gl-justify-content-space-between gl-mt-n2 gl-mb-n2 gl-ml-n2"
class="gl-display-flex gl-align-items-center gl-justify-content-space-between -gl-mt-2 -gl-mb-2 -gl-ml-2"
>
<job-name-component
v-gl-tooltip.viewport.left
@ -155,7 +155,7 @@ export default {
:tooltip-text="jobActionTooltipText"
:link="status.action.path"
:action-icon="status.action.icon"
class="gl-mt-n2 gl-mr-n2"
class="-gl-mt-2 -gl-mr-2"
/>
</div>
</template>

View File

@ -153,7 +153,7 @@ export default {
<span v-if="commitTitle" class="gl-display-flex">
<tooltip-on-truncate
:title="commitTitle"
class="gl-flex-grow-1 gl-text-truncate gl-p-3 gl-ml-n3 gl-mr-n3 gl-mt-n3 gl-mb-n3"
class="gl-flex-grow-1 gl-text-truncate gl-p-3 -gl-ml-3 -gl-mr-3 -gl-mt-3 -gl-mb-3"
>
<gl-link
:href="commitUrl"

View File

@ -31,7 +31,7 @@ export default {
return this.value && this.value === this.checked;
},
imgClass() {
return 'gl-h-6 gl-mt-n2 gl-mr-2';
return 'gl-h-6 -gl-mt-2 gl-mr-2';
},
},
methods: {

View File

@ -36,6 +36,7 @@ const ICON_COLORS = {
merge: 'gl-bg-blue-100 gl-text-blue-700',
'issue-close': 'gl-bg-blue-100 gl-text-blue-700',
issues: 'gl-bg-green-100 gl-text-green-700',
error: 'gl-bg-red-100 gl-text-red-700',
};
export default {

View File

@ -258,6 +258,10 @@
top: calc(#{$calc-application-header-height} - 1px);
border-radius: $border-radius-default $border-radius-default 0 0;
box-shadow: 0 -4px 0 0 var(--white);
.gl-dark & {
box-shadow: 0 -4px 0 0 var(--gray-10);
}
}
/*

View File

@ -7,8 +7,8 @@ module Mutations
JobID = ::Types::GlobalIDType[::Ci::Build]
argument :id, JobID,
required: true,
description: 'ID of the job to mutate.'
required: true,
description: 'ID of the job to mutate.'
def find_object(id:)
GlobalID::Locator.locate(id)

View File

@ -7,9 +7,9 @@ module Mutations
graphql_name 'JobCancel'
field :job,
Types::Ci::JobType,
null: true,
description: 'Job after the mutation.'
Types::Ci::JobType,
null: true,
description: 'Job after the mutation.'
authorize :cancel_build

View File

@ -7,15 +7,15 @@ module Mutations
graphql_name 'JobPlay'
field :job,
Types::Ci::JobType,
null: true,
description: 'Job after the mutation.'
Types::Ci::JobType,
null: true,
description: 'Job after the mutation.'
argument :variables, [::Types::Ci::VariableInputType],
required: false,
default_value: [],
replace_null_with_default: true,
description: 'Variables to use when playing a manual job.'
required: false,
default_value: [],
replace_null_with_default: true,
description: 'Variables to use when playing a manual job.'
authorize :update_build

View File

@ -9,19 +9,19 @@ module Mutations
JobID = ::Types::GlobalIDType[::Ci::Processable]
argument :id, JobID,
required: true,
description: 'ID of the job to mutate.'
required: true,
description: 'ID of the job to mutate.'
field :job,
Types::Ci::JobType,
null: true,
description: 'Job after the mutation.'
Types::Ci::JobType,
null: true,
description: 'Job after the mutation.'
argument :variables, [::Types::Ci::VariableInputType],
required: false,
default_value: [],
replace_null_with_default: true,
description: 'Variables to use when retrying a manual job.'
required: false,
default_value: [],
replace_null_with_default: true,
description: 'Variables to use when retrying a manual job.'
authorize :update_build

View File

@ -7,9 +7,9 @@ module Mutations
graphql_name 'JobUnschedule'
field :job,
Types::Ci::JobType,
null: true,
description: 'Job after the mutation.'
Types::Ci::JobType,
null: true,
description: 'Job after the mutation.'
authorize :update_build

View File

@ -11,14 +11,14 @@ module Mutations
ArtifactID = ::Types::GlobalIDType[::Ci::JobArtifact]
argument :id,
ArtifactID,
required: true,
description: 'ID of the artifact to delete.'
ArtifactID,
required: true,
description: 'ID of the artifact to delete.'
field :artifact,
Types::Ci::JobArtifactType,
null: true,
description: 'Deleted artifact.'
Types::Ci::JobArtifactType,
null: true,
description: 'Deleted artifact.'
def find_object(id:)
GlobalID::Locator.locate(id)

View File

@ -11,26 +11,26 @@ module Mutations
authorize :admin_project
argument :project_path, GraphQL::Types::ID,
required: true,
description: 'Project that the CI job token scope belongs to.'
required: true,
description: 'Project that the CI job token scope belongs to.'
argument :target_project_path, GraphQL::Types::ID,
required: true,
description: 'Project to be added to the CI job token scope.'
required: true,
description: 'Project to be added to the CI job token scope.'
argument :direction,
::Types::Ci::JobTokenScope::DirectionEnum,
required: false,
deprecated: {
reason: 'Outbound job token scope is being removed. This field can now only be set to INBOUND',
milestone: '16.0'
},
description: 'Direction of access, which defaults to INBOUND.'
::Types::Ci::JobTokenScope::DirectionEnum,
required: false,
deprecated: {
reason: 'Outbound job token scope is being removed. This field can now only be set to INBOUND',
milestone: '16.0'
},
description: 'Direction of access, which defaults to INBOUND.'
field :ci_job_token_scope,
Types::Ci::JobTokenScopeType,
null: true,
description: "CI job token's access scope."
Types::Ci::JobTokenScopeType,
null: true,
description: "CI job token's access scope."
def resolve(project_path:, target_project_path:, direction: nil)
project = authorized_find!(project_path)

View File

@ -11,26 +11,26 @@ module Mutations
authorize :admin_project
argument :project_path, GraphQL::Types::ID,
required: true,
description: 'Project that the CI job token scope belongs to.'
required: true,
description: 'Project that the CI job token scope belongs to.'
argument :target_project_path, GraphQL::Types::ID,
required: true,
description: 'Project to be removed from the CI job token scope.'
required: true,
description: 'Project to be removed from the CI job token scope.'
argument :direction,
::Types::Ci::JobTokenScope::DirectionEnum,
required: false,
deprecated: {
reason: 'Outbound job token scope is being removed. This field can now only be set to INBOUND',
milestone: '16.9'
},
description: 'Direction of access, which defaults to outbound.'
::Types::Ci::JobTokenScope::DirectionEnum,
required: false,
deprecated: {
reason: 'Outbound job token scope is being removed. This field can now only be set to INBOUND',
milestone: '16.9'
},
description: 'Direction of access, which defaults to outbound.'
field :ci_job_token_scope,
Types::Ci::JobTokenScopeType,
null: true,
description: "CI job token's scope of access."
Types::Ci::JobTokenScopeType,
null: true,
description: "CI job token's scope of access."
def resolve(project_path:, target_project_path:, direction: :outbound)
project = authorized_find!(project_path)

View File

@ -7,8 +7,8 @@ module Mutations
PipelineID = ::Types::GlobalIDType[::Ci::Pipeline]
argument :id, PipelineID,
required: true,
description: 'ID of the pipeline to mutate.'
required: true,
description: 'ID of the pipeline to mutate.'
private

View File

@ -7,9 +7,9 @@ module Mutations
graphql_name 'PipelineRetry'
field :pipeline,
Types::Ci::PipelineType,
null: true,
description: 'Pipeline after mutation.'
Types::Ci::PipelineType,
null: true,
description: 'Pipeline after mutation.'
authorize :update_pipeline

View File

@ -7,8 +7,8 @@ module Mutations
PipelineScheduleID = ::Types::GlobalIDType[::Ci::PipelineSchedule]
argument :id, PipelineScheduleID,
required: true,
description: 'ID of the pipeline schedule to mutate.'
required: true,
description: 'ID of the pipeline schedule to mutate.'
private

View File

@ -11,40 +11,40 @@ module Mutations
authorize :create_pipeline_schedule
argument :project_path, GraphQL::Types::ID,
required: true,
description: 'Full path of the project the pipeline schedule is associated with.'
required: true,
description: 'Full path of the project the pipeline schedule is associated with.'
argument :description, GraphQL::Types::String,
required: true,
description: 'Description of the pipeline schedule.'
required: true,
description: 'Description of the pipeline schedule.'
argument :cron, GraphQL::Types::String,
required: true,
description: 'Cron expression of the pipeline schedule.'
required: true,
description: 'Cron expression of the pipeline schedule.'
argument :cron_timezone, GraphQL::Types::String,
required: false,
description:
<<-STR
required: false,
description:
<<-STR
Cron time zone supported by ActiveSupport::TimeZone.
For example: "Pacific Time (US & Canada)" (default: "UTC").
STR
STR
argument :ref, GraphQL::Types::String,
required: true,
description: 'Ref of the pipeline schedule.'
required: true,
description: 'Ref of the pipeline schedule.'
argument :active, GraphQL::Types::Boolean,
required: false,
description: 'Indicates if the pipeline schedule should be active or not.'
required: false,
description: 'Indicates if the pipeline schedule should be active or not.'
argument :variables, [Mutations::Ci::PipelineSchedule::VariableInputType],
required: false,
description: 'Variables for the pipeline schedule.'
required: false,
description: 'Variables for the pipeline schedule.'
field :pipeline_schedule,
Types::Ci::PipelineScheduleType,
description: 'Created pipeline schedule.'
Types::Ci::PipelineScheduleType,
description: 'Created pipeline schedule.'
def resolve(project_path:, variables: [], **pipeline_schedule_attrs)
project = authorized_find!(project_path)

View File

@ -9,9 +9,9 @@ module Mutations
authorize :play_pipeline_schedule
field :pipeline_schedule,
Types::Ci::PipelineScheduleType,
null: true,
description: 'Pipeline schedule after mutation.'
Types::Ci::PipelineScheduleType,
null: true,
description: 'Pipeline schedule after mutation.'
def resolve(id:)
schedule = authorized_find!(id: id)

View File

@ -9,8 +9,8 @@ module Mutations
authorize :admin_pipeline_schedule
field :pipeline_schedule,
Types::Ci::PipelineScheduleType,
description: 'Updated pipeline schedule ownership.'
Types::Ci::PipelineScheduleType,
description: 'Updated pipeline schedule ownership.'
def resolve(id:)
schedule = authorized_find!(id: id)

View File

@ -9,36 +9,36 @@ module Mutations
authorize :update_pipeline_schedule
argument :description, GraphQL::Types::String,
required: false,
description: 'Description of the pipeline schedule.'
required: false,
description: 'Description of the pipeline schedule.'
argument :cron, GraphQL::Types::String,
required: false,
description: 'Cron expression of the pipeline schedule.'
required: false,
description: 'Cron expression of the pipeline schedule.'
argument :cron_timezone, GraphQL::Types::String,
required: false,
description:
<<-STR
required: false,
description:
<<-STR
Cron time zone supported by ActiveSupport::TimeZone.
For example: "Pacific Time (US & Canada)" (default: "UTC").
STR
STR
argument :ref, GraphQL::Types::String,
required: false,
description: 'Ref of the pipeline schedule.'
required: false,
description: 'Ref of the pipeline schedule.'
argument :active, GraphQL::Types::Boolean,
required: false,
description: 'Indicates if the pipeline schedule should be active or not.'
required: false,
description: 'Indicates if the pipeline schedule should be active or not.'
argument :variables, [Mutations::Ci::PipelineSchedule::VariableInputType],
required: false,
description: 'Variables for the pipeline schedule.'
required: false,
description: 'Variables for the pipeline schedule.'
field :pipeline_schedule,
Types::Ci::PipelineScheduleType,
description: 'Updated pipeline schedule.'
Types::Ci::PipelineScheduleType,
description: 'Updated pipeline schedule.'
def resolve(id:, variables: [], **pipeline_schedule_attrs)
schedule = authorized_find!(id: id)

View File

@ -9,20 +9,20 @@ module Mutations
RunnerID = ::Types::GlobalIDType[::Ci::Runner]
argument :ids, [RunnerID],
required: false,
description: 'IDs of the runners to delete.'
required: false,
description: 'IDs of the runners to delete.'
field :deleted_count,
::GraphQL::Types::Int,
null: true,
description: 'Number of records effectively deleted. ' \
'Only present if operation was performed synchronously.'
::GraphQL::Types::Int,
null: true,
description: 'Number of records effectively deleted. ' \
'Only present if operation was performed synchronously.'
field :deleted_ids,
[RunnerID],
null: true,
description: 'IDs of records effectively deleted. ' \
'Only present if operation was performed synchronously.'
[RunnerID],
null: true,
description: 'IDs of records effectively deleted. ' \
'Only present if operation was performed synchronously.'
def resolve(**runner_attrs)
if ids = runner_attrs[:ids]

View File

@ -47,7 +47,8 @@ module SystemNoteHelper
'relate_to_child' => 'link',
'unrelate_from_child' => 'link',
'relate_to_parent' => 'link',
'unrelate_from_parent' => 'link'
'unrelate_from_parent' => 'link',
'requested_changes' => 'error'
}.freeze
def system_note_icon_name(note)

View File

@ -109,7 +109,7 @@ module RelativePositioning
representative.model_class.transaction do
indexed.each_slice(100) do |batch|
mapping = batch.to_h.transform_values! do |i|
desired_pos = position + delta * (i + 1)
desired_pos = position + (delta * (i + 1))
{ relative_position: desired_pos.clamp(lower_bound, upper_bound) }
end

View File

@ -100,7 +100,7 @@ module DesignManagement
end
CREATION_TTL = 5.seconds
RETRY_DELAY = ->(num) { 0.2.seconds * num**2 }
RETRY_DELAY = ->(num) { 0.2.seconds * (num**2) }
def self.with_lock(project_id, repository, &block)
key = "with_lock:#{name}:{#{project_id}}"

View File

@ -240,10 +240,10 @@ module Integrations
failed_links = failed.map { |job| job_link(job) }
unless truncated.blank?
failed_links << s_("ChatMessage|and [%{count} more](%{pipeline_failed_jobs_url})") % {
failed_links << (s_("ChatMessage|and [%{count} more](%{pipeline_failed_jobs_url})") % {
count: truncated.size,
pipeline_failed_jobs_url: pipeline_failed_jobs_url
}
})
end
failed_links.join(I18n.t(:'support.array.words_connector'))

View File

@ -3,6 +3,7 @@
module Integrations
class Phorge < BaseIssueTracker
include HasIssueTrackerFields
include HasAvatar
PHORGE_FIELDS = %w[project_url issues_url].freeze

View File

@ -19,7 +19,7 @@ class MergeRequestDiff < ApplicationRecord
# The files_count column is a 2-byte signed integer. Look up the true value
# from the database if this sentinel is seen
FILES_COUNT_SENTINEL = 2**15 - 1
FILES_COUNT_SENTINEL = (2**15) - 1
# External diff cache key used by diffs export
EXTERNAL_DIFFS_CACHE_TMPDIR = 'project-%{project_id}-external-mr-%{mr_id}-diff-%{id}-cache'

View File

@ -178,8 +178,8 @@ class Namespace < ApplicationRecord
after_sync_traversal_ids :schedule_sync_event_worker # custom callback defined in Namespaces::Traversal::Linear
after_commit :expire_child_caches, on: :update, if: -> {
Feature.enabled?(:cached_route_lookups, self, type: :ops) &&
saved_change_to_name? || saved_change_to_path? || saved_change_to_parent_id?
(Feature.enabled?(:cached_route_lookups, self, type: :ops) &&
saved_change_to_name?) || saved_change_to_path? || saved_change_to_parent_id?
}
scope :user_namespaces, -> { where(type: Namespaces::UserNamespace.sti_name) }

View File

@ -88,7 +88,7 @@ module Network
if self.class.max_count / 2 < offset
# get max index that commit is displayed in the center.
offset - self.class.max_count / 2
offset - (self.class.max_count / 2)
else
0
end

View File

@ -852,7 +852,7 @@ class Note < ApplicationRecord
if user_visible_reference_count.present? && total_reference_count.present?
# if they are not equal, then there are private/confidential references as well
total_reference_count == 0 ||
user_visible_reference_count > 0 && user_visible_reference_count == total_reference_count
(user_visible_reference_count > 0 && user_visible_reference_count == total_reference_count)
else
refs = all_references(user)
refs.all

View File

@ -20,6 +20,6 @@ class NotificationReason
# returns the priority of a reason as an integer
def self.priority(reason)
REASON_PRIORITY.index(reason) || REASON_PRIORITY.length + 1
REASON_PRIORITY.index(reason) || (REASON_PRIORITY.length + 1)
end
end

View File

@ -11,7 +11,7 @@ module Packages
belongs_to :project
validates :package, :project, :fields, presence: true
validates :fields, json_schema: { filename: 'terraform_module_metadata' }
validates :fields, json_schema: { filename: 'terraform_module_metadata', detail_errors: true }
validate :terraform_module_package_type
validate :ensure_fields_size

View File

@ -156,7 +156,7 @@ class ProjectFeature < ApplicationRecord
return false if ::Gitlab::Pages.access_control_is_forced?
pages_access_level == PUBLIC || pages_access_level == ENABLED && project.public?
pages_access_level == PUBLIC || (pages_access_level == ENABLED && project.public?)
end
def private_pages?

View File

@ -25,7 +25,7 @@ class SystemNoteMetadata < ApplicationRecord
tag due_date start_date_or_due_date pinned_embed cherry_pick health_status approved unapproved
status alert_issue_added relate unrelate new_alert_added severity contact timeline_event
issue_type relate_to_child unrelate_from_child relate_to_parent unrelate_from_parent override
issue_email_participants
issue_email_participants requested_changes
].freeze
validates :note, presence: true, unless: :importing?

View File

@ -76,7 +76,7 @@ module Terraform
# recreated: https://gitlab.com/gitlab-org/gitlab/-/issues/258960
def migrate_legacy_version!(data:, version:, build:)
current_file = latest_version.file.read
current_version = parse_serial(current_file) || version - 1
current_version = parse_serial(current_file) || (version - 1)
update!(versioning_enabled: true)

View File

@ -8,5 +8,5 @@ class WebauthnRegistration < ApplicationRecord
validates :credential_xid, :public_key, :counter, presence: true
validates :name, length: { minimum: 0, allow_nil: false }
validates :counter,
numericality: { only_integer: true, greater_than_or_equal_to: 0, less_than_or_equal_to: 2**32 - 1 }
numericality: { only_integer: true, greater_than_or_equal_to: 0, less_than_or_equal_to: (2**32) - 1 }
end

View File

@ -33,6 +33,8 @@ module MergeRequests
def create_requested_changes(merge_request)
merge_request.create_requested_changes(current_user)
SystemNoteService.requested_changes(merge_request, current_user)
trigger_merge_request_merge_status_updated(merge_request)
end

View File

@ -2,6 +2,8 @@
module Notes
class BaseService < ::BaseService
include Gitlab::InternalEventsTracking
def clear_noteable_diffs_cache(note)
if note.is_a?(DiffNote) &&
note.start_of_discussion? &&
@ -11,7 +13,13 @@ module Notes
end
def increment_usage_counter(note)
Gitlab::UsageDataCounters::NoteCounter.count(:create, note.noteable_type)
if note.noteable_type == 'Commit'
track_internal_event('create_commit_note', project: project, user: current_user)
elsif note.noteable_type == 'Snippet'
track_internal_event('create_snippet_note', project: project, user: current_user)
else
Gitlab::UsageDataCounters::NoteCounter.count(:create, note.noteable_type)
end
end
end
end

View File

@ -172,18 +172,6 @@ module Notes
track_note_creation_usage_for_merge_requests(note) if note.for_merge_request?
track_incident_action(user, note.noteable, 'incident_comment') if note.for_issue?
track_note_creation_in_ipynb(note)
metric_key_path = 'counts.commit_comment'
Gitlab::Tracking.event(
'Notes::CreateService',
'create_commit_comment',
project: project,
namespace: project&.namespace,
user: user,
label: metric_key_path,
context: [Gitlab::Usage::MetricDefinition.context_for(metric_key_path).to_context]
)
end
def tracking_data_for(note)

View File

@ -0,0 +1,42 @@
# frozen_string_literal: true
module Packages
module TerraformModule
module Metadata
class CreateService
def initialize(package, metadata_hash)
@package = package
@metadata_hash = metadata_hash
end
def execute
metadata = ::Packages::TerraformModule::Metadatum.new(
package: package,
project: package.project,
fields: metadata_hash,
updated_at: Time.current,
created_at: Time.current
)
if metadata.valid?
::Packages::TerraformModule::Metadatum.upsert(metadata.attributes, returning: false)
ServiceResponse.success(payload: { metadata: metadata })
else
Gitlab::ErrorTracking.track_exception(
ActiveRecord::RecordInvalid.new(metadata),
class: self.class.name,
package_id: package.id
)
ServiceResponse.error(message: metadata.errors.full_messages, reason: :bad_request)
end
end
private
attr_reader :package, :metadata_hash
end
end
end
end
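
A minimal usage sketch for this new service, assuming a persisted Terraform Module package and an already-extracted metadata hash (both variable names below are placeholders):

# Illustrative only: `package` and `metadata_hash` are assumed to come from the extraction step.
metadata_hash = { root: { readme: '# My module', resources: [], dependencies: { providers: [], modules: [] } } }
result = ::Packages::TerraformModule::Metadata::CreateService.new(package, metadata_hash).execute

if result.success?
  result.payload[:metadata] # the upserted Packages::TerraformModule::Metadatum
else
  result.message            # validation errors; the invalid record is also reported to error tracking
end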

View File

@ -5,7 +5,10 @@ module Packages
module Metadata
class ExtractFilesService
MAX_FILE_SIZE = 3.megabytes
MAX_PROCESSED_FILES_COUNT = 400
README_FILES = %w[README.md README].freeze
SUBMODULES_REGEX = /\bmodules\b/
EXAMPLES_REGEX = /\bexamples\b/
ExtractionError = Class.new(StandardError)
@ -15,6 +18,17 @@ module Packages
end
def execute
parse_file
aggregate_metadata_into_root
ServiceResponse.success(payload: metadata)
end
private
attr_reader :archive_file, :metadata
def parse_file
Tempfile.create('extracted_terraform_module_metadata') do |tmp_file|
process_archive do |entry|
case entry
@ -25,16 +39,12 @@ module Packages
end
end
end
ServiceResponse.success(payload: metadata)
end
private
attr_reader :archive_file, :metadata
def process_archive
archive_file.each do |entry|
archive_file.each_with_index do |entry, index|
raise ExtractionError, 'Too many files to process' if index >= MAX_PROCESSED_FILES_COUNT
next unless entry.file? && entry.size <= MAX_FILE_SIZE
yield(entry)
@ -42,36 +52,96 @@ module Packages
end
def process_tar_entry(tmp_file, entry)
return unless metadata_file?(entry.full_name)
module_type = module_type_from_path(entry.full_name)
return unless module_type
File.open(tmp_file.path, 'w+') do |file|
IO.copy_stream(entry, file)
file.rewind
raise ExtractionError, 'metadata file has the wrong entry size' if File.size(file) > MAX_FILE_SIZE
parse_and_merge_metadata(file, entry.full_name)
parse_and_merge_metadata(file, entry.full_name, module_type)
end
end
def process_zip_entry(tmp_file, entry)
return unless metadata_file?(entry.name)
module_type = module_type_from_path(entry.name)
return unless module_type
entry.extract(tmp_file.path) { true }
File.open(tmp_file.path) do |file|
parse_and_merge_metadata(file, entry.name)
parse_and_merge_metadata(file, entry.name, module_type)
end
rescue Zip::EntrySizeError => e
raise ExtractionError, "metadata file has the wrong entry size: #{e.message}"
end
def metadata_file?(entry_name)
File.extname(entry_name) == '.tf' || File.basename(entry_name).in?(README_FILES)
def module_type_from_path(path)
return unless File.extname(path) == '.tf' || File.basename(path).in?(README_FILES)
%i[root submodule example].detect do |type|
method(:"#{type}?").call(path)
end
end
def parse_and_merge_metadata(file, entry_name)
# Here we would call the ParseFileService to parse the file and extract the metadata.
# For now, we'll just return the file size & name as a placeholder.
metadata[entry_name] = "Size: #{file.size}"
def root?(path)
File.dirname(path).exclude?('/')
end
def submodule?(path)
File.dirname(path).match?(SUBMODULES_REGEX) &&
File.dirname(path).count('/').in?([1, 2]) &&
!File.dirname(path).end_with?('modules')
end
def example?(path)
File.dirname(path).match?(EXAMPLES_REGEX) &&
File.dirname(path).count('/').in?([1, 2]) &&
!File.dirname(path).end_with?('examples')
end
def parse_and_merge_metadata(file, entry_name, module_type)
parsed_content = ::Packages::TerraformModule::Metadata::ProcessFileService
.new(file, entry_name, module_type)
.execute
.payload
deep_merge_metadata(parsed_content)
end
def deep_merge_metadata(parsed_content)
return if parsed_content.empty?
metadata.deep_merge!(parsed_content) do |_, old, new|
[old, new].all?(Array) ? old.concat(new) : new
end
end
def aggregate_metadata_into_root
aggregate_submodules_and_examples(metadata[:submodules])
aggregate_submodules_and_examples(metadata[:examples], clear_data: true)
end
def aggregate_submodules_and_examples(data, clear_data: false)
return unless data
ensure_root_metadata_exists
data.each_value do |val|
metadata[:root][:resources] |= val[:resources]
metadata[:root][:dependencies][:modules] |= val.dig(:dependencies, :modules)
metadata[:root][:dependencies][:providers] |= val.dig(:dependencies, :providers)
val.except!(:resources, :dependencies) if clear_data
end
end
def ensure_root_metadata_exists
metadata[:root] ||= {}
metadata[:root][:resources] ||= []
metadata[:root][:dependencies] ||= {}
metadata[:root][:dependencies][:modules] ||= []
metadata[:root][:dependencies][:providers] ||= []
end
end
end
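
A rough sketch of what aggregate_metadata_into_root does to the accumulated hash, assuming a single parsed submodule (all values below are invented):

# Before aggregation (illustrative):
metadata = {
  submodules: {
    'vpc' => { resources: ['aws_vpc.main'], dependencies: { providers: [{ 'name' => 'aws' }], modules: [] } }
  }
}

# After aggregation, the submodule's resources and dependencies are unioned into the root entry:
# metadata[:root] == { resources: ['aws_vpc.main'], dependencies: { modules: [], providers: [{ 'name' => 'aws' }] } }
# For examples, clear_data: true additionally strips :resources and :dependencies from each example after merging.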

View File

@ -0,0 +1,211 @@
# frozen_string_literal: true
module Packages
module TerraformModule
module Metadata
class ParseHclFileService
COMMENT_NOTATIONS = %w[// #].freeze
RESOURCE_REGEX = /resource\s+"([^"]+)"\s+"([^"]+)"/
QUOTED_STRING_BOUNDARIES_REGEX = /\A"|"\Z/
TO_KEEP_ARGUMENTS = {
variable: %w[name type description default].freeze,
output: %w[name description].freeze
}.freeze
DEPENDENCY_REGEXES = {
source: /source\s*=\s*"([^"]+)"/,
version: /version\s*=\s*"([^"]+)"/
}.freeze
ARGUMENTS = {
variable: ['default =', 'type =', 'description =', 'validation {', 'sensitive =', 'nullable ='].freeze,
output: ['description =', 'value ='].freeze
}.freeze
HEREDOC_PREFIX_REGEX = /^<<-?/
PROVIDER_REGEXES = {
v012: /(\w+-?\w*)\s*=\s*"([^"]+)"/,
v013: /\s*([\w-]+)\s*=(?=\s*{)/
}.freeze
def initialize(file)
@file = file
@resources = []
@modules = []
@providers = []
@variables = []
@outputs = []
@block_data = {}
@current_block = nil
@current_argument = nil
@heredoc_tag = nil
end
def execute
return ServiceResponse.success(payload: {}) if file.blank?
file.each do |line|
next if skip_line?(line)
process_line(line)
end
ServiceResponse.success(payload: { resources: resources, modules: modules, providers: providers,
variables: variables, outputs: outputs })
end
private
attr_reader :file, :resources, :modules, :providers, :variables, :outputs
attr_accessor :block_data, :current_block, :current_argument, :heredoc_tag
def skip_line?(line)
line.strip.empty? || line.strip.start_with?(*COMMENT_NOTATIONS)
end
def process_line(line)
case line
when /^resource/, /^module/, /^provider/, /^variable/, /^output/, /^terraform/
start_new_block(determine_block_type(line), line)
else
process_block_content(line)
end
end
def determine_block_type(line)
line.split.first.to_sym
end
def start_new_block(block_type, line)
self.current_block = block_type
resources << line.match(RESOURCE_REGEX).captures.join('.') if block_type == :resource
block_data['name'] = line.sub(block_type.to_s, '').split.first if %i[resource terraform].exclude?(block_type)
end
def process_block_content(line)
block_end?(line) ? finalize_current_block : process_block_arguments(line)
end
def block_end?(line)
cond = line.start_with?('}') || (current_argument == :required_providers && line.strip.start_with?('}'))
cond && line.sub('}', '').strip.empty? && !heredoc_tag
end
def finalize_current_block
return if block_data.empty?
clean_block_data
store_block_data
reset_block_state
end
def clean_block_data
self.block_data = block_data.compact_blank.each_value do |v|
v.gsub!(QUOTED_STRING_BOUNDARIES_REGEX, '')&.strip!
end
end
def store_block_data
case current_block
when :module
modules << block_data unless block_data['source']&.start_with?('.')
when :provider, :terraform
providers << block_data
when :variable
variables << block_data.slice(*TO_KEEP_ARGUMENTS[:variable])
when :output
outputs << block_data.slice(*TO_KEEP_ARGUMENTS[:output])
end
end
def reset_block_state
self.block_data = {}
self.current_block = nil
self.current_argument = nil
end
def process_block_arguments(line)
case current_block
when :module
process_module_arguments(line)
when :variable, :output
process_variable_or_output_arguments(line)
when :terraform
process_terraform_arguments(line)
end
end
def process_module_arguments(line)
DEPENDENCY_REGEXES.each do |key, regex|
block_data[key.to_s] = Regexp.last_match(1) if line =~ regex
end
end
def process_variable_or_output_arguments(line)
args = ARGUMENTS[current_block.to_sym]
return process_argument_declaration(line, args) if argument_declared?(line, args)
return process_heredoc if current_argument && heredoc_tag && line.squish == heredoc_tag
append_argument_value(line)
end
def process_argument_declaration(line, args)
self.current_argument, argument_value = extract_argument(line, args)
is_heredoc = current_argument == 'description' && argument_value.start_with?('<<')
if is_heredoc
self.heredoc_tag = argument_value.sub(HEREDOC_PREFIX_REGEX, '').strip
block_data[current_argument] = +''
else
block_data[current_argument] = argument_value
end
end
def process_heredoc
self.heredoc_tag = nil
self.current_argument = nil
end
def append_argument_value(line)
return unless block_data[current_argument]
block_data[current_argument] << " #{line.squish}"
end
def process_terraform_arguments(line)
if line.strip.start_with?('required_providers')
self.current_argument = :required_providers
elsif current_argument
process_provider_arguments(line)
end
end
def process_provider_arguments(line)
if line =~ PROVIDER_REGEXES[:v012] && current_argument == :required_providers
block_data.merge!('name' => Regexp.last_match(1), 'version' => Regexp.last_match(2))
finalize_provider_block
elsif line =~ PROVIDER_REGEXES[:v013]
finalize_provider_block if block_data.any?
block_data['name'] = Regexp.last_match(1)
self.current_argument = block_data['name'].to_sym
elsif line =~ DEPENDENCY_REGEXES[:source]
block_data['source'] = Regexp.last_match(1)
elsif line =~ DEPENDENCY_REGEXES[:version]
block_data['version'] = Regexp.last_match(1)
end
end
def finalize_provider_block
providers << block_data
self.block_data = {}
end
def argument_declared?(line, args)
args.any? { |arg| line.squish.start_with?(arg) && current_argument != arg.split(' ').first }
end
def extract_argument(line, args)
arg = args.find { |arg| line.squish.start_with?(arg) }
[arg.split(' ').first, line.squish.sub(arg, '').strip]
end
end
end
end
end
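
For illustration, a hedged sketch of feeding the parser a small, made-up HCL snippet (assuming, as the use of file.each suggests, that any line-enumerable IO works):

require 'stringio'

hcl = StringIO.new(<<~HCL)
  variable "environment" {
    type        = string
    description = "Deployment environment"
    default     = "staging"
  }

  resource "aws_s3_bucket" "artifacts" {
    bucket = "my-artifacts"
  }
HCL

payload = ::Packages::TerraformModule::Metadata::ParseHclFileService.new(hcl).execute.payload
# payload[:resources] should contain "aws_s3_bucket.artifacts"
# payload[:variables] should contain the variable's name, type, description and default (surrounding quotes stripped)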

View File

@ -0,0 +1,73 @@
# frozen_string_literal: true
module Packages
module TerraformModule
module Metadata
class ProcessFileService
README_FILES = %w[README.md README].freeze
def initialize(file, path, module_type)
@file = file
@path = path
@module_type = module_type
end
def execute
result = README_FILES.include?(file_name) ? parse_readme : parse_tf_file
ServiceResponse.success(payload: result)
rescue StandardError => e
Gitlab::ErrorTracking.track_exception(
e,
class: self.class.name
)
end
private
attr_reader :file, :path, :module_type
def file_name
File.basename(path)
end
def module_name
File.basename(dirname)
end
def dirname
File.dirname(path)
end
def parse_readme
build_module_type_hash(:readme, file.read)
end
def parse_tf_file
parsed_hcl = ::Packages::TerraformModule::Metadata::ParseHclFileService.new(file).execute.payload
merge_module_type_hashes(parsed_hcl)
end
def merge_module_type_hashes(parsed_hcl)
build_module_type_hash(:resources, parsed_hcl[:resources])
.deep_merge(build_module_type_hash(:dependencies,
{ providers: parsed_hcl[:providers], modules: parsed_hcl[:modules] }))
.deep_merge(build_module_type_hash(:inputs, parsed_hcl[:variables]))
.deep_merge(build_module_type_hash(:outputs, parsed_hcl[:outputs]))
end
def build_module_type_hash(key, content)
case module_type
when :root
{ root: { key => content } }
when :submodule
{ submodules: { module_name => { key => content } } }
when :example
{ examples: { module_name => { key => content } } }
end
end
end
end
end
end

View File

@ -20,7 +20,11 @@ module Packages
result = ::Packages::TerraformModule::Metadata::ExtractFilesService.new(archive_file).execute
end
ServiceResponse.success(payload: result&.payload)
if result&.success?
::Packages::TerraformModule::Metadata::CreateService.new(package_file.package, result.payload).execute
end
ServiceResponse.success
end
private

View File

@ -383,6 +383,10 @@ module SystemNoteService
merge_requests_service(noteable, noteable.project, user).unapprove_mr
end
def requested_changes(noteable, user)
merge_requests_service(noteable, noteable.project, user).requested_changes
end
def change_alert_status(alert, author, reason = nil)
::SystemNotes::AlertManagementService.new(noteable: alert, project: alert.project, author: author).change_alert_status(reason)
end

View File

@ -203,6 +203,12 @@ module SystemNotes
create_note(NoteSummary.new(noteable, project, author, body, action: 'unapproved'))
end
def requested_changes
body = "requested changes"
create_note(NoteSummary.new(noteable, project, author, body, action: 'requested_changes'))
end
end
end
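
Read together with the SystemNoteHelper and SystemNoteMetadata hunks above, a reviewer action would presumably flow roughly like this (sketch only; merge_request and reviewer are placeholders):

SystemNoteService.requested_changes(merge_request, reviewer)
# Creates a "requested changes" system note whose metadata action is 'requested_changes',
# which system_note_icon_name maps to the new 'error' icon styling.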

View File

@ -23,9 +23,7 @@
},
"additionalProperties": false,
"required": [
"name",
"description",
"type"
"name"
]
}
},
@ -43,32 +41,43 @@
},
"additionalProperties": false,
"required": [
"name",
"description"
"name"
]
}
},
"dependency": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"source": {
"type": "string"
},
"version": {
"type": "string"
}
},
"additionalProperties": false,
"required": [
"name"
]
},
"dependencies": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"source": {
"type": "string"
},
"version": {
"type": "string"
"type": "object",
"properties": {
"providers": {
"type": "array",
"items": {
"$ref": "#/definitions/dependency"
}
},
"additionalProperties": false,
"required": [
"name",
"source",
"version"
]
"modules": {
"type": "array",
"items": {
"$ref": "#/definitions/dependency"
}
}
}
},
"resources": {
@ -82,7 +91,7 @@
"root": {
"type": "object",
"properties": {
"description": {
"readme": {
"type": "string"
},
"inputs": {
@ -101,57 +110,49 @@
"additionalProperties": false
},
"submodules": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string"
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9_-]+$": {
"type": "object",
"properties": {
"readme": {
"type": "string"
},
"inputs": {
"$ref": "#/definitions/inputs"
},
"outputs": {
"$ref": "#/definitions/outputs"
},
"dependencies": {
"$ref": "#/definitions/dependencies"
},
"resources": {
"$ref": "#/definitions/resources"
}
},
"description": {
"type": "string"
},
"inputs": {
"$ref": "#/definitions/inputs"
},
"outputs": {
"$ref": "#/definitions/outputs"
},
"dependencies": {
"$ref": "#/definitions/dependencies"
},
"resources": {
"$ref": "#/definitions/resources"
}
},
"additionalProperties": false,
"required": [
"name"
]
"additionalProperties": false
}
}
},
"examples": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string"
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9_-]+$": {
"type": "object",
"properties": {
"readme": {
"type": "string"
},
"inputs": {
"$ref": "#/definitions/inputs"
},
"outputs": {
"$ref": "#/definitions/outputs"
}
},
"description": {
"type": "string"
},
"inputs": {
"$ref": "#/definitions/inputs"
},
"outputs": {
"$ref": "#/definitions/outputs"
}
},
"additionalProperties": false,
"required": [
"name"
]
"additionalProperties": false
}
}
}
},
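
As a hedged illustration of the reshaped terraform_module_metadata schema (assuming the parts of the schema not shown in this hunk keep their existing properties), a conforming fields hash could look roughly like:

fields = {
  root: {
    readme: '# VPC module',
    inputs: [{ name: 'environment', type: 'string', description: 'Deployment environment' }],
    outputs: [{ name: 'vpc_id' }],
    dependencies: { providers: [{ name: 'aws', version: '~> 5.0' }], modules: [] },
    resources: ['aws_vpc.main']
  },
  # submodules and examples are now objects keyed by module name rather than arrays of named entries
  submodules: { 'subnets' => { readme: '# Subnets submodule', dependencies: { providers: [], modules: [] }, resources: [] } },
  examples: { 'complete' => { readme: '# Complete example', inputs: [], outputs: [] } }
}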

View File

@ -1,25 +1,23 @@
.search-results-status.gl-sm-display-flex.gl-flex-wrap.gl-justify-content-space-between.gl-my-4{ class: ('gl-lg-display-none' if @search_objects.to_a.empty?) }
.search-results-status.gl-sm-display-flex.gl-items-start.gl-justify-content-space-between.gl-my-4{ class: ('gl-lg-display-none' if @search_objects.to_a.empty?) }
- unless @search_objects.to_a.empty?
%p.gl-text-truncate.gl-my-auto
%div
- unless @search_service_presenter.without_count?
= search_entries_info(@search_objects, @scope, @search_term)
%span= search_entries_info(@search_objects, @scope, @search_term)
- unless @search_service_presenter.show_snippets?
- if @project
- link_to_project = link_to(@project.full_name, @project, class: 'search-wrap-f-md-down')
- if @scope == 'blobs'
= _("in")
.mx-md-1.gl-my-auto
%span= _("in")
.gl-display-inline-block
#js-blob-ref-switcher{ data: { "project-id" => @project.id, "ref" => repository_ref(@project), "field-name": "repository_ref" } }
%p.gl-text-truncate.gl-my-auto
= s_('SearchCodeResults|of %{link_to_project}').html_safe % { link_to_project: link_to_project }
%span= s_('SearchCodeResults|of %{link_to_project}').html_safe % { link_to_project: link_to_project }
- else
= _("in project %{link_to_project}").html_safe % { link_to_project: link_to_project }
- elsif @group
- link_to_group = link_to(@group.name, @group, class: 'ml-md-1')
= _("in group %{link_to_group}").html_safe % { link_to_group: link_to_group }
.gl-display-flex
= render Pajamas::ButtonComponent.new(category: 'primary', icon: 'filter', button_options: {id: 'js-open-mobile-filters', class: 'xl:gl-hidden'}) do
.gl-flex.gl-gap-3.gl-mt-3.gl-sm-mt-0
= render Pajamas::ButtonComponent.new(category: 'primary', icon: 'filter', button_options: {id: 'js-open-mobile-filters', class: 'gl-lg-display-none gl-flex-grow-1 gl-md-flex-grow-0'}) do
= s_('GlobalSearch|Filters')
- if @search_service_presenter.show_sort_dropdown? && !@search_objects.to_a.empty?
.gl-ml-3
#js-search-sort{ data: { "search-sort-options" => search_sort_options.to_json } }
#js-search-sort{ data: { "search-sort-options" => search_sort_options.to_json }, class: "gl-flex-grow-1 gl-md-flex-grow-0" }

View File

@ -13,7 +13,7 @@
- c.with_body do
= safe_format(s_('Webhooks|The webhook failed to connect, and is disabled. To re-enable it, check %{strong_start}Recent events%{strong_end} for error details, then test your settings below.'), strong)
- elsif hook.temporarily_disabled?
- help_link = link_to('', help_page_path('user/project/integrations/webhooks', anchor: 'webhook-fails-or-multiple-webhook-requests-are-triggered'), target: '_blank', rel: 'noopener noreferrer')
- help_link = link_to('', help_page_path('user/project/integrations/webhooks', anchor: 'auto-disabled-webhooks'), target: '_blank', rel: 'noopener noreferrer')
- retry_time = { retry_time: time_interval_in_words(hook.disabled_until - Time.now) }
= render Pajamas::AlertComponent.new(title: s_('Webhooks|Webhook fails to connect'),
variant: :warning) do |c|

View File

@ -0,0 +1,20 @@
---
description: Commit comment created
internal_events: true
action: create_commit_note
identifiers:
- project
- namespace
- user
product_section: dev
product_stage: create
product_group: source_code
milestone: '17.0'
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/150768
distributions:
- ce
- ee
tiers:
- free
- premium
- ultimate

View File

@ -0,0 +1,20 @@
---
description: Snippet comment created
internal_events: true
action: create_snippet_note
identifiers:
- project
- namespace
- user
product_section: dev
product_stage: create
product_group: source_code
milestone: '17.0'
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/150768
distributions:
- ce
- ee
tiers:
- free
- premium
- ultimate

View File

@ -1014,7 +1014,7 @@ production: &base
## OmniAuth settings
omniauth:
# Allow login via Twitter, Google, etc. using OmniAuth providers
# Allow login via Google, GitHub, etc. using OmniAuth providers
# enabled: true
# Uncomment this to automatically sign in with a specific omniauth provider's without
@ -1022,7 +1022,7 @@ production: &base
# auto_sign_in_with_provider: saml
# Sync user's profile from the specified Omniauth providers every time the user logs in (default: empty).
# Define the allowed providers using an array, e.g. ["saml", "twitter"],
# Define the allowed providers using an array, e.g. ["saml", "google_oauth2"],
# or as true/false to allow all providers or none.
# When authenticating using LDAP, the user's email is always synced.
# sync_profile_from_provider: []
@ -1035,7 +1035,7 @@ production: &base
# CAUTION!
# This allows users to login without having a user account first. Define the allowed providers
# using an array, e.g. ["saml", "twitter"], or as true/false to allow all providers or none.
# using an array, e.g. ["saml", "google_oauth2"], or as true/false to allow all providers or none.
# User accounts will be created automatically when authentication was successful.
allow_single_sign_on: ["saml"]
@ -1057,10 +1057,10 @@ production: &base
# Allow users with existing accounts to sign in and auto link their account via OmniAuth
# login, without having to do a manual login first and manually add OmniAuth. Links on email.
# Define the allowed providers using an array, e.g. ["saml", "twitter"], or as true/false to
# Define the allowed providers using an array, e.g. ["saml", "google_oauth2"], or as true/false to
# allow all providers or none.
# (default: false)
auto_link_user: ["saml", "twitter"]
auto_link_user: ["saml", "google_oauth2"]
# Set different Omniauth providers as external so that all users creating accounts
# via these providers will not be able to have access to internal projects. You
@ -1071,11 +1071,11 @@ production: &base
# CAUTION!
# This allows users to login with the specified providers without two factor. Define the allowed providers
# using an array, e.g. ["twitter", 'google_oauth2'], or as true/false to allow all providers or none.
# using an array, e.g. ["saml", 'google_oauth2'], or as true/false to allow all providers or none.
# This option should only be configured for providers which already have two factor.
# This configuration does not apply to SAML.
# (default: false)
allow_bypass_two_factor: ["twitter", 'google_oauth2']
allow_bypass_two_factor: ["saml", "google_oauth2"]
## Auth providers
# Uncomment the following lines and fill in the data of the auth provider you want to use
@ -1108,9 +1108,6 @@ production: &base
# - { name: 'facebook',
# app_id: 'YOUR_APP_ID',
# app_secret: 'YOUR_APP_SECRET' }
# - { name: 'twitter',
# app_id: 'YOUR_APP_ID',
# app_secret: 'YOUR_APP_SECRET' }
# - { name: 'jwt',
# args: {
# secret: 'YOUR_APP_SECRET',
@ -1612,9 +1609,6 @@ test:
- { name: 'facebook',
app_id: 'YOUR_APP_ID',
app_secret: 'YOUR_APP_SECRET' }
- { name: 'twitter',
app_id: 'YOUR_APP_ID',
app_secret: 'YOUR_APP_SECRET' }
- { name: 'jwt',
app_secret: 'YOUR_APP_SECRET',
args: {

View File

@ -1,18 +1,5 @@
import { generateEntries } from '../webpack.helpers';
const comment = '/* this is a virtual module used by Vite, it exists only in dev mode */\n';
export const virtualEntrypoints = Object.entries(generateEntries()).reduce(
(acc, [entryName, imports]) => {
const modulePath = imports[imports.length - 1];
const importPath = modulePath.startsWith('./') ? `~/${modulePath.substring(2)}` : modulePath;
acc[`${entryName}.js`] = `${comment}/* ${modulePath} */ import '${importPath}';\n`;
return acc;
},
{},
);
const entrypointsDir = '/javascripts/entrypoints/';
/**
* This Plugin provides virtual entrypoints for our automatic
* rails-route to entrypoint mapping during development
@ -29,8 +16,35 @@ const entrypointsDir = '/javascripts/entrypoints/';
* If the file doesn't exist, it loads an empty JS file.
*/
export function PageEntrypointsPlugin() {
const comment = '/* this is a virtual module used by Vite, it exists only in dev mode */\n';
const virtualEntrypoints = Object.entries(generateEntries()).reduce(
(acc, [entryName, imports]) => {
const modulePath = imports[imports.length - 1];
const importPath = modulePath.startsWith('./') ? `~/${modulePath.substring(2)}` : modulePath;
acc[`${entryName}.js`] = `${comment}/* ${modulePath} */ import '${importPath}';\n`;
return acc;
},
{},
);
const entrypointsDir = '/javascripts/entrypoints/';
const inputOptions = Object.keys(virtualEntrypoints).reduce((acc, value) => {
acc[value] = value;
return acc;
}, {});
return {
name: 'vite-plugin-page-entrypoints',
config() {
return {
build: {
rollupOptions: {
input: inputOptions,
},
},
};
},
load(id) {
if (!id.startsWith('pages.')) {
return undefined;
@ -38,9 +52,10 @@ export function PageEntrypointsPlugin() {
return virtualEntrypoints[id] ?? `/* doesn't exist */`;
},
resolveId(source) {
const fixedSource = source.replace(entrypointsDir, '');
if (fixedSource.startsWith('pages.')) return { id: fixedSource };
return undefined;
if (!source.startsWith(`${entrypointsDir}pages.`)) {
return undefined;
}
return { id: source.replace(entrypointsDir, '') };
},
};
}

View File

@ -8,11 +8,9 @@ product_group: source_code
value_type: number
status: active
time_frame: all
data_source: redis
instrumentation_class: RedisMetric
options:
prefix: note
event: create_snippet
data_source: internal_events
events:
- name: create_snippet_note
distribution:
- ce
- ee
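For reference, the metric definition after this change would look roughly as follows, with the Redis-specific `instrumentation_class` and `options` dropped in favor of a reference to the `create_snippet_note` internal event defined earlier in this commit. Only keys visible in the hunk are shown; the rest of the file is unchanged and omitted here.
```yaml
# Approximate resulting definition; keys not shown in the hunk are omitted.
product_group: source_code
value_type: number
status: active
time_frame: all
data_source: internal_events
events:
  - name: create_snippet_note
distribution:
  - ce
  - ee
```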

View File

@ -8,13 +8,9 @@ product_group: source_code
value_type: number
status: active
time_frame: all
data_source: redis
instrumentation_class: RedisMetric
options:
prefix: note
event: create_commit
data_source: internal_events
events:
- name: commit_note_created
- name: create_commit_note
distribution:
- ce
- ee

View File

@ -371,12 +371,20 @@ to work with the collected data where you can visualize the output.
For a more fully featured dashboard, Grafana can be used and has
[official support for Prometheus](https://prometheus.io/docs/visualization/grafana/).
Sample Prometheus queries:
## Sample Prometheus queries
Below are some sample Prometheus queries that can be used.
NOTE:
These are only examples and may not work on all setups. Further adjustments may be required.
- **% Memory available:** `((node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes) or ((node_memory_MemFree_bytes + node_memory_Buffers_bytes + node_memory_Cached_bytes) / node_memory_MemTotal_bytes)) * 100`
- **% CPU utilization:** `1 - avg without (mode,cpu) (rate(node_cpu_seconds_total{mode="idle"}[5m]))`
- **% Memory available:** `((node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes) or ((node_memory_MemFree_bytes + node_memory_Buffers_bytes + node_memory_Cached_bytes) / node_memory_MemTotal_bytes)) * 100`
- **Data transmitted:** `rate(node_network_transmit_bytes_total{device!="lo"}[5m])`
- **Data received:** `rate(node_network_receive_bytes_total{device!="lo"}[5m])`
- **Disk read IOPS:** `sum by (instance) (rate(node_disk_reads_completed_total[1m]))`
- **Disk write IOPS:** `sum by (instance) (rate(node_disk_writes_completed_total[1m]))`
- **RPS via GitLab transaction count:** `sum(irate(gitlab_transaction_duration_seconds_count{controller!~'HealthController|MetricsController|'}[1m])) by (controller, action)`
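These queries can also be wired into Prometheus itself. The following is an illustrative recording and alerting rules sketch, not part of the GitLab documentation, that reuses the CPU utilization query above; the group name, rule names, and threshold are arbitrary choices.
```yaml
# Illustrative only: load via the rule_files setting in prometheus.yml.
groups:
  - name: node-utilization
    rules:
      - record: instance:node_cpu_utilization:ratio
        expr: 1 - avg without (mode,cpu) (rate(node_cpu_seconds_total{mode="idle"}[5m]))
      - alert: HighCpuUtilization
        expr: 1 - avg without (mode,cpu) (rate(node_cpu_seconds_total{mode="idle"}[5m])) > 0.9
        for: 15m
        labels:
          severity: warning
        annotations:
          summary: "CPU utilization has been above 90% for 15 minutes"
```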
## Prometheus as a Grafana data source

View File

@ -10,20 +10,20 @@ DETAILS:
**Tier:** Premium, Ultimate
**Offering:** Self-managed
This page describes the GitLab reference architecture designed to target a peak load of 200 requests per second (RPS), the typical peak load of up to 10,000 users, both manual and automated, based on real data with headroom added.
This page describes the GitLab reference architecture designed to target a peak load of 200 requests per second (RPS), the typical peak load of up to 10,000 users, both manual and automated, based on real data.
For a full list of reference architectures, see
[Available reference architectures](index.md#available-reference-architectures).
NOTE:
Before deploying this architecture it's recommended to read through the [main documentation](index.md) first,
specifically the [Before you start](index.md#before-you-start) and [Deciding which architecture to use](index.md#deciding-which-architecture-to-use) sections.
specifically the [Before you start](index.md#before-you-start) and [Deciding which architecture to start with](index.md#deciding-which-architecture-to-start-with) sections.
> - **Target load:** API: 200 RPS, Web: 20 RPS, Git (Pull): 20 RPS, Git (Push): 4 RPS
> - **High Availability:** Yes ([Praefect](#configure-praefect-postgresql) needs a third-party PostgreSQL solution for HA)
> - **Estimated Costs:** [See cost table](index.md#cost-to-run)
> - **Cloud Native Hybrid Alternative:** [Yes](#cloud-native-hybrid-reference-architecture-with-helm-charts-alternative)
> - **Unsure which Reference Architecture to use?** [Go to this guide for more info](index.md#deciding-which-architecture-to-use)
> - **Unsure which Reference Architecture to use?** [Go to this guide for more info](index.md#deciding-which-architecture-to-start-with)
| Service | Nodes | Configuration | GCP | AWS | Azure |
|------------------------------------------|-------|-------------------------|------------------|----------------|-----------|
@ -56,12 +56,13 @@ specifically the [Before you start](index.md#before-you-start) and [Deciding whi
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
6. Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health.
However, if you have [large monorepos](index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](index.md#additional-workloads) these can *significantly* impact Git and Gitaly performance and further adjustments will likely be required.
7. Can be placed in Auto Scaling Groups (ASGs) as the component doesn't store any [stateful data](index.md#autoscaling-of-stateful-nodes).
However, for GitLab Rails certain processes like [migrations](#gitlab-rails-post-configuration) and [Mailroom](../incoming_email.md) should be run on only one node.
6. Can be placed in Auto Scaling Groups (ASGs) as the component doesn't store any [stateful data](index.md#autoscaling-of-stateful-nodes).
However, [Cloud Native Hybrid setups](#cloud-native-hybrid-reference-architecture-with-helm-charts-alternative) are generally preferred as certain components
such as [migrations](#gitlab-rails-post-configuration) and [Mailroom](../incoming_email.md) can only be run on one node, which is handled better in Kubernetes.
<!-- markdownlint-enable MD029 -->
NOTE:
For all PaaS solutions that involve configuring instances, it is strongly recommended to implement a minimum of three nodes in three different availability zones to align with resilient cloud architecture practices.
For all PaaS solutions that involve configuring instances, it's recommended to implement a minimum of three nodes in three different availability zones to align with resilient cloud architecture practices.
```plantuml
@startuml 10k
@ -165,7 +166,7 @@ against the following endpoint throughput targets:
- Git (Push): 4 RPS
The above targets were selected based on real customer data of total environmental loads corresponding to the user count,
including CI and other workloads along with additional substantial headroom added.
including CI and other workloads.
If you have metrics to suggest that you have regularly higher throughput against the above endpoint targets, [large monorepos](index.md#large-monorepos)
or notable [additional workloads](index.md#additional-workloads) these can notably impact the performance environment and [further adjustments may be required](index.md#scaling-an-environment).
@ -2262,16 +2263,19 @@ as the typical environment above.
First are the components that run in Kubernetes. These run across several node groups, although you can change
the overall makeup as desired as long as the minimum CPU and Memory requirements are observed.
| Service Node Group | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
|---------------------|-------|-------------------------|-----------------|--------------|---------------------------------|
| Webservice | 4 | 32 vCPU, 28.8 GB memory | `n1-highcpu-32` | `c5.9xlarge` | 127.5 vCPU, 118 GB memory |
| Sidekiq | 4 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 15.5 vCPU, 50 GB memory |
| Supporting services | 2 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 7.75 vCPU, 25 GB memory |
| Component Node Group | Target Node Pool Totals | GCP Example | AWS Example |
|----------------------|-------------------------|-----------------|--------------|
| Webservice | 80 vCPU<br/>100 GB memory (request)<br/>140 GB memory (limit) | 3 x `n1-standard-32` | 3 x `c5.9xlarge` |
| Sidekiq | 12.6 vCPU<br/>28 GB memory (request)<br/>56 GB memory (limit) | 4 x `n1-standard-4` | 4 x `m5.xlarge` |
| Supporting services | 4 vCPU<br/>15 GB memory | 2 x `n1-standard-4` | 2 x `m5.xlarge` |
- For this setup, we **recommend** and regularly [test](index.md#validation-and-test-results)
[Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
- Nodes configuration is shown as it is forced to ensure pod vCPU / memory ratios and avoid scaling during **performance testing**.
- In production deployments, there is no need to assign pods to specific nodes. A minimum of three nodes per node group in three different availability zones is strongly recommended to align with resilient cloud architecture practices.
[Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
- GCP and AWS examples of how to reach the Target Node Pool Totals are given for convenience. These sizes are used in performance testing, but following the example is not required. Different node pool designs can be used as desired as long as the targets are met and all pods can be deployed.
- The [Webservice](#webservice) and [Sidekiq](#sidekiq) target node pool totals are given for GitLab components only. Additional resources are required for the chosen Kubernetes provider's system processes. The given examples take this into account.
- The [Supporting](#supporting) target node pool total is given generally to accommodate several resources for supporting the GitLab deployment as well as any additional deployments you may wish to make depending on your requirements. Similar to the other node pools, the chosen Kubernetes provider's system processes also require resources. The given examples take this into account.
- In production deployments, it's not required to assign pods to specific nodes. However, it is recommended to have several nodes in each pool spread across different availability zones to align with resilient cloud architecture practices.
- Enabling autoscaling, such as Cluster Autoscaler, for efficiency reasons is encouraged, but it's generally recommended to target a floor of 75% for Webservice and Sidekiq pods to ensure ongoing performance.
Next are the backend components that run on static compute VMs using the Linux package (or External PaaS
services where applicable):
@ -2306,7 +2310,7 @@ services where applicable):
<!-- markdownlint-enable MD029 -->
NOTE:
For all PaaS solutions that involve configuring instances, it is strongly recommended to implement a minimum of three nodes in three different availability zones to align with resilient cloud architecture practices.
For all PaaS solutions that involve configuring instances, it's recommended to implement a minimum of three nodes in three different availability zones to align with resilient cloud architecture practices.
```plantuml
@startuml 10k
@ -2316,11 +2320,11 @@ card "Kubernetes via Helm Charts" as kubernetes {
card "**External Load Balancer**" as elb #6a9be7
together {
collections "**Webservice** x4" as gitlab #32CD32
collections "**Sidekiq** x4" as sidekiq #ff8dd1
collections "**Webservice**" as gitlab #32CD32
collections "**Sidekiq**" as sidekiq #ff8dd1
}
card "**Supporting Services** x2" as support
card "**Supporting Services**" as support
}
card "**Internal Load Balancer**" as ilb #9370DB
@ -2378,55 +2382,60 @@ consul .[#e76a9b]--> redis
@enduml
```
### Resource usage settings
### Kubernetes component targets
The following formulas help when calculating how many pods may be deployed within resource constraints.
The [10k reference architecture example values file](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/examples/ref/10k.yaml)
documents how to apply the calculated configuration to the Helm Chart.
The following section details the targets used for the GitLab components deployed in Kubernetes.
#### Webservice
Webservice pods typically need about 1 CPU and 1.25 GB of memory _per worker_.
Each Webservice pod consumes roughly 4 CPUs and 5 GB of memory using
the [recommended topology](#cluster-topology) because four worker processes
are created by default and each pod has other small processes running.
Each Webservice pod (Puma and Workhorse) is recommended to be run with the following configuration:
For 200 RPS or 10,000 users we recommend a total Puma worker count of around 80.
With the [provided recommendations](#cluster-topology) this allows the deployment of up to 20
Webservice pods with 4 workers per pod and 5 pods per node. Expand available resources using
the ratio of 1 CPU to 1.25 GB of memory _per each worker process_ for each additional
Webservice pod.
- 4 Puma Workers
- 4 vCPU
- 5 GB memory (request)
- 7 GB memory (limit)
For further information on resource usage, see the [Webservice resources](https://docs.gitlab.com/charts/charts/gitlab/webservice/#resources).
For 200 RPS or 10,000 users we recommend a total Puma worker count of around 80, so in turn it's recommended to run at
least 20 Webservice pods.
For further information on Webservice resource usage, see the Charts documentation on [Webservice resources](https://docs.gitlab.com/charts/charts/gitlab/webservice/#resources).
##### NGINX
It's also recommended to deploy the NGINX controller pods across the Webservice nodes as a DaemonSet. This allows the controllers to scale dynamically with the Webservice pods they serve and to take advantage of the higher network bandwidth that larger machine types typically have.
Note that this isn't a strict requirement. The NGINX controller pods can be deployed as desired as long as they have enough resources to handle the web traffic.
#### Sidekiq
Sidekiq pods should generally have 0.9 CPU and 2 GB of memory.
Each Sidekiq pod is recommended to be run with the following configuration:
[The provided starting point](#cluster-topology) allows the deployment of up to
14 Sidekiq pods. Expand available resources using the 0.9 CPU to 2 GB memory
ratio for each additional pod.
- 1 Sidekiq worker
- 900m vCPU
- 2 GB memory (request)
- 4 GB memory (limit)
For further information on resource usage, see the [Sidekiq resources](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/#resources).
Similar to the standard deployment above, an initial target of 14 Sidekiq workers has been used here.
Additional workers may be required depending on your specific workflow.
#### Supporting
For further information on Sidekiq resource usage, see the Charts documentation on [Sidekiq resources](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/#resources).
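As a rough translation of the Webservice and Sidekiq targets above into Helm values, the following hedged sketch shows the shape such settings could take. The exact value paths and the authoritative numbers should be taken from the 10k example values file and the Charts documentation linked above rather than from this sketch.
```yaml
# Hedged sketch only: verify value paths and numbers against the linked 10k example values file.
gitlab:
  webservice:
    workerProcesses: 4      # 4 Puma workers per pod
    minReplicas: 20         # around 80 Puma workers in total for 200 RPS / 10,000 users
    resources:
      requests:
        cpu: 4
        memory: 5G
      limits:
        memory: 7G
  sidekiq:
    minReplicas: 14         # initial target of 14 Sidekiq workers
    resources:
      requests:
        cpu: 900m
        memory: 2G
      limits:
        memory: 4G
```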
### Supporting
The Supporting Node Pool is designed to house all supporting deployments that don't need to be
on the Webservice and Sidekiq pools.
This includes various deployments related to the Cloud Provider's implementation and supporting
GitLab deployments such as NGINX or [GitLab Shell](https://docs.gitlab.com/charts/charts/gitlab/gitlab-shell/).
GitLab deployments such as [GitLab Shell](https://docs.gitlab.com/charts/charts/gitlab/gitlab-shell/).
If you wish to make any additional deployments, such as for Monitoring, it's recommended
If you wish to make any additional deployments, such as Container Registry, Pages, or Monitoring, it's recommended
to deploy these in this pool where possible and not in the Webservice or Sidekiq pools, as the Supporting pool has been designed
specifically to accommodate several additional deployments. However, if your deployments don't fit into the
pool as given, you can increase the node pool accordingly.
pool as given, you can increase the node pool accordingly. Conversely, if the pool in your use case is over-provisioned, you can reduce it accordingly.
## Secrets
### Example config file
When setting up a Cloud Native Hybrid environment, it's worth noting that several secrets should be synced from the backend VMs' `/etc/gitlab/gitlab-secrets.json` file into Kubernetes.
For this setup specifically, the [GitLab Rails](https://docs.gitlab.com/charts/installation/secrets.html#gitlab-rails-secret) and [GitLab Shell](https://docs.gitlab.com/charts/installation/secrets.html#gitlab-shell-secret) secrets should be synced.
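As an illustration, a synced Rails secret could be expressed as a manifest along the following lines. This is a hypothetical sketch: the secret name is arbitrary, and the required keys and exact format must be taken from the linked charts secrets documentation and your own `/etc/gitlab/gitlab-secrets.json`.
```yaml
# Hypothetical sketch; copy the real values from /etc/gitlab/gitlab-secrets.json on a backend VM.
apiVersion: v1
kind: Secret
metadata:
  name: gitlab-rails-secret   # arbitrary name, referenced from the Helm values
type: Opaque
stringData:
  secrets.yml: |
    production:
      secret_key_base: "<from gitlab-secrets.json>"
      db_key_base: "<from gitlab-secrets.json>"
      otp_key_base: "<from gitlab-secrets.json>"
```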
An example for the GitLab Helm Charts targeting the above 200 RPS or 10,000 user reference architecture configuration [can be found in the Charts project](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/examples/ref/10k.yaml).
<div align="right">
<a type="button" class="btn btn-default" href="#set-up-components">

View File

@ -10,7 +10,7 @@ DETAILS:
**Tier:** Free, Premium, Ultimate
**Offering:** Self-managed
This page describes the GitLab reference architecture designed to target a peak load of 20 requests per second (RPS), the typical peak load of up to 1,000 users, both manual and automated, based on real data with headroom added.
This page describes the GitLab reference architecture designed to target a peak load of 20 requests per second (RPS), the typical peak load of up to 1,000 users, both manual and automated, based on real data.
For a full list of reference architectures, see
[Available reference architectures](index.md#available-reference-architectures).
@ -21,7 +21,7 @@ For a full list of reference architectures, see
> - **Estimated Costs:** [See cost table](index.md#cost-to-run)
> - **Cloud Native Hybrid:** No. For a cloud native hybrid environment, you
> can follow a [modified hybrid reference architecture](#cloud-native-hybrid-reference-architecture-with-helm-charts).
> - **Unsure which Reference Architecture to use?** [Go to this guide for more info](index.md#deciding-which-architecture-to-use).
> - **Unsure which Reference Architecture to use?** [Go to this guide for more info](index.md#deciding-which-architecture-to-start-with).
| Users | Configuration | GCP | AWS | Azure |
|--------------|----------------------|----------------|--------------|----------|
@ -89,7 +89,7 @@ against the following endpoint throughput targets:
- Git (Push): 1 RPS
The above targets were selected based on real customer data of total environmental loads corresponding to the user count,
including CI and other workloads along with additional substantial headroom added.
including CI and other workloads.
If you have metrics to suggest that you have regularly higher throughput against the above endpoint targets, [large monorepos](index.md#large-monorepos)
or notable [additional workloads](index.md#additional-workloads) these can notably impact the performance environment and [further adjustments may be required](index.md#scaling-an-environment).

View File

@ -10,20 +10,20 @@ DETAILS:
**Tier:** Premium, Ultimate
**Offering:** Self-managed
This page describes the GitLab reference architecture designed to target a peak load of 500 requests per second (RPS) - The typical peak load of up to 25,000 users, both manual and automated, based on real data with headroom added.
This page describes the GitLab reference architecture designed to target a peak load of 500 requests per second (RPS) - The typical peak load of up to 25,000 users, both manual and automated, based on real data.
For a full list of reference architectures, see
[Available reference architectures](index.md#available-reference-architectures).
NOTE:
Before deploying this architecture it's recommended to read through the [main documentation](index.md) first,
specifically the [Before you start](index.md#before-you-start) and [Deciding which architecture to use](index.md#deciding-which-architecture-to-use) sections.
specifically the [Before you start](index.md#before-you-start) and [Deciding which architecture to start with](index.md#deciding-which-architecture-to-start-with) sections.
> - **Target load:** API: 500 RPS, Web: 50 RPS, Git (Pull): 50 RPS, Git (Push): 10 RPS
> - **High Availability:** Yes ([Praefect](#configure-praefect-postgresql) needs a third-party PostgreSQL solution for HA)
> - **Estimated Costs:** [See cost table](index.md#cost-to-run)
> - **Cloud Native Hybrid Alternative:** [Yes](#cloud-native-hybrid-reference-architecture-with-helm-charts-alternative)
> - **Unsure which Reference Architecture to use?** [Go to this guide for more info](index.md#deciding-which-architecture-to-use)
> - **Unsure which Reference Architecture to use?** [Go to this guide for more info](index.md#deciding-which-architecture-to-start-with)
| Service | Nodes | Configuration | GCP | AWS | Azure |
|------------------------------------------|-------|-------------------------|------------------|--------------|-----------|
@ -56,12 +56,13 @@ specifically the [Before you start](index.md#before-you-start) and [Deciding whi
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
6. Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health.
However, if you have [large monorepos](index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](index.md#additional-workloads) these can *significantly* impact Git and Gitaly performance and further adjustments will likely be required.
7. Can be placed in Auto Scaling Groups (ASGs) as the component doesn't store any [stateful data](index.md#autoscaling-of-stateful-nodes).
However, for GitLab Rails certain processes like [migrations](#gitlab-rails-post-configuration) and [Mailroom](../incoming_email.md) should be run on only one node.
6. Can be placed in Auto Scaling Groups (ASGs) as the component doesn't store any [stateful data](index.md#autoscaling-of-stateful-nodes).
However, [Cloud Native Hybrid setups](#cloud-native-hybrid-reference-architecture-with-helm-charts-alternative) are generally preferred as certain components
such as [migrations](#gitlab-rails-post-configuration) and [Mailroom](../incoming_email.md) can only be run on one node, which is handled better in Kubernetes.
<!-- markdownlint-enable MD029 -->
NOTE:
For all PaaS solutions that involve configuring instances, it is strongly recommended to implement a minimum of three nodes in three different availability zones to align with resilient cloud architecture practices.
For all PaaS solutions that involve configuring instances, it's recommended to implement a minimum of three nodes in three different availability zones to align with resilient cloud architecture practices.
```plantuml
@startuml 25k
@ -165,7 +166,7 @@ against the following endpoint throughput targets:
- Git (Push): 10 RPS
The above targets were selected based on real customer data of total environmental loads corresponding to the user count,
including CI and other workloads along with additional substantial headroom added.
including CI and other workloads.
If you have metrics to suggest that you have regularly higher throughput against the above endpoint targets, [large monorepos](index.md#large-monorepos)
or notable [additional workloads](index.md#additional-workloads) these can notably impact the performance environment and [further adjustments may be required](index.md#scaling-an-environment).
@ -2268,16 +2269,19 @@ as the typical environment above.
First are the components that run in Kubernetes. These run across several node groups, although you can change
the overall makeup as desired as long as the minimum CPU and Memory requirements are observed.
| Service Node Group | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
|---------------------|-------|-------------------------|-----------------|--------------|---------------------------------|
| Webservice | 7 | 32 vCPU, 28.8 GB memory | `n1-highcpu-32` | `c5.9xlarge` | 223 vCPU, 206.5 GB memory |
| Sidekiq | 4 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 15.5 vCPU, 50 GB memory |
| Supporting services | 2 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 7.75 vCPU, 25 GB memory |
| Component Node Group | Target Node Pool Totals | GCP Example | AWS Example |
|----------------------|-------------------------|-----------------|--------------|
| Webservice | 140 vCPU<br/>175 GB memory (request)<br/>245 GB memory (limit) | 5 x `n1-standard-32` | 5 x `c5.9xlarge` |
| Sidekiq | 12.6 vCPU<br/>28 GB memory (request)<br/>56 GB memory (limit) | 4 x `n1-standard-4` | 4 x `m5.xlarge` |
| Supporting services | 4 vCPU<br/>15 GB memory | 2 x `n1-standard-4` | 2 x `m5.xlarge` |
- For this setup, we **recommend** and regularly [test](index.md#validation-and-test-results)
[Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
- Nodes configuration is shown as it is forced to ensure pod vCPU / memory ratios and avoid scaling during **performance testing**.
- In production deployments, there is no need to assign pods to specific nodes. A minimum of three nodes per node group in three different availability zones is strongly recommended to align with resilient cloud architecture practices.
[Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
- GCP and AWS examples of how to reach the Target Node Pool Totals are given for convenience. These sizes are used in performance testing, but following the example is not required. Different node pool designs can be used as desired as long as the targets are met and all pods can be deployed.
- The [Webservice](#webservice) and [Sidekiq](#sidekiq) target node pool totals are given for GitLab components only. Additional resources are required for the chosen Kubernetes provider's system processes. The given examples take this into account.
- The [Supporting](#supporting) target node pool total is given generally to accommodate several resources for supporting the GitLab deployment as well as any additional deployments you may wish to make depending on your requirements. Similar to the other node pools, the chosen Kubernetes provider's system processes also require resources. The given examples take this into account.
- In production deployments, it's not required to assign pods to specific nodes. However, it is recommended to have several nodes in each pool spread across different availability zones to align with resilient cloud architecture practices.
- Enabling autoscaling, such as Cluster Autoscaler, for efficiency reasons is encouraged, but it's generally recommended to target a floor of 75% for Webservice and Sidekiq pods to ensure ongoing performance.
Next are the backend components that run on static compute VMs using the Linux package (or External PaaS
services where applicable):
@ -2311,7 +2315,7 @@ services where applicable):
<!-- markdownlint-enable MD029 -->
NOTE:
For all PaaS solutions that involve configuring instances, it is strongly recommended to implement a minimum of three nodes in three different availability zones to align with resilient cloud architecture practices.
For all PaaS solutions that involve configuring instances, it's recommended to implement a minimum of three nodes in three different availability zones to align with resilient cloud architecture practices.
```plantuml
@startuml 25k
@ -2321,11 +2325,11 @@ card "Kubernetes via Helm Charts" as kubernetes {
card "**External Load Balancer**" as elb #6a9be7
together {
collections "**Webservice** x7" as gitlab #32CD32
collections "**Sidekiq** x4" as sidekiq #ff8dd1
collections "**Webservice**" as gitlab #32CD32
collections "**Sidekiq**" as sidekiq #ff8dd1
}
card "**Supporting Services** x2" as support
card "**Supporting Services**" as support
}
card "**Internal Load Balancer**" as ilb #9370DB
@ -2383,36 +2387,43 @@ consul .[#e76a9b]--> redis
@enduml
```
### Resource usage settings
### Kubernetes component targets
The following formulas help when calculating how many pods may be deployed within resource constraints.
The [25k reference architecture example values file](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/examples/ref/25k.yaml)
documents how to apply the calculated configuration to the Helm Chart.
The following section details the targets used for the GitLab components deployed in Kubernetes.
#### Webservice
Webservice pods typically need about 1 CPU and 1.25 GB of memory _per worker_.
Each Webservice pod consumes roughly 4 CPUs and 5 GB of memory using
the [recommended topology](#cluster-topology) because four worker processes
are created by default and each pod has other small processes running.
Each Webservice pod (Puma and Workhorse) is recommended to be run with the following configuration:
For 500 RPS or 25,000 users we recommend a total Puma worker count of around 140.
With the [provided recommendations](#cluster-topology) this allows the deployment of up to 35
Webservice pods with 4 workers per pod and 5 pods per node. Expand available resources using
the ratio of 1 CPU to 1.25 GB of memory _per each worker process_ for each additional
Webservice pod.
- 4 Puma Workers
- 4 vCPU
- 5 GB memory (request)
- 7 GB memory (limit)
For further information on resource usage, see the [Webservice resources](https://docs.gitlab.com/charts/charts/gitlab/webservice/#resources).
For 500 RPS or 25,000 users we recommend a total Puma worker count of around 140, so in turn it's recommended to run at
least 35 Webservice pods.
For further information on Webservice resource usage, see the Charts documentation on [Webservice resources](https://docs.gitlab.com/charts/charts/gitlab/webservice/#resources).
##### NGINX
It's also recommended to deploy the NGINX controller pods across the Webservice nodes as a DaemonSet. This allows the controllers to scale dynamically with the Webservice pods they serve and to take advantage of the higher network bandwidth that larger machine types typically have.
Note that this isn't a strict requirement. The NGINX controller pods can be deployed as desired as long as they have enough resources to handle the web traffic.
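If you do want the DaemonSet layout, a hedged sketch of the relevant Helm values could look like the following. The `workload` node label is an assumption made for illustration and should be replaced with whatever label your Webservice node pool actually carries.
```yaml
# Hedged sketch: run the bundled NGINX controller as a DaemonSet on the Webservice pool.
nginx-ingress:
  controller:
    kind: DaemonSet
    nodeSelector:
      workload: webservice   # assumed label; use your Webservice node pool's label
```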
#### Sidekiq
Sidekiq pods should generally have 0.9 CPU and 2 GB of memory.
Each Sidekiq pod is recommended to be run with the following configuration:
[The provided starting point](#cluster-topology) allows the deployment of up to
14 Sidekiq pods. Expand available resources using the 0.9 CPU to 2 GB memory
ratio for each additional pod.
- 1 Sidekiq worker
- 900m vCPU
- 2 GB memory (request)
- 4 GB memory (limit)
For further information on resource usage, see the [Sidekiq resources](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/#resources).
Similar to the standard deployment above, an initial target of 14 Sidekiq workers has been used here.
Additional workers may be required depending on your specific workflow.
For further information on Sidekiq resource usage, see the Charts documentation on [Sidekiq resources](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/#resources).
### Supporting
@ -2420,12 +2431,16 @@ The Supporting Node Pool is designed to house all supporting deployments that do
on the Webservice and Sidekiq pools.
This includes various deployments related to the Cloud Provider's implementation and supporting
GitLab deployments such as NGINX or [GitLab Shell](https://docs.gitlab.com/charts/charts/gitlab/gitlab-shell/).
GitLab deployments such as [GitLab Shell](https://docs.gitlab.com/charts/charts/gitlab/gitlab-shell/).
If you wish to make any additional deployments, such as for Monitoring, it's recommended
If you wish to make any additional deployments, such as Container Registry, Pages, or Monitoring, it's recommended
to deploy these in this pool where possible and not in the Webservice or Sidekiq pools, as the Supporting pool has been designed
specifically to accommodate several additional deployments. However, if your deployments don't fit into the
pool as given, you can increase the node pool accordingly.
pool as given, you can increase the node pool accordingly. Conversely, if the pool in your use case is over-provisioned, you can reduce it accordingly.
### Example config file
An example for the GitLab Helm Charts targeting the above 500 RPS or 25,000 user reference architecture configuration [can be found in the Charts project](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/examples/ref/25k.yaml).
<div align="right">
<a type="button" class="btn btn-default" href="#set-up-components">

View File

@ -10,7 +10,7 @@ DETAILS:
**Tier:** Free, Premium, Ultimate
**Offering:** Self-managed
This page describes the GitLab reference architecture designed to target a peak load of 40 requests per second (RPS), the typical peak load of up to 2,000 users, both manual and automated, based on real data with headroom added.
This page describes the GitLab reference architecture designed to target a peak load of 40 requests per second (RPS), the typical peak load of up to 2,000 users, both manual and automated, based on real data.
For a full list of reference architectures, see
[Available reference architectures](index.md#available-reference-architectures).
@ -20,7 +20,7 @@ For a full list of reference architectures, see
> follow a modified [3K or 60 RPS reference architecture](3k_users.md#supported-modifications-for-lower-user-counts-ha).
> - **Estimated Costs:** [See cost table](index.md#cost-to-run)
> - **Cloud Native Hybrid:** [Yes](#cloud-native-hybrid-reference-architecture-with-helm-charts-alternative)
> - **Unsure which Reference Architecture to use?** [Go to this guide for more info](index.md#deciding-which-architecture-to-use).
> - **Unsure which Reference Architecture to use?** [Go to this guide for more info](index.md#deciding-which-architecture-to-start-with).
| Service | Nodes | Configuration | GCP | AWS | Azure |
|------------------------------------|-------|------------------------|-----------------|--------------|----------|
@ -46,7 +46,8 @@ For a full list of reference architectures, see
However, if you have large monorepos (larger than several gigabytes) this can **significantly** impact Git and Gitaly performance and an increase of specifications will likely be required.
Refer to [large monorepos](index.md#large-monorepos) for more information.
6. Can be placed in Auto Scaling Groups (ASGs) as the component doesn't store any [stateful data](index.md#autoscaling-of-stateful-nodes).
However, for GitLab Rails certain processes like [migrations](#gitlab-rails-post-configuration) and [Mailroom](../incoming_email.md) should be run on only one node.
However, [Cloud Native Hybrid setups](#cloud-native-hybrid-reference-architecture-with-helm-charts-alternative) are generally preferred as certain components
such as [migrations](#gitlab-rails-post-configuration) and [Mailroom](../incoming_email.md) can only be run on one node, which is handled better in Kubernetes.
<!-- markdownlint-enable MD029 -->
NOTE:
@ -108,7 +109,7 @@ against the following endpoint throughput targets:
- Git (Push): 1 RPS
The above targets were selected based on real customer data of total environmental loads corresponding to the user count,
including CI and other workloads along with additional substantial headroom added.
including CI and other workloads.
If you have metrics to suggest that you have regularly higher throughput against the above endpoint targets, [large monorepos](index.md#large-monorepos)
or notable [additional workloads](index.md#additional-workloads) these can notably impact the performance environment and [further adjustments may be required](index.md#scaling-an-environment).
@ -1112,16 +1113,19 @@ as the typical environment above.
First are the components that run in Kubernetes. These run across several node groups, although you can change
the overall makeup as desired as long as the minimum CPU and Memory requirements are observed.
| Service Node Group | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
|---------------------|-------|------------------------|-----------------|--------------|---------------------------------|
| Webservice | 3 | 8 vCPU, 7.2 GB memory | `n1-highcpu-8` | `c5.2xlarge` | 23.7 vCPU, 16.9 GB memory |
| Sidekiq | 2 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 7.8 vCPU, 25.9 GB memory |
| Supporting services | 2 | 2 vCPU, 7.5 GB memory | `n1-standard-2` | `m5.large` | 1.9 vCPU, 5.5 GB memory |
| Component Node Group | Target Node Pool Totals | GCP Example | AWS Example |
|----------------------|-------------------------|-----------------|--------------|
| Webservice | 12 vCPU<br/>15 GB memory (request)<br/>21 GB memory (limit) | 3 x `n1-standard-8` | 3 x `c5.2xlarge` |
| Sidekiq | 3.6 vCPU<br/>8 GB memory (request)<br/>16 GB memory (limit) | 2 x `n1-standard-4` | 2 x `m5.xlarge` |
| Supporting services | 4 vCPU<br/>15 GB memory | 2 x `n1-standard-2` | 2 x `m5.large` |
- For this setup, we **recommend** and regularly [test](index.md#validation-and-test-results)
[Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
- Nodes configuration is shown as it is forced to ensure pod vCPU / memory ratios and avoid scaling during **performance testing**.
- In production deployments, there is no need to assign pods to specific nodes. A minimum of three nodes per node group in three different availability zones is strongly recommended to align with resilient cloud architecture practices.
[Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
- GCP and AWS examples of how to reach the Target Node Pool Totals are given for convenience. These sizes are used in performance testing, but following the example is not required. Different node pool designs can be used as desired as long as the targets are met and all pods can be deployed, as sketched in the example after this list.
- The [Webservice](#webservice) and [Sidekiq](#sidekiq) target node pool totals are given for GitLab components only. Additional resources are required for the chosen Kubernetes provider's system processes. The given examples take this into account.
- The [Supporting](#supporting) target node pool total is given generally to accommodate several resources for supporting the GitLab deployment as well as any additional deployments you may wish to make depending on your requirements. Similar to the other node pools, the chosen Kubernetes provider's system processes also require resources. The given examples take this into account.
- In production deployments, it's not required to assign pods to specific nodes. However, it is recommended to have several nodes in each pool spread across different availability zones to align with resilient cloud architecture practices.
- Enabling autoscaling, such as Cluster Autoscaler, for efficiency reasons is encouraged, but it's generally recommended to target a floor of 75% for Webservice and Sidekiq pods to ensure ongoing performance.
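For example, using the AWS sizes from the table above, an EKS node layout could be described with a tool such as `eksctl`. This is an illustrative sketch only: the tool choice, cluster name, region, and zones are assumptions and not part of this architecture.
```yaml
# Illustrative eksctl sketch matching the AWS example sizes above; names, region, and zones are placeholders.
apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig
metadata:
  name: gitlab-2k
  region: us-east-1
availabilityZones: [us-east-1a, us-east-1b, us-east-1c]
nodeGroups:
  - name: webservice
    instanceType: c5.2xlarge
    desiredCapacity: 3
  - name: sidekiq
    instanceType: m5.xlarge
    desiredCapacity: 2
  - name: supporting
    instanceType: m5.large
    desiredCapacity: 2
```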
Next are the backend components that run on static compute VMs using the Linux package (or External PaaS
services where applicable):
@ -1143,7 +1147,7 @@ services where applicable):
<!-- markdownlint-enable MD029 -->
NOTE:
For all PaaS solutions that involve configuring instances, it is strongly recommended to implement a minimum of three nodes in three different availability zones to align with resilient cloud architecture practices.
For all PaaS solutions that involve configuring instances, it's recommended to implement a minimum of three nodes in three different availability zones to align with resilient cloud architecture practices.
```plantuml
@startuml 2k
@ -1153,11 +1157,11 @@ card "Kubernetes via Helm Charts" as kubernetes {
card "**External Load Balancer**" as elb #6a9be7
together {
collections "**Webservice** x3" as gitlab #32CD32
collections "**Sidekiq** x2" as sidekiq #ff8dd1
collections "**Webservice**" as gitlab #32CD32
collections "**Sidekiq**" as sidekiq #ff8dd1
}
collections "**Supporting Services** x2" as support
collections "**Supporting Services**" as support
}
card "**Gitaly**" as gitaly #FF8C00
@ -1180,36 +1184,43 @@ sidekiq -[#ff8dd1]--> redis
@enduml
```
### Resource usage settings
### Kubernetes component targets
The following formulas help when calculating how many pods may be deployed within resource constraints.
The [2k reference architecture example values file](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/examples/ref/2k.yaml)
documents how to apply the calculated configuration to the Helm Chart.
The following section details the targets used for the GitLab components deployed in Kubernetes.
#### Webservice
Webservice pods typically need about 1 CPU and 1.25 GB of memory _per worker_.
Each Webservice pod consumes roughly 4 CPUs and 5 GB of memory using
the [recommended topology](#cluster-topology) because two worker processes
are created by default and each pod has other small processes running.
Each Webservice pod (Puma and Workhorse) is recommended to be run with the following configuration:
For 40 RPS or 2,000 users we recommend a total Puma worker count of around 12.
With the [provided recommendations](#cluster-topology) this allows the deployment of up to 3
Webservice pods with 4 workers per pod and 1 pod per node. Expand available resources using
the ratio of 1 CPU to 1.25 GB of memory _per each worker process_ for each additional
Webservice pod.
- 4 Puma Workers
- 4 vCPU
- 5 GB memory (request)
- 7 GB memory (limit)
For further information on resource usage, see the [Webservice resources](https://docs.gitlab.com/charts/charts/gitlab/webservice/#resources).
For 40 RPS or 2,000 users we recommend a total Puma worker count of around 12, so in turn it's recommended to run at
least 3 Webservice pods.
For further information on Webservice resource usage, see the Charts documentation on [Webservice resources](https://docs.gitlab.com/charts/charts/gitlab/webservice/#resources).
##### NGINX
It's also recommended to deploy the NGINX controller pods across the Webservice nodes as a DaemonSet. This allows the controllers to scale dynamically with the Webservice pods they serve and to take advantage of the higher network bandwidth that larger machine types typically have.
Note that this isn't a strict requirement. The NGINX controller pods can be deployed as desired as long as they have enough resources to handle the web traffic.
#### Sidekiq
Sidekiq pods should generally have 0.9 CPU and 2 GB of memory.
Each Sidekiq pod is recommended to be run with the following configuration:
[The provided starting point](#cluster-topology) allows the deployment of up to
4 Sidekiq pods. Expand available resources using the 0.9 CPU to 2 GB memory
ratio for each additional pod.
- 1 Sidekiq worker
- 900m vCPU
- 2 GB memory (request)
- 4 GB memory (limit)
For further information on resource usage, see the [Sidekiq resources](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/#resources).
Similar to the standard deployment above, an initial target of 4 Sidekiq workers has been used here.
Additional workers may be required depending on your specific workflow.
For further information on Sidekiq resource usage, see the Charts documentation on [Sidekiq resources](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/#resources).
### Supporting
@ -1217,12 +1228,16 @@ The Supporting Node Pool is designed to house all supporting deployments that do
on the Webservice and Sidekiq pools.
This includes various deployments related to the Cloud Provider's implementation and supporting
GitLab deployments such as NGINX or [GitLab Shell](https://docs.gitlab.com/charts/charts/gitlab/gitlab-shell/).
GitLab deployments such as [GitLab Shell](https://docs.gitlab.com/charts/charts/gitlab/gitlab-shell/).
If you wish to make any additional deployments, such as for Monitoring, it's recommended
If you wish to make any additional deployments, such as Container Registry, Pages, or Monitoring, it's recommended
to deploy these in this pool where possible and not in the Webservice or Sidekiq pools, as the Supporting pool has been designed
specifically to accommodate several additional deployments. However, if your deployments don't fit into the
pool as given, you can increase the node pool accordingly.
pool as given, you can increase the node pool accordingly. Conversely, if the pool in your use case is over-provisioned, you can reduce it accordingly.
### Example config file
An example for the GitLab Helm Charts for the above 40 RPS or 2,000 user reference architecture configuration [can be found in the Charts project](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/examples/ref/2k.yaml).
<div align="right">
<a type="button" class="btn btn-default" href="#set-up-components">

View File

@ -10,7 +10,7 @@ DETAILS:
**Tier:** Premium, Ultimate
**Offering:** Self-managed
This page describes the GitLab reference architecture designed to target a peak load of 60 requests per second (RPS), the typical peak load of up to 3,000 users, both manual and automated, based on real data with headroom added.
This page describes the GitLab reference architecture designed to target a peak load of 60 requests per second (RPS), the typical peak load of up to 3,000 users, both manual and automated, based on real data.
This architecture is the smallest one available with HA built in. If you require HA but
have a lower user count or total load the [Supported Modifications for lower user counts](#supported-modifications-for-lower-user-counts-ha)
@ -23,7 +23,7 @@ For a full list of reference architectures, see
> - **High Availability:** Yes, although [Praefect](#configure-praefect-postgresql) needs a third-party PostgreSQL solution
> - **Estimated Costs:** [See cost table](index.md#cost-to-run)
> - **Cloud Native Hybrid Alternative:** [Yes](#cloud-native-hybrid-reference-architecture-with-helm-charts-alternative)
> - **Unsure which Reference Architecture to use?** [Go to this guide for more info](index.md#deciding-which-architecture-to-use).
> - **Unsure which Reference Architecture to use?** [Go to this guide for more info](index.md#deciding-which-architecture-to-start-with).
| Service | Nodes | Configuration | GCP | AWS | Azure |
|-------------------------------------------|-------|-----------------------|-----------------|--------------|----------|
@ -54,12 +54,13 @@ For a full list of reference architectures, see
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
1. Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health.
However, if you have [large monorepos](index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](index.md#additional-workloads) these can *significantly* impact Git and Gitaly performance and further adjustments will likely be required.
1. Can be placed in Auto Scaling Groups (ASGs) as the component doesn't store any [stateful data](index.md#autoscaling-of-stateful-nodes).
However, for GitLab Rails certain processes like [migrations](#gitlab-rails-post-configuration) and [Mailroom](../incoming_email.md) should be run on only one node.
6. Can be placed in Auto Scaling Groups (ASGs) as the component doesn't store any [stateful data](index.md#autoscaling-of-stateful-nodes).
However, [Cloud Native Hybrid setups](#cloud-native-hybrid-reference-architecture-with-helm-charts-alternative) are generally preferred as certain components
such as [migrations](#gitlab-rails-post-configuration) and [Mailroom](../incoming_email.md) can only be run on one node, which is handled better in Kubernetes.
<!-- markdownlint-enable MD029 -->
NOTE:
For all PaaS solutions that involve configuring instances, it is strongly recommended to implement a minimum of three nodes in three different availability zones to align with resilient cloud architecture practices.
For all PaaS solutions that involve configuring instances, it's recommended to implement a minimum of three nodes in three different availability zones to align with resilient cloud architecture practices.
```plantuml
@startuml 3k
@ -160,7 +161,7 @@ against the following endpoint throughput targets:
- Git (Push): 1 RPS
The above targets were selected based on real customer data of total environmental loads corresponding to the user count,
including CI and other workloads along with additional substantial headroom added.
including CI and other workloads.
If you have metrics to suggest that you have regularly higher throughput against the above endpoint targets, [large monorepos](index.md#large-monorepos)
or notable [additional workloads](index.md#additional-workloads) these can notably impact the performance environment and [further adjustments may be required](index.md#scaling-an-environment).
@ -2250,16 +2251,19 @@ as the typical environment above.
First are the components that run in Kubernetes. These run across several node groups, although you can change
the overall makeup as desired as long as the minimum CPU and Memory requirements are observed.
| Service Node Group | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
|---------------------|-------|-------------------------|-----------------|--------------|---------------------------------|
| Webservice | 2 | 16 vCPU, 14.4 GB memory | `n1-highcpu-16` | `c5.4xlarge` | 31.8 vCPU, 24.8 GB memory |
| Sidekiq | 3 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 11.8 vCPU, 38.9 GB memory |
| Supporting services | 2 | 2 vCPU, 7.5 GB memory | `n1-standard-2` | `m5.large` | 3.9 vCPU, 11.8 GB memory |
| Component Node Group | Target Node Pool Totals | GCP Example | AWS Example |
|----------------------|-------------------------|-----------------|--------------|
| Webservice | 16 vCPU<br/>20 GB memory (request)<br/>28 GB memory (limit) | 2 x `n1-standard-16` | 2 x `c5.4xlarge` |
| Sidekiq | 7.2 vCPU<br/>16 GB memory (request)<br/>32 GB memory (limit) | 3 x `n1-standard-4` | 3 x `m5.xlarge` |
| Supporting services | 4 vCPU<br/>15 GB memory | 2 x `n1-standard-2` | 2 x `m5.large` |
- For this setup, we **recommend** and regularly [test](index.md#validation-and-test-results)
[Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
- Nodes configuration is shown as it is forced to ensure pod vCPU / memory ratios and avoid scaling during **performance testing**.
- In production deployments, there is no need to assign pods to specific nodes. A minimum of three nodes per node group in three different availability zones is strongly recommended to align with resilient cloud architecture practices.
[Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
- GCP and AWS examples of how to reach the Target Node Pool Totals are given for convenience. These sizes are used in performance testing, but following the example is not required. Different node pool designs can be used as desired as long as the targets are met and all pods can be deployed.
- The [Webservice](#webservice) and [Sidekiq](#sidekiq) target node pool totals are given for GitLab components only. Additional resources are required for the chosen Kubernetes provider's system processes. The given examples take this into account.
- The [Supporting](#supporting) target node pool total is given generally to accommodate several resources for supporting the GitLab deployment as well as any additional deployments you may wish to make depending on your requirements. Similar to the other node pools, the chosen Kubernetes provider's system processes also require resources. The given examples take this into account.
- In production deployments, it's not required to assign pods to specific nodes. However, it is recommended to have several nodes in each pool spread across different availability zones to align with resilient cloud architecture practices.
- Enabling autoscaling, such as Cluster Autoscaler, for efficiency reasons is encouraged, but it's generally recommended to target a floor of 75% for Webservice and Sidekiq pods to ensure ongoing performance.
Next are the backend components that run on static compute VMs using the Linux package (or External PaaS
services where applicable):
@ -2292,7 +2296,7 @@ services where applicable):
<!-- markdownlint-enable MD029 -->
NOTE:
For all PaaS solutions that involve configuring instances, it is strongly recommended to implement a minimum of three nodes in three different availability zones to align with resilient cloud architecture practices.
For all PaaS solutions that involve configuring instances, it's recommended to implement a minimum of three nodes in three different availability zones to align with resilient cloud architecture practices.
```plantuml
@startuml 3k
@ -2302,11 +2306,11 @@ card "Kubernetes via Helm Charts" as kubernetes {
card "**External Load Balancer**" as elb #6a9be7
together {
collections "**Webservice** x2" as gitlab #32CD32
collections "**Sidekiq** x3" as sidekiq #ff8dd1
collections "**Webservice**" as gitlab #32CD32
collections "**Sidekiq**" as sidekiq #ff8dd1
}
card "**Supporting Services** x2" as support
card "**Supporting Services**" as support
}
card "**Internal Load Balancer**" as ilb #9370DB
@ -2361,36 +2365,43 @@ consul .[#e76a9b]--> redis
@enduml
```
### Resource usage settings
### Kubernetes component targets
The following formulas help when calculating how many pods may be deployed within resource constraints.
The [3k reference architecture example values file](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/examples/ref/3k.yaml)
documents how to apply the calculated configuration to the Helm Chart.
The following section details the targets used for the GitLab components deployed in Kubernetes.
#### Webservice
Webservice pods typically need about 1 CPU and 1.25 GB of memory _per worker_.
Each Webservice pod consumes roughly 4 CPUs and 5 GB of memory using
the [recommended topology](#cluster-topology) because four worker processes
are created by default and each pod has other small processes running.
Each Webservice pod (Puma and Workhorse) is recommended to be run with the following configuration:
For 60 RPS or 3,000 users we recommend a total Puma worker count of around 16.
With the [provided recommendations](#cluster-topology) this allows the deployment of up to 4
Webservice pods with 4 workers per pod and 2 pods per node. Expand available resources using
the ratio of 1 CPU to 1.25 GB of memory _per each worker process_ for each additional
Webservice pod.
- 4 Puma Workers
- 4 vCPU
- 5 GB memory (request)
- 7 GB memory (limit)
For further information on resource usage, see the [Webservice resources](https://docs.gitlab.com/charts/charts/gitlab/webservice/#resources).
For 60 RPS or 3,000 users we recommend a total Puma worker count of around 16 so in turn it's recommended to run at
least 4 Webservice pods.
For further information on Webservice resource usage, see the Charts documentation on [Webservice resources](https://docs.gitlab.com/charts/charts/gitlab/webservice/#resources).
##### NGINX
It's also recommended to deploy the NGINX controller pods across the Webservice nodes as a DaemonSet. This allows the controllers to scale dynamically with the Webservice pods they serve, and takes advantage of the higher network bandwidth that larger machine types typically have.
Note that this isn't a strict requirement. The NGINX controller pods can be deployed as desired as long as they have enough resources to handle the web traffic.
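For illustration, the Webservice targets above, together with the NGINX DaemonSet recommendation, could be sketched in a Helm values file roughly as follows. This is a minimal sketch only, assuming the standard `gitlab.webservice` keys and the bundled `nginx-ingress` sub-chart; the [3k example values file](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/examples/ref/3k.yaml) is the authoritative reference and may split or tune these values differently (for example, between the Puma and Workhorse containers).

```yaml
# Sketch only: approximate Webservice targets for 60 RPS / 3,000 users.
# Verify key names and exact values against the linked 3k example values file.
gitlab:
  webservice:
    workerProcesses: 4   # 4 Puma workers per pod
    minReplicas: 4       # at least 4 pods for a total of ~16 Puma workers
    maxReplicas: 4
    resources:
      requests:
        cpu: 4           # 4 vCPU per pod
        memory: 5G       # 5 GB memory request per pod
      limits:
        memory: 7G       # 7 GB memory limit per pod
nginx-ingress:
  controller:
    kind: DaemonSet      # run the NGINX controller as a DaemonSet; add a nodeSelector to pin it to the Webservice node pool
```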
#### Sidekiq
Sidekiq pods should generally have 0.9 CPU and 2 GB of memory.
Each Sidekiq pod is recommended to be run with the following configuration:
[The provided starting point](#cluster-topology) allows the deployment of up to
8 Sidekiq pods. Expand available resources using the 0.9 CPU to 2 GB memory
ratio for each additional pod.
- 1 Sidekiq worker
- 900m vCPU
- 2 GB memory (request)
- 4 GB memory (limit)
For further information on resource usage, see the [Sidekiq resources](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/#resources).
Similar to the standard deployment above, an initial target of 8 Sidekiq workers has been used here.
Additional workers may be required depending on your specific workflow.
For further information on Sidekiq resource usage, see the Charts documentation on [Sidekiq resources](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/#resources).
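Likewise, a minimal sketch of how the Sidekiq targets above might be expressed as Helm values, assuming the standard `gitlab.sidekiq` keys; defer to the linked 3k example values file for the complete, tested configuration.

```yaml
# Sketch only: approximate Sidekiq targets for 60 RPS / 3,000 users.
gitlab:
  sidekiq:
    minReplicas: 8       # initial target of 8 Sidekiq worker pods
    maxReplicas: 8
    resources:
      requests:
        cpu: 900m        # 900m vCPU per pod
        memory: 2G       # 2 GB memory request per pod
      limits:
        memory: 4G       # 4 GB memory limit per pod
```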
### Supporting
@ -2398,12 +2409,16 @@ The Supporting Node Pool is designed to house all supporting deployments that do
on the Webservice and Sidekiq pools.
This includes various deployments related to the Cloud Provider's implementation and supporting
GitLab deployments such as NGINX or [GitLab Shell](https://docs.gitlab.com/charts/charts/gitlab/gitlab-shell/).
GitLab deployments such as [GitLab Shell](https://docs.gitlab.com/charts/charts/gitlab/gitlab-shell/).
If you wish to make any additional deployments, such as for Monitoring, it's recommended
If you wish to make any additional deployments, such as Container Registry, Pages, or Monitoring, it's recommended
to deploy these in this pool where possible and not in the Webservice or Sidekiq pools, as the Supporting pool has been designed
specifically to accommodate several additional deployments. However, if your deployments don't fit into the
pool as given, you can increase the node pool accordingly.
pool as given, you can increase the node pool accordingly. Conversely, if the pool is over-provisioned for your use case, you can reduce it accordingly.
### Example config file
An example of the GitLab Helm Charts targeting the above 60 RPS or 3,000 user reference architecture configuration [can be found in the Charts project](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/examples/ref/3k.yaml).
<div align="right">
<a type="button" class="btn btn-default" href="#set-up-components">

View File

@ -10,20 +10,20 @@ DETAILS:
**Tier:** Premium, Ultimate
**Offering:** Self-managed
This page describes the GitLab reference architecture designed to target a peak load of 1000 requests per second (RPS), the typical peak load of up to 50,000 users, both manual and automated, based on real data with headroom added.
This page describes the GitLab reference architecture designed to target a peak load of 1000 requests per second (RPS), the typical peak load of up to 50,000 users, both manual and automated, based on real data.
For a full list of reference architectures, see
[Available reference architectures](index.md#available-reference-architectures).
NOTE:
Before deploying this architecture it's recommended to read through the [main documentation](index.md) first,
specifically the [Before you start](index.md#before-you-start) and [Deciding which architecture to use](index.md#deciding-which-architecture-to-use) sections.
specifically the [Before you start](index.md#before-you-start) and [Deciding which architecture to start with](index.md#deciding-which-architecture-to-start-with) sections.
> - **Target load:** API: 1000 RPS, Web: 100 RPS, Git (Pull): 100 RPS, Git (Push): 20 RPS
> - **High Availability:** Yes ([Praefect](#configure-praefect-postgresql) needs a third-party PostgreSQL solution for HA)
> - **Estimated Costs:** [See cost table](index.md#cost-to-run)
> - **Cloud Native Hybrid Alternative:** [Yes](#cloud-native-hybrid-reference-architecture-with-helm-charts-alternative)
> - **Unsure which Reference Architecture to use?** [Go to this guide for more info](index.md#deciding-which-architecture-to-use)
> - **Unsure which Reference Architecture to use?** [Go to this guide for more info](index.md#deciding-which-architecture-to-start-with)
| Service | Nodes | Configuration | GCP | AWS | Azure |
|------------------------------------------|-------|-------------------------|------------------|---------------|-----------|
@ -55,12 +55,13 @@ specifically the [Before you start](index.md#before-you-start) and [Deciding whi
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
6. Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health.
However, if you have [large monorepos](index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](index.md#additional-workloads) these can *significantly* impact Git and Gitaly performance and further adjustments will likely be required.
7. Can be placed in Auto Scaling Groups (ASGs) as the component doesn't store any [stateful data](index.md#autoscaling-of-stateful-nodes).
However, for GitLab Rails certain processes like [migrations](#gitlab-rails-post-configuration) and [Mailroom](../incoming_email.md) should be run on only one node.
6. Can be placed in Auto Scaling Groups (ASGs) as the component doesn't store any [stateful data](index.md#autoscaling-of-stateful-nodes).
However, [Cloud Native Hybrid setups](#cloud-native-hybrid-reference-architecture-with-helm-charts-alternative) are generally preferred as certain components
such as [migrations](#gitlab-rails-post-configuration) and [Mailroom](../incoming_email.md) can only be run on one node, which is handled better in Kubernetes.
<!-- markdownlint-enable MD029 -->
NOTE:
For all PaaS solutions that involve configuring instances, it is strongly recommended to implement a minimum of three nodes in three different availability zones to align with resilient cloud architecture practices.
For all PaaS solutions that involve configuring instances, it's recommended to implement a minimum of three nodes in three different availability zones to align with resilient cloud architecture practices.
```plantuml
@startuml 50k
@ -164,7 +165,7 @@ against the following endpoint throughput targets:
- Git (Push): 20 RPS
The above targets were selected based on real customer data of total environmental loads corresponding to the user count,
including CI and other workloads along with additional substantial headroom added.
including CI and other workloads.
If you have metrics to suggest that you have regularly higher throughput against the above endpoint targets, [large monorepos](index.md#large-monorepos)
or notable [additional workloads](index.md#additional-workloads), these can notably impact the environment's performance and [further adjustments may be required](index.md#scaling-an-environment).
@ -2282,16 +2283,19 @@ as the typical environment above.
First are the components that run in Kubernetes. These run across several node groups, although you can change
the overall makeup as desired as long as the minimum CPU and Memory requirements are observed.
| Service Node Group | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
|---------------------|-------|-------------------------|-----------------|--------------|---------------------------------|
| Webservice | 16 | 32 vCPU, 28.8 GB memory | `n1-highcpu-32` | `c5.9xlarge` | 510 vCPU, 472 GB memory |
| Sidekiq | 4 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 15.5 vCPU, 50 GB memory |
| Supporting services | 2 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 7.75 vCPU, 25 GB memory |
| Component Node Group | Target Node Pool Totals | GCP Example | AWS Example |
|----------------------|-------------------------|-----------------|--------------|
| Webservice | 308 vCPU<br/>385 GB memory (request)<br/>539 GB memory (limit) | 11 x `n1-standard-32` | 11 x `c5.9xlarge` |
| Sidekiq | 12.6 vCPU<br/>28 GB memory (request)<br/>56 GB memory (limit) | 4 x `n1-standard-4` | 4 x `m5.xlarge` |
| Supporting services | 4 vCPU<br/>15 GB memory | 2 x `n1-standard-4` | 2 x `m5.xlarge` |
- For this setup, we **recommend** and regularly [test](index.md#validation-and-test-results)
[Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
- Nodes configuration is shown as it is forced to ensure pod vCPU / memory ratios and avoid scaling during **performance testing**.
- In production deployments, there is no need to assign pods to specific nodes. A minimum of three nodes per node group in three different availability zones is strongly recommended to align with resilient cloud architecture practices.
[Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
- GCP and AWS examples of how to reach the Target Node Pool Total are given for convenience. These sizes are used in performance testing but following the example is not required. Different node pool designs can be used as desired as long as the targets are met, and all pods can deploy.
- The [Webservice](#webservice) and [Sidekiq](#sidekiq) target node pool totals are given for GitLab components only. Additional resources are required for the chosen Kubernetes provider's system processes. The given examples take this into account.
- The [Supporting](#supporting) target node pool total is given generally to accommodate several resources for supporting the GitLab deployment as well as any additional deployments you may wish to make depending on your requirements. Similar to the other node pools, the chosen Kubernetes provider's system processes also require resources. The given examples take this into account.
- In production deployments, it's not required to assign pods to specific nodes. However, it is recommended to have several nodes in each pool spread across different availability zones to align with resilient cloud architecture practices.
- Enabling autoscaling, such as the Cluster Autoscaler, is encouraged for efficiency reasons, but it's generally recommended to target a floor of 75% for Webservice and Sidekiq pods to ensure ongoing performance.
Next are the backend components that run on static compute VMs using the Linux package (or External PaaS
services where applicable):
@ -2325,7 +2329,7 @@ services where applicable):
<!-- markdownlint-enable MD029 -->
NOTE:
For all PaaS solutions that involve configuring instances, it is strongly recommended to implement a minimum of three nodes in three different availability zones to align with resilient cloud architecture practices.
For all PaaS solutions that involve configuring instances, it's recommended to implement a minimum of three nodes in three different availability zones to align with resilient cloud architecture practices.
```plantuml
@startuml 50k
@ -2335,11 +2339,11 @@ card "Kubernetes via Helm Charts" as kubernetes {
card "**External Load Balancer**" as elb #6a9be7
together {
collections "**Webservice** x16" as gitlab #32CD32
collections "**Sidekiq** x4" as sidekiq #ff8dd1
collections "**Webservice**" as gitlab #32CD32
collections "**Sidekiq**" as sidekiq #ff8dd1
}
card "**Supporting Services** x2" as support
card "**Supporting Services**" as support
}
card "**Internal Load Balancer**" as ilb #9370DB
@ -2397,36 +2401,43 @@ consul .[#e76a9b]--> redis
@enduml
```
### Resource usage settings
### Kubernetes component targets
The following formulas help when calculating how many pods may be deployed within resource constraints.
The [50k reference architecture example values file](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/examples/ref/50k.yaml)
documents how to apply the calculated configuration to the Helm Chart.
The following section details the targets used for the GitLab components deployed in Kubernetes.
#### Webservice
Webservice pods typically need about 1 CPU and 1.25 GB of memory _per worker_.
Each Webservice pod consumes roughly 4 CPUs and 5 GB of memory using
the [recommended topology](#cluster-topology) because four worker processes
are created by default and each pod has other small processes running.
Each Webservice pod (Puma and Workhorse) is recommended to be run with the following configuration:
For 1000 RPS or 50,000 users we recommend a total Puma worker count of around 320.
With the [provided recommendations](#cluster-topology) this allows the deployment of up to 80
Webservice pods with 4 workers per pod and 5 pods per node. Expand available resources using
the ratio of 1 CPU to 1.25 GB of memory _per each worker process_ for each additional
Webservice pod.
- 4 Puma Workers
- 4 vCPU
- 5 GB memory (request)
- 7 GB memory (limit)
For further information on resource usage, see the [Webservice resources](https://docs.gitlab.com/charts/charts/gitlab/webservice/#resources).
For 1000 RPS or 50,000 users we recommend a total Puma worker count of around 308 so in turn it's recommended to run at
least 77 Webservice pods.
For further information on Webservice resource usage, see the Charts documentation on [Webservice resources](https://docs.gitlab.com/charts/charts/gitlab/webservice/#resources).
##### NGINX
It's also recommended to deploy the NGINX controller pods across the Webservice nodes as a DaemonSet. This allows the controllers to scale dynamically with the Webservice pods they serve, and takes advantage of the higher network bandwidth that larger machine types typically have.
Note that this isn't a strict requirement. The NGINX controller pods can be deployed as desired as long as they have enough resources to handle the web traffic.
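As an illustration, the Webservice targets and the NGINX DaemonSet recommendation above might translate into Helm values along these lines. This is a minimal sketch only, assuming the standard `gitlab.webservice` keys and the bundled `nginx-ingress` sub-chart; the [50k example values file](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/examples/ref/50k.yaml) is the authoritative reference and may split or tune these values differently (for example, between the Puma and Workhorse containers).

```yaml
# Sketch only: approximate Webservice targets for 1000 RPS / 50,000 users.
# Verify key names and exact values against the linked 50k example values file.
gitlab:
  webservice:
    workerProcesses: 4   # 4 Puma workers per pod
    minReplicas: 77      # at least 77 pods for a total of ~308 Puma workers
    maxReplicas: 77
    resources:
      requests:
        cpu: 4           # 4 vCPU per pod
        memory: 5G       # 5 GB memory request per pod
      limits:
        memory: 7G       # 7 GB memory limit per pod
nginx-ingress:
  controller:
    kind: DaemonSet      # run the NGINX controller as a DaemonSet; add a nodeSelector to pin it to the Webservice node pool
```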
#### Sidekiq
Sidekiq pods should generally have 0.9 CPU and 2 GB of memory.
Each Sidekiq pod is recommended to be run with the following configuration:
[The provided starting point](#cluster-topology) allows the deployment of up to
14 Sidekiq pods. Expand available resources using the 0.9 CPU to 2 GB memory
ratio for each additional pod.
- 1 Sidekiq worker
- 900m vCPU
- 2 GB memory (request)
- 4 GB memory (limit)
For further information on resource usage, see the [Sidekiq resources](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/#resources).
Similar to the standard deployment above, an initial target of 14 Sidekiq workers has been used here.
Additional workers may be required depending on your specific workflow.
For further information on Sidekiq resource usage, see the Charts documentation on [Sidekiq resources](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/#resources).
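A corresponding minimal sketch for the Sidekiq targets above, assuming the standard `gitlab.sidekiq` keys; defer to the linked 50k example values file for the complete, tested configuration.

```yaml
# Sketch only: approximate Sidekiq targets for 1000 RPS / 50,000 users.
gitlab:
  sidekiq:
    minReplicas: 14      # initial target of 14 Sidekiq worker pods
    maxReplicas: 14
    resources:
      requests:
        cpu: 900m        # 900m vCPU per pod
        memory: 2G       # 2 GB memory request per pod
      limits:
        memory: 4G       # 4 GB memory limit per pod
```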
### Supporting
@ -2434,12 +2445,16 @@ The Supporting Node Pool is designed to house all supporting deployments that do
on the Webservice and Sidekiq pools.
This includes various deployments related to the Cloud Provider's implementation and supporting
GitLab deployments such as NGINX or [GitLab Shell](https://docs.gitlab.com/charts/charts/gitlab/gitlab-shell/).
GitLab deployments such as [GitLab Shell](https://docs.gitlab.com/charts/charts/gitlab/gitlab-shell/).
If you wish to make any additional deployments, such as for Monitoring, it's recommended
If you wish to make any additional deployments, such as Container Registry, Pages, or Monitoring, it's recommended
to deploy these in this pool where possible and not in the Webservice or Sidekiq pools, as the Supporting pool has been designed
specifically to accommodate several additional deployments. However, if your deployments don't fit into the
pool as given, you can increase the node pool accordingly.
pool as given, you can increase the node pool accordingly. Conversely, if the pool is over-provisioned for your use case, you can reduce it accordingly.
### Example config file
An example of the GitLab Helm Charts targeting the above 1000 RPS or 50,000 user reference architecture configuration [can be found in the Charts project](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/examples/ref/50k.yaml).
<div align="right">
<a type="button" class="btn btn-default" href="#set-up-components">

View File

@ -10,20 +10,20 @@ DETAILS:
**Tier:** Premium, Ultimate
**Offering:** Self-managed
This page describes the GitLab reference architecture designed to target a peak load of 100 requests per second (RPS) - The typical peak load of up to 5,000 users, both manual and automated, based on real data with headroom added.
This page describes the GitLab reference architecture designed to target a peak load of 100 requests per second (RPS), the typical peak load of up to 5,000 users, both manual and automated, based on real data.
For a full list of reference architectures, see
[Available reference architectures](index.md#available-reference-architectures).
NOTE:
Before deploying this architecture it's recommended to read through the [main documentation](index.md) first,
specifically the [Before you start](index.md#before-you-start) and [Deciding which architecture to use](index.md#deciding-which-architecture-to-use) sections.
specifically the [Before you start](index.md#before-you-start) and [Deciding which architecture to start with](index.md#deciding-which-architecture-to-start-with) sections.
> - **Target load:** API: 100 RPS, Web: 10 RPS, Git (Pull): 10 RPS, Git (Push): 2 RPS
> - **High Availability:** Yes ([Praefect](#configure-praefect-postgresql) needs a third-party PostgreSQL solution for HA)
> - **Estimated Costs:** [See cost table](index.md#cost-to-run)
> - **Cloud Native Hybrid Alternative:** [Yes](#cloud-native-hybrid-reference-architecture-with-helm-charts-alternative)
> - **Unsure which Reference Architecture to use?** [Go to this guide for more info](index.md#deciding-which-architecture-to-use)
> - **Unsure which Reference Architecture to use?** [Go to this guide for more info](index.md#deciding-which-architecture-to-start-with)
| Service | Nodes | Configuration | GCP | AWS | Azure |
|-------------------------------------------|-------|-------------------------|-----------------|--------------|----------|
@ -54,12 +54,13 @@ specifically the [Before you start](index.md#before-you-start) and [Deciding whi
Review the existing [technical limitations and considerations before deploying Gitaly Cluster](../gitaly/index.md#before-deploying-gitaly-cluster). If you want sharded Gitaly, use the same specs listed above for `Gitaly`.
6. Gitaly specifications are based on high percentiles of both usage patterns and repository sizes in good health.
However, if you have [large monorepos](index.md#large-monorepos) (larger than several gigabytes) or [additional workloads](index.md#additional-workloads) these can *significantly* impact Git and Gitaly performance and further adjustments will likely be required.
7. Can be placed in Auto Scaling Groups (ASGs) as the component doesn't store any [stateful data](index.md#autoscaling-of-stateful-nodes).
However, for GitLab Rails certain processes like [migrations](#gitlab-rails-post-configuration) and [Mailroom](../incoming_email.md) should be run on only one node.
6. Can be placed in Auto Scaling Groups (ASGs) as the component doesn't store any [stateful data](index.md#autoscaling-of-stateful-nodes).
However, [Cloud Native Hybrid setups](#cloud-native-hybrid-reference-architecture-with-helm-charts-alternative) are generally preferred as certain components
such as [migrations](#gitlab-rails-post-configuration) and [Mailroom](../incoming_email.md) can only be run on one node, which is handled better in Kubernetes.
<!-- markdownlint-enable MD029 -->
NOTE:
For all PaaS solutions that involve configuring instances, it is strongly recommended to implement a minimum of three nodes in three different availability zones to align with resilient cloud architecture practices.
For all PaaS solutions that involve configuring instances, it's recommended to implement a minimum of three nodes in three different availability zones to align with resilient cloud architecture practices.
```plantuml
@startuml 5k
@ -160,7 +161,7 @@ against the following endpoint throughput targets:
- Git (Push): 2 RPS
The above targets were selected based on real customer data of total environmental loads corresponding to the user count,
including CI and other workloads along with additional substantial headroom added.
including CI and other workloads.
If you have metrics to suggest that you have regularly higher throughput against the above endpoint targets, [large monorepos](index.md#large-monorepos)
or notable [additional workloads](index.md#additional-workloads), these can notably impact the environment's performance and [further adjustments may be required](index.md#scaling-an-environment).
@ -2225,16 +2226,19 @@ as the typical environment above.
First are the components that run in Kubernetes. These run across several node groups, although you can change
the overall makeup as desired as long as the minimum CPU and Memory requirements are observed.
| Service Node Group | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
|-------------------- |-------|-------------------------|-----------------|--------------|---------------------------------|
| Webservice | 5 | 16 vCPU, 14.4 GB memory | `n1-highcpu-16` | `c5.4xlarge` | 79.5 vCPU, 62 GB memory |
| Sidekiq | 3 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 11.8 vCPU, 38.9 GB memory |
| Supporting services | 2 | 2 vCPU, 7.5 GB memory | `n1-standard-2` | `m5.large` | 3.9 vCPU, 11.8 GB memory |
| Component Node Group | Target Node Pool Totals | GCP Example | AWS Example |
|----------------------|-------------------------|-----------------|--------------|
| Webservice | 36 vCPU<br/>45 GB memory (request)<br/>63 GB memory (limit) | 3 x `n1-standard-16` | 3 x `c5.4xlarge` |
| Sidekiq | 7.2 vCPU<br/>16 GB memory (request)<br/>32 GB memory (limit) | 3 x `n1-standard-4` | 3 x `m5.xlarge` |
| Supporting services | 4 vCPU<br/>15 GB memory | 2 x `n1-standard-2` | 2 x `m5.large` |
- For this setup, we **recommend** and regularly [test](index.md#validation-and-test-results)
[Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
- Nodes configuration is shown as it is forced to ensure pod vCPU / memory ratios and avoid scaling during **performance testing**.
- In production deployments, there is no need to assign pods to nodes. A minimum of three nodes in three different availability zones is strongly recommended to align with resilient cloud architecture practices.
[Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) and [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). Other Kubernetes services may also work, but your mileage may vary.
- GCP and AWS examples of how to reach the Target Node Pool Total are given for convenience. These sizes are used in performance testing but following the example is not required. Different node pool designs can be used as desired as long as the targets are met, and all pods can deploy.
- The [Webservice](#webservice) and [Sidekiq](#sidekiq) target node pool totals are given for GitLab components only. Additional resources are required for the chosen Kubernetes provider's system processes. The given examples take this into account.
- The [Supporting](#supporting) target node pool total is given generally to accommodate several resources for supporting the GitLab deployment as well as any additional deployments you may wish to make depending on your requirements. Similar to the other node pools, the chosen Kubernetes provider's system processes also require resources. The given examples take this into account.
- In production deployments, it's not required to assign pods to specific nodes. However, it is recommended to have several nodes in each pool spread across different availability zones to align with resilient cloud architecture practices.
- Enabling autoscaling, such as the Cluster Autoscaler, is encouraged for efficiency reasons, but it's generally recommended to target a floor of 75% for Webservice and Sidekiq pods to ensure ongoing performance.
Next are the backend components that run on static compute VMs using the Linux package (or External PaaS
services where applicable):
@ -2267,7 +2271,7 @@ services where applicable):
<!-- markdownlint-enable MD029 -->
NOTE:
For all PaaS solutions that involve configuring instances, it is strongly recommended to implement a minimum of three nodes in three different availability zones to align with resilient cloud architecture practices.
For all PaaS solutions that involve configuring instances, it's recommended to implement a minimum of three nodes in three different availability zones to align with resilient cloud architecture practices.
```plantuml
@startuml 5k
@ -2277,11 +2281,11 @@ card "Kubernetes via Helm Charts" as kubernetes {
card "**External Load Balancer**" as elb #6a9be7
together {
collections "**Webservice** x5" as gitlab #32CD32
collections "**Sidekiq** x3" as sidekiq #ff8dd1
collections "**Webservice**" as gitlab #32CD32
collections "**Sidekiq**" as sidekiq #ff8dd1
}
card "**Supporting Services** x2" as support
card "**Supporting Services**" as support
}
card "**Internal Load Balancer**" as ilb #9370DB
@ -2336,36 +2340,43 @@ consul .[#e76a9b]--> redis
@enduml
```
### Resource usage settings
### Kubernetes component targets
The following formulas help when calculating how many pods may be deployed within resource constraints.
The [5k reference architecture example values file](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/examples/ref/5k.yaml)
documents how to apply the calculated configuration to the Helm Chart.
The following section details the targets used for the GitLab components deployed in Kubernetes.
#### Webservice
Webservice pods typically need about 1 CPU and 1.25 GB of memory _per worker_.
Each Webservice pod consumes roughly 4 CPUs and 5 GB of memory using
the [recommended topology](#cluster-topology) because four worker processes
are created by default and each pod has other small processes running.
Each Webservice pod (Puma and Workhorse) is recommended to be run with the following configuration:
For 100 RPS or 5,000 users we recommend a total Puma worker count of around 40.
With the [provided recommendations](#cluster-topology) this allows the deployment of up to 10
Webservice pods with 4 workers per pod and 2 pods per node. Expand available resources using
the ratio of 1 CPU to 1.25 GB of memory _per each worker process_ for each additional
Webservice pod.
- 4 Puma Workers
- 4 vCPU
- 5 GB memory (request)
- 7 GB memory (limit)
For further information on resource usage, see the [Webservice resources](https://docs.gitlab.com/charts/charts/gitlab/webservice/#resources).
For 100 RPS or 5,000 users we recommend a total Puma worker count of around 36 so in turn it's recommended to run at
least 9 Webservice pods.
For further information on Webservice resource usage, see the Charts documentation on [Webservice resources](https://docs.gitlab.com/charts/charts/gitlab/webservice/#resources).
##### NGINX
It's also recommended to deploy the NGINX controller pods across the Webservice nodes as a DaemonSet. This allows the controllers to scale dynamically with the Webservice pods they serve, and takes advantage of the higher network bandwidth that larger machine types typically have.
Note that this isn't a strict requirement. The NGINX controller pods can be deployed as desired as long as they have enough resources to handle the web traffic.
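For illustration, the Webservice targets and the NGINX DaemonSet recommendation above could be sketched as Helm values roughly as follows. This is a minimal sketch only, assuming the standard `gitlab.webservice` keys and the bundled `nginx-ingress` sub-chart; the [5k example values file](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/examples/ref/5k.yaml) is the authoritative reference and may split or tune these values differently (for example, between the Puma and Workhorse containers).

```yaml
# Sketch only: approximate Webservice targets for 100 RPS / 5,000 users.
# Verify key names and exact values against the linked 5k example values file.
gitlab:
  webservice:
    workerProcesses: 4   # 4 Puma workers per pod
    minReplicas: 9       # at least 9 pods for a total of ~36 Puma workers
    maxReplicas: 9
    resources:
      requests:
        cpu: 4           # 4 vCPU per pod
        memory: 5G       # 5 GB memory request per pod
      limits:
        memory: 7G       # 7 GB memory limit per pod
nginx-ingress:
  controller:
    kind: DaemonSet      # run the NGINX controller as a DaemonSet; add a nodeSelector to pin it to the Webservice node pool
```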
#### Sidekiq
Sidekiq pods should generally have 0.9 CPU and 2 GB of memory.
Each Sidekiq pod is recommended to be run with the following configuration:
[The provided starting point](#cluster-topology) allows the deployment of up to
8 Sidekiq pods. Expand available resources using the 0.9 CPU to 2 GB memory
ratio for each additional pod.
- 1 Sidekiq worker
- 900m vCPU
- 2 GB memory (request)
- 4 GB memory (limit)
For further information on resource usage, see the [Sidekiq resources](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/#resources).
Similar to the standard deployment above, an initial target of 8 Sidekiq workers has been used here.
Additional workers may be required depending on your specific workflow.
For further information on Sidekiq resource usage, see the Charts documentation on [Sidekiq resources](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/#resources).
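A corresponding minimal sketch for the Sidekiq targets above, assuming the standard `gitlab.sidekiq` keys; defer to the linked 5k example values file for the complete, tested configuration.

```yaml
# Sketch only: approximate Sidekiq targets for 100 RPS / 5,000 users.
gitlab:
  sidekiq:
    minReplicas: 8       # initial target of 8 Sidekiq worker pods
    maxReplicas: 8
    resources:
      requests:
        cpu: 900m        # 900m vCPU per pod
        memory: 2G       # 2 GB memory request per pod
      limits:
        memory: 4G       # 4 GB memory limit per pod
```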
### Supporting
@ -2373,12 +2384,16 @@ The Supporting Node Pool is designed to house all supporting deployments that do
on the Webservice and Sidekiq pools.
This includes various deployments related to the Cloud Provider's implementation and supporting
GitLab deployments such as NGINX or [GitLab Shell](https://docs.gitlab.com/charts/charts/gitlab/gitlab-shell/).
GitLab deployments such as [GitLab Shell](https://docs.gitlab.com/charts/charts/gitlab/gitlab-shell/).
If you wish to make any additional deployments, such as for Monitoring, it's recommended
If you wish to make any additional deployments, such as Container Registry, Pages, or Monitoring, it's recommended
to deploy these in this pool where possible and not in the Webservice or Sidekiq pools, as the Supporting pool has been designed
specifically to accommodate several additional deployments. However, if your deployments don't fit into the
pool as given, you can increase the node pool accordingly.
pool as given, you can increase the node pool accordingly. Conversely, if the pool is over-provisioned for your use case, you can reduce it accordingly.
### Example config file
An example of the GitLab Helm Charts targeting the above 100 RPS or 5,000 user reference architecture configuration [can be found in the Charts project](https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/examples/ref/5k.yaml).
<div align="right">
<a type="button" class="btn btn-default" href="#set-up-components">

View File

@ -12,16 +12,16 @@ DETAILS:
**Offering:** Self-managed
The GitLab Reference Architectures have been designed and tested by the
GitLab Test Platform and Support teams to provide scalable recommended deployments for target loads.
GitLab Test Platform and Support teams to provide recommended scalable and elastic deployments as starting points for target loads.
## Available reference architectures
The following Reference Architectures are available as recommended starting points for your environment.
The architectures are named in terms of peak load, based on user count or Requests per Second (RPS). Where the latter has been calculated based on average real data of the former with headroom added.
The architectures are named in terms of peak load, based on user count or Requests per Second (RPS), where the latter has been calculated based on average real data.
NOTE:
Each architecture has been designed to be [scalable and can be adjusted accordingly if required](#scaling-an-environment) by your specific workload. This may be likely in known heavy scenarios such as using [large monorepos](#large-monorepos) or notable [additional workloads](#additional-workloads).
Each architecture has been designed to be [scalable and elastic](#scaling-an-environment), so it can be adjusted as required by your specific workload. This is particularly likely in known heavy scenarios such as using [large monorepos](#large-monorepos) or notable [additional workloads](#additional-workloads).
For details about what each Reference Architecture has been tested against, see the "Testing Methodology" section of each page.
@ -56,30 +56,32 @@ Running any application in production is complex, and the same applies for GitLa
As such, it's recommended that you have a working knowledge of running and maintaining applications in production when deciding on going down this route. If you aren't in this position, our [Professional Services](https://about.gitlab.com/services/#implementation-services) team offers implementation services, but for those who want a more managed solution long term, it's recommended to instead explore our other offerings such as [GitLab SaaS](../../subscriptions/gitlab_com/index.md) or [GitLab Dedicated](../../subscriptions/gitlab_dedicated/index.md).
If Self Managed is the approach you're considering, it's strongly encouraged to read through this page in full, in particular the [Deciding which architecture to use](#deciding-which-architecture-to-use), [Large monorepos](#large-monorepos) and [Additional workloads](#additional-workloads) sections.
If Self Managed is the approach you're considering, it's strongly encouraged to read through this page in full, in particular the [Deciding which architecture to start with](#deciding-which-architecture-to-start-with), [Large monorepos](#large-monorepos) and [Additional workloads](#additional-workloads) sections.
## Deciding which architecture to use
## Deciding which architecture to start with
The Reference Architectures are designed to strike a balance between two important factors--performance and resilience.
The Reference Architectures are designed to strike a balance between three important factors: performance, resilience, and costs.
While they are designed to make it easier to set up GitLab at scale, it can still be a challenge to know which one meets your requirements.
While they are designed to make it easier to set up GitLab at scale, it can still be a challenge to know which one meets your requirements and where to start accordingly.
As a general guide, **the more performant and/or resilient you want your environment to be, the more complex it is**.
This section explains the designs you can choose from. It begins with the least complexity, goes to the most, and ends with a decision tree.
This section explains the things to consider when picking a Reference Architecture to start with.
### Expected Load (RPS or user count)
### Expected Load
The first thing to check is the peak load your environment would be expected to serve.
Each architecture is described in terms of peak Requests per Second (RPS) or user count load. As detailed under the "Testing Methodology" section on each page, each architecture is tested
against its listed RPS for each endpoint type (API, Web, Git), which is the typical peak load of the given user count, both manual and automated, with headroom.
against its listed RPS for each endpoint type (API, Web, Git), which is the typical peak load of the given user count, both manual and automated.
It's strongly recommended finding out what peak RPS your environment will be expected to handle across endpoint types, through existing metrics (such as [Prometheus](../monitoring/prometheus/gitlab_metrics.md))
It's strongly recommended to find out what peak RPS your environment will be expected to handle across endpoint types, through existing metrics (such as [Prometheus](../monitoring/prometheus/index.md#sample-prometheus-queries))
or estimates, and to select the corresponding architecture as this is the most objective.
#### If in doubt, pick the closest user count and scale accordingly
If it's not possible for you to find out the expected peak RPS then it's recommended to select based on user count to start and then monitor the environment
closely to confirm the RPS, whether the architecture is performing and adjust accordingly is necessary.
closely to confirm the RPS, check that the architecture is performing as expected, and [scale accordingly](#scaling-an-environment) as necessary.
### Standalone (non-HA)
@ -267,7 +269,7 @@ the following guidance is followed to ensure the best chance of good performance
### Additional workloads
These reference architectures have been [designed and tested](index.md#validation-and-test-results) for standard GitLab
setups with good headroom in mind to cover most scenarios.
setups based on real data.
However, additional workloads can multiply the impact of operations by triggering follow-up actions.
You may need to adjust the suggested specifications to compensate if you use, for example:
@ -307,12 +309,12 @@ We dont recommend the use of round-robin algorithms as they are known to not
The total network bandwidth available to a load balancer when deployed on a machine can vary notably across Cloud Providers. In particular some Cloud Providers, like [AWS](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-network-bandwidth.html), may operate on a burst system with credits to determine the bandwidth at any time.
The network bandwidth your environment's load balancers will require is dependent on numerous factors such as data shape and workload. The recommended base sizes for each Reference Architecture class have been selected to give a good level of bandwidth with adequate headroom but in some scenarios, such as consistent clones of [large monorepos](#large-monorepos), the sizes may need to be adjusted accordingly.
The network bandwidth your environment's load balancers will require is dependent on numerous factors such as data shape and workload. The recommended base sizes for each Reference Architecture class have been selected based on real data but in some scenarios, such as consistent clones of [large monorepos](#large-monorepos), the sizes may need to be adjusted accordingly.
### No swap
Swap is not recommended in the reference architectures. It's a failsafe that impacts performance greatly. The
reference architectures are designed to have memory headroom to avoid needing swap.
reference architectures are designed to have enough memory in most cases to avoid needing swap.
### Praefect PostgreSQL
@ -386,7 +388,7 @@ Additionally, the following cloud provider services are recommended for use as p
</tr>
<tr>
<td>Database</td>
<td>🟢 &nbsp; <a href="https://cloud.google.com/sql" target="_blank" rel="noopener noreferrer">Cloud SQL</a></td>
<td>🟢 &nbsp; <a href="https://cloud.google.com/sql" target="_blank" rel="noopener noreferrer">Cloud SQL<sup>1</sup></a></td>
<td>🟢 &nbsp; <a href="https://aws.amazon.com/rds/" target="_blank" rel="noopener noreferrer">RDS</a></td>
<td>🟢 &nbsp; <a href="https://azure.microsoft.com/en-gb/products/postgresql/" target="_blank" rel="noopener noreferrer">Azure Database for PostgreSQL Flexible Server</a></td>
<td></td>
@ -401,6 +403,12 @@ Additionally, the following cloud provider services are recommended for use as p
</tbody>
</table>
<!-- Disable ordered list rule https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md#md029---ordered-list-item-prefix -->
<!-- markdownlint-disable MD029 -->
1. The [Enterprise Plus edition](https://cloud.google.com/sql/docs/editions-intro) for GCP Cloud SQL is generally recommended for optimal performance. This especially applies to larger environments (500 RPS / 25k users or higher). Max connections may need to be adjusted higher than the service's defaults depending on workload.
2. It's strongly recommended to deploy the [Premium tier of Azure Cache for Redis](https://learn.microsoft.com/en-us/azure/azure-cache-for-redis/cache-overview#service-tiers) to ensure good performance.
<!-- markdownlint-enable MD029 -->
### Recommendation notes for the database services
[When selecting to use an external database service](../postgresql/external.md), ensure it runs a standard, performant, and [supported version](../../install/requirements.md#postgresql-requirements).
@ -409,9 +417,9 @@ If you choose to use a third party external service:
1. Note that the HA Linux package PostgreSQL setup encompasses PostgreSQL, PgBouncer and Consul. All of these components would no longer be required when using a third party external service.
1. The number of nodes required to achieve HA may differ depending on the service compared to the Linux package and doesn't need to match accordingly.
1. However, if [Database Load Balancing](../postgresql/database_load_balancing.md) via Read Replicas is desired for further improved performance it's recommended to follow the node count for the Reference Architecture.
1. In general, it's recommended to enable Read Replicas for [Database Load Balancing](../postgresql/database_load_balancing.md) if possible, matching the node counts for the standard Linux package deployment. This especially applies to larger environments (over 200 RPS / 10k users).
1. Ensure that, if a pooler is offered as part of the service, it can handle the total load without bottlenecking.
For example, Azure Database for PostgreSQL Flexible Server can optionally deploy a PgBouncer pooler in front of the Database, but PgBouncer is single threaded, so this in turn may cause bottlenecking. However, if using Database Load Balancing, this could be enabled on each node in distributed fashion to compensate.
For example, Azure Database for PostgreSQL Flexible Server can optionally deploy a PgBouncer pooler in front of the Database, but PgBouncer is single threaded, so this in turn may cause bottlenecking. However, if using Database Load Balancing, this could be enabled on each node in distributed fashion to compensate.
1. If [GitLab Geo](../geo/index.md) is to be used the service will need to support Cross Region replication.
### Recommendation notes for the Redis services
@ -468,12 +476,12 @@ This also applies to other third-party stateful components such as Postgres and
#### Autoscaling of stateful nodes
As a general guidance, only _stateless_ components of GitLab can be run in Autoscaling groups, namely GitLab Rails
and Sidekiq.
Other components that have state, such as Gitaly, are not supported in this fashion (for more information, see [issue 2997](https://gitlab.com/gitlab-org/gitaly/-/issues/2997)).
and Sidekiq. Other components that have state, such as Gitaly, are not supported in this fashion (for more information, see [issue 2997](https://gitlab.com/gitlab-org/gitaly/-/issues/2997)).
This also applies to other third-party stateful components such as Postgres and Redis, but you can explore other third-party solutions for those components if desired, such as supported Cloud Provider services, unless called out specifically as unsupported.
However, [Cloud Native Hybrid setups](#cloud-native-hybrid) are generally preferred over ASGs as certain components such as database migrations and [Mailroom](../incoming_email.md) can only be run on one node, which is handled better in Kubernetes.
#### Spreading one environment over multiple data centers
Deploying one GitLab environment over multiple data centers is not supported due to potential split brain edge cases
@ -523,7 +531,7 @@ per 1,000 users:
- Git (Pull): 2 RPS
- Git (Push): 0.4 RPS (rounded to the nearest integer)
The above RPS targets were selected based on real customer data of total environmental loads corresponding to the user count, including CI and other workloads along with additional substantial headroom added.
The above RPS targets were selected based on real customer data of total environmental loads corresponding to the user count, including CI and other workloads.
### How to interpret the results
@ -627,11 +635,16 @@ table.test-coverage th {
## Cost to run
As a starting point, the following table details initial costs for the different reference architectures across GCP, AWS, and Azure through the Linux package.
As a starting point, the following table details initial costs for the different reference architectures across GCP, AWS, and Azure when using the Linux package, based on each cloud provider's official calculator.
NOTE:
Due to the nature of Cloud Native Hybrid, it's not possible to give a static cost calculation.
Bare-metal costs are also not included here as it varies widely depending on each configuration.
However, please be aware of the following caveats:
- These are only rough estimates for the Linux package environments.
- They do not take into account dynamic elements such as disk, network or object storage.
- Due to the nature of Cloud Native Hybrid, it's not possible to give a static cost calculation for that deployment.
- Bare-metal costs are also not included here as it varies widely depending on each configuration.
Due to the above, it's strongly recommended to adjust these calculators to match your specific setup and usage as closely as possible to get a more accurate estimate.
<table class="test-coverage">
<col>
@ -698,20 +711,9 @@ Maintaining a Reference Architecture environment is generally the same as any ot
In this section you'll find links to documentation for relevant areas as well as any specific Reference Architecture notes.
### Upgrades
Upgrades for a Reference Architecture environment is the same as any other GitLab environment.
The main [Upgrade GitLab](../../update/index.md) section has detailed steps on how to approach this.
[Zero-downtime upgrades](#zero-downtime-upgrades) are also available.
NOTE:
You should upgrade a Reference Architecture in the same order as you created it.
### Scaling an environment
The Reference Architectures have been designed to support scaling in various ways depending on your use case and circumstances.
This can be done iteratively or wholesale to the next size of architecture depending on if metrics suggest a component is being exhausted.
The Reference Architectures have been designed as a starting point and are elastic and scalable throughout. It's likely that you may want to adjust the environment for your specific needs after deployment, for reasons such as additional performance capacity or reduced costs. This is expected and, as such, scaling can be done iteratively or wholesale to the next size of architecture depending on whether metrics suggest a component is being exhausted.
NOTE:
If you're seeing a component continuously exhausting its given resources, it's strongly recommended for you to reach out to our [Support team](https://about.gitlab.com/support/) before performing any scaling. This is especially important if you're planning to scale any component significantly.
@ -730,7 +732,7 @@ You should take an iterative approach when scaling downwards, however, to ensure
In some cases scaling a component significantly may result in knock-on effects for downstream components, impacting performance. The Reference Architectures were designed with balance in mind to ensure components that depend on each other are congruent in terms of specs. As such, you may find when notably scaling a component that its increase results in additional throughput being passed to the components it depends on, which may, in turn, need to be scaled as well.
NOTE:
As a general rule most components have good headroom to accommodate an upstream component being scaled, so this is typically on a case by case basis and specific to what has been changed. It's recommended for you to reach out to our [Support team](https://about.gitlab.com/support/) before you make any significant changes to the environment.
The Reference Architectures have been designed with enough elasticity to accommodate an upstream component being scaled. However, to be safe, it's still generally recommended to reach out to our [Support team](https://about.gitlab.com/support/) before you make any significant changes to the environment.
The following components can impact others when they have been significantly scaled:
@ -749,6 +751,16 @@ documentation for each as follows
- [Postgres to multi-node Postgres w/ Consul + PgBouncer](../postgresql/moving.md)
- [Gitaly to Gitaly Cluster w/ Praefect](../gitaly/index.md#migrate-to-gitaly-cluster)
### Upgrades
Upgrades for a Reference Architecture environment are the same as for any other GitLab environment.
The main [Upgrade GitLab](../../update/index.md) section has detailed steps on how to approach this.
[Zero-downtime upgrades](#zero-downtime-upgrades) are also available.
NOTE:
You should upgrade a Reference Architecture in the same order as you created it.
### Monitoring
There are numerous options available to monitor your infrastructure, as well as [GitLab itself](../monitoring/index.md), and you should refer to your selected monitoring solution's documentation for more information.
@ -763,6 +775,7 @@ You can find a full history of changes [on the GitLab project](https://gitlab.co
**2024:**
- [2024-04](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/149878): Updated recommended sizings for Webservice nodes for Cloud Native Hybrids on GCP. Also adjusted NGINX pod recommendation to be run on Webservice node pool as a DaemonSet.
- [2024-04](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/149528): Updated 20 RPS / 1,000 User architecture specs to follow recommended memory target of 16 GB.
- [2024-04](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/148313): Updated Reference Architecture titles to include RPS for further clarity and to help right sizing.
- [2024-02](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/145436): Updated recommended sizings for Load Balancer nodes if deployed on VMs. Also added notes on network bandwidth considerations.

View File

@ -59,10 +59,19 @@ To create a system hook:
1. On the left sidebar, at the bottom, select **Admin Area**.
1. Select **System Hooks**.
1. Select **Add new webhook**.
1. Enter the **URL**.
1. Optional. Enter a [**Secret Token**](../user/project/integrations/webhooks.md#validate-requests-with-a-secret-token).
1. Select the checkbox next to each optional **Trigger** you want to enable.
1. Select **Enable SSL verification**, if desired.
1. In **URL**, enter the URL of the webhook endpoint.
The URL must be percent-encoded if it contains one or more special characters.
1. Optional. In **Name**, enter the name of the webhook.
1. Optional. In **Description**, enter the description of the webhook.
1. Optional. In **Secret token**, enter the secret token to validate requests.
The token is sent with the webhook request in the `X-Gitlab-Token` HTTP header.
Your webhook endpoint can check the token to verify the request is legitimate.
1. In the **Trigger** section, select the checkbox for each GitLab
[event](../user/project/integrations/webhook_events.md) you want to trigger the webhook.
1. Optional. Clear the **Enable SSL verification** checkbox
to disable [SSL verification](../user/project/integrations/index.md#ssl-verification).
1. Select **Add system hook**.
## Hooks request example

View File

@ -6562,25 +6562,6 @@ Input type: `NamespaceDeleteRemoteDevelopmentClusterAgentMappingInput`
| <a id="mutationnamespacedeleteremotedevelopmentclusteragentmappingclientmutationid"></a>`clientMutationId` | [`String`](#string) | A unique identifier for the client performing the mutation. |
| <a id="mutationnamespacedeleteremotedevelopmentclusteragentmappingerrors"></a>`errors` | [`[String!]!`](#string) | Errors encountered during execution of the mutation. |
### `Mutation.namespaceIncreaseStorageTemporarily`
Input type: `NamespaceIncreaseStorageTemporarilyInput`
#### Arguments
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="mutationnamespaceincreasestoragetemporarilyclientmutationid"></a>`clientMutationId` | [`String`](#string) | A unique identifier for the client performing the mutation. |
| <a id="mutationnamespaceincreasestoragetemporarilyid"></a>`id` | [`NamespaceID!`](#namespaceid) | Global ID of the namespace to mutate. |
#### Fields
| Name | Type | Description |
| ---- | ---- | ----------- |
| <a id="mutationnamespaceincreasestoragetemporarilyclientmutationid"></a>`clientMutationId` | [`String`](#string) | A unique identifier for the client performing the mutation. |
| <a id="mutationnamespaceincreasestoragetemporarilyerrors"></a>`errors` | [`[String!]!`](#string) | Errors encountered during execution of the mutation. |
| <a id="mutationnamespaceincreasestoragetemporarilynamespace"></a>`namespace` | [`Namespace`](#namespace) | Namespace after mutation. |
### `Mutation.oncallRotationCreate`
Input type: `OncallRotationCreateInput`
@ -20994,7 +20975,6 @@ GPG signature for a signed commit.
| <a id="groupgroupmemberscount"></a>`groupMembersCount` | [`Int!`](#int) | Count of direct members of this group. |
| <a id="groupid"></a>`id` | [`ID!`](#id) | ID of the namespace. |
| <a id="groupisadjourneddeletionenabled"></a>`isAdjournedDeletionEnabled` **{warning-solid}** | [`Boolean!`](#boolean) | **Introduced** in GitLab 16.11. **Status**: Experiment. Indicates if delayed group deletion is enabled. |
| <a id="groupistemporarystorageincreaseenabled"></a>`isTemporaryStorageIncreaseEnabled` **{warning-solid}** | [`Boolean`](#boolean) | **Deprecated** in GitLab 16.7. Feature removal, will be completely removed in 17.0. |
| <a id="grouplfsenabled"></a>`lfsEnabled` | [`Boolean`](#boolean) | Indicates if Large File Storage (LFS) is enabled for namespace. |
| <a id="grouplockduofeaturesenabled"></a>`lockDuoFeaturesEnabled` **{warning-solid}** | [`Boolean`](#boolean) | **Introduced** in GitLab 16.10. **Status**: Experiment. Indicates if the GitLab Duo features enabled setting is enforced for all subgroups. |
| <a id="grouplockmathrenderinglimitsenabled"></a>`lockMathRenderingLimitsEnabled` | [`Boolean`](#boolean) | Indicates if math rendering limits are locked for all descendant groups. |
@ -21022,7 +21002,6 @@ GPG signature for a signed commit.
| <a id="groupstats"></a>`stats` | [`GroupStats`](#groupstats) | Group statistics. |
| <a id="groupstoragesizelimit"></a>`storageSizeLimit` | [`Float`](#float) | The storage limit (in bytes) included with the root namespace plan. This limit only applies to namespaces under namespace limit enforcement. |
| <a id="groupsubgroupcreationlevel"></a>`subgroupCreationLevel` | [`String`](#string) | Permission level required to create subgroups within the group. |
| <a id="grouptemporarystorageincreaseendson"></a>`temporaryStorageIncreaseEndsOn` **{warning-solid}** | [`Time`](#time) | **Deprecated** in GitLab 16.7. Feature removal, will be completely removed in 17.0. |
| <a id="grouptimelogcategories"></a>`timelogCategories` **{warning-solid}** | [`TimeTrackingTimelogCategoryConnection`](#timetrackingtimelogcategoryconnection) | **Introduced** in GitLab 15.3. **Status**: Experiment. Timelog categories for the namespace. |
| <a id="grouptotalrepositorysize"></a>`totalRepositorySize` | [`Float`](#float) | Total repository size of all projects in the root namespace in bytes. |
| <a id="grouptotalrepositorysizeexcess"></a>`totalRepositorySizeExcess` | [`Float`](#float) | Total excess repository size of all projects in the root namespace in bytes. This only applies to namespaces under Project limit enforcement. |
@ -25178,7 +25157,6 @@ Product analytics events for a specific month and year.
| <a id="namespacefullname"></a>`fullName` | [`String!`](#string) | Full name of the namespace. |
| <a id="namespacefullpath"></a>`fullPath` | [`ID!`](#id) | Full path of the namespace. |
| <a id="namespaceid"></a>`id` | [`ID!`](#id) | ID of the namespace. |
| <a id="namespaceistemporarystorageincreaseenabled"></a>`isTemporaryStorageIncreaseEnabled` **{warning-solid}** | [`Boolean`](#boolean) | **Deprecated** in GitLab 16.7. Feature removal, will be completely removed in 17.0. |
| <a id="namespacelfsenabled"></a>`lfsEnabled` | [`Boolean`](#boolean) | Indicates if Large File Storage (LFS) is enabled for namespace. |
| <a id="namespacename"></a>`name` | [`String!`](#string) | Name of the namespace. |
| <a id="namespacepackagesettings"></a>`packageSettings` | [`PackageSettings`](#packagesettings) | Package settings for the namespace. |
@ -25190,7 +25168,6 @@ Product analytics events for a specific month and year.
| <a id="namespacesecuritypolicyproject"></a>`securityPolicyProject` | [`Project`](#project) | Security policy project assigned to the namespace. |
| <a id="namespacesharedrunnerssetting"></a>`sharedRunnersSetting` | [`SharedRunnersSetting`](#sharedrunnerssetting) | Shared runners availability for the namespace and its descendants. |
| <a id="namespacestoragesizelimit"></a>`storageSizeLimit` | [`Float`](#float) | The storage limit (in bytes) included with the root namespace plan. This limit only applies to namespaces under namespace limit enforcement. |
| <a id="namespacetemporarystorageincreaseendson"></a>`temporaryStorageIncreaseEndsOn` **{warning-solid}** | [`Time`](#time) | **Deprecated** in GitLab 16.7. Feature removal, will be completely removed in 17.0. |
| <a id="namespacetimelogcategories"></a>`timelogCategories` **{warning-solid}** | [`TimeTrackingTimelogCategoryConnection`](#timetrackingtimelogcategoryconnection) | **Introduced** in GitLab 15.3. **Status**: Experiment. Timelog categories for the namespace. |
| <a id="namespacetotalrepositorysize"></a>`totalRepositorySize` | [`Float`](#float) | Total repository size of all projects in the root namespace in bytes. |
| <a id="namespacetotalrepositorysizeexcess"></a>`totalRepositorySizeExcess` | [`Float`](#float) | Total excess repository size of all projects in the root namespace in bytes. This only applies to namespaces under Project limit enforcement. |
@ -26605,7 +26582,7 @@ Represents generic policy violation information.
| <a id="projectforkingaccesslevel"></a>`forkingAccessLevel` | [`ProjectFeatureAccess`](#projectfeatureaccess) | Access level required for forking access. |
| <a id="projectforkscount"></a>`forksCount` | [`Int!`](#int) | Number of times the project has been forked. |
| <a id="projectfullpath"></a>`fullPath` | [`ID!`](#id) | Full path of the project. |
| <a id="projectgooglecloudartifactregistryrepository"></a>`googleCloudArtifactRegistryRepository` **{warning-solid}** | [`GoogleCloudArtifactRegistryRepository`](#googlecloudartifactregistryrepository) | **Introduced** in GitLab 16.10. **Status**: Experiment. Google Artifact Registry repository. Returns `null` if `google_cloud_support_feature_flag` feature flag is disabled. |
| <a id="projectgooglecloudartifactregistryrepository"></a>`googleCloudArtifactRegistryRepository` **{warning-solid}** | [`GoogleCloudArtifactRegistryRepository`](#googlecloudartifactregistryrepository) | **Introduced** in GitLab 16.10. **Status**: Experiment. Google Artifact Registry repository. Returns `null` if `google_cloud_support_feature_flag` feature flag is disabled, or the GitLab instance is not a SaaS instance. |
| <a id="projectgrafanaintegration"></a>`grafanaIntegration` | [`GrafanaIntegration`](#grafanaintegration) | Grafana integration details for the project. |
| <a id="projectgroup"></a>`group` | [`Group`](#group) | Group of the project. |
| <a id="projecthasjiravulnerabilityissuecreationenabled"></a>`hasJiraVulnerabilityIssueCreationEnabled` | [`Boolean!`](#boolean) | Indicates whether Jira issue creation from vulnerabilities is enabled. |
@ -33924,6 +33901,7 @@ Values for package manager.
| Value | Description |
| ----- | ----------- |
| <a id="packagemanagerapk"></a>`APK` | Package manager: apk. |
| <a id="packagemanagerbundler"></a>`BUNDLER` | Package manager: bundler. |
| <a id="packagemanagercomposer"></a>`COMPOSER` | Package manager: composer. |
| <a id="packagemanagerconan"></a>`CONAN` | Package manager: conan. |

Binary file not shown.

Before

Width:  |  Height:  |  Size: 24 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 39 KiB

View File

@ -202,8 +202,7 @@ To fix this issue:
### Merge request event does not trigger a Jenkins pipeline
This issue can occur when the request exceeds the
[webhook timeout](../user/project/integrations/webhooks.md#webhook-fails-or-multiple-webhook-requests-are-triggered),
This issue might occur when the request exceeds the [webhook timeout limit](../user/gitlab_com/index.md#webhooks),
which is set to 10 seconds by default.
For this issue, check:

View File

@ -10,7 +10,7 @@ DETAILS:
**Tier:** Free, Premium, Ultimate
**Offering:** Self-managed
Users can sign in to GitLab by using their credentials from Twitter, GitHub, and other popular services.
Users can sign in to GitLab by using their credentials from Google, GitHub, and other popular services.
[OmniAuth](https://rubygems.org/gems/omniauth/) is the Rack framework that GitLab uses to provide this authentication.
When configured, additional sign-in options are displayed on the sign-in page.
@ -38,7 +38,6 @@ GitLab supports the following OmniAuth providers.
| [Salesforce](salesforce.md) | `salesforce` |
| [SAML](saml.md) | `saml` |
| [Shibboleth](shibboleth.md) | `shibboleth` |
| [Twitter](twitter.md) | `twitter` |
## Configure common settings
@ -64,9 +63,9 @@ To change the OmniAuth settings:
```ruby
# CAUTION!
# This allows users to sign in without having a user account first. Define the allowed providers
# using an array, for example, ["saml", "twitter"], or as true/false to allow all providers or none.
# using an array, for example, ["saml", "google_oauth2"], or as true/false to allow all providers or none.
# User accounts will be created automatically when authentication was successful.
gitlab_rails['omniauth_allow_single_sign_on'] = ['saml', 'twitter']
gitlab_rails['omniauth_allow_single_sign_on'] = ['saml', 'google_oauth2']
gitlab_rails['omniauth_auto_link_ldap_user'] = true
gitlab_rails['omniauth_block_auto_created_users'] = true
```
@ -92,7 +91,7 @@ To change the OmniAuth settings:
appConfig:
omniauth:
enabled: true
allowSingleSignOn: ['saml', 'twitter']
allowSingleSignOn: ['saml', 'google_oauth2']
autoLinkLdapUser: false
blockAutoCreatedUsers: true
```
@ -116,7 +115,7 @@ To change the OmniAuth settings:
gitlab:
environment:
GITLAB_OMNIBUS_CONFIG: |
gitlab_rails['omniauth_allow_single_sign_on'] = ['saml', 'twitter']
gitlab_rails['omniauth_allow_single_sign_on'] = ['saml', 'google_oauth2']
gitlab_rails['omniauth_auto_link_ldap_user'] = true
gitlab_rails['omniauth_block_auto_created_users'] = true
```
@ -134,15 +133,15 @@ To change the OmniAuth settings:
```yaml
## OmniAuth settings
omniauth:
# Allow sign-in by using Twitter, Google, etc. using OmniAuth providers
# Allow sign-in by using Google, GitLab, etc. using OmniAuth providers
# Versions prior to 11.4 require this to be set to true
# enabled: true
# CAUTION!
# This allows users to sign in without having a user account first. Define the allowed providers
# using an array, for example, ["saml", "twitter"], or as true/false to allow all providers or none.
# using an array, for example, ["saml", "google_oauth2"], or as true/false to allow all providers or none.
# User accounts will be created automatically when authentication was successful.
allow_single_sign_on: ["saml", "twitter"]
allow_single_sign_on: ["saml", "google_oauth2"]
auto_link_ldap_user: true
@ -238,13 +237,13 @@ users created with OmniAuth.
If you're an existing user, after your GitLab account is
created, you can activate an OmniAuth provider. For example, if you originally signed in with LDAP, you can enable an OmniAuth
provider like Twitter.
provider like Google.
1. Sign in to GitLab with your GitLab credentials, LDAP, or another OmniAuth provider.
1. On the left sidebar, select your avatar.
1. Select **Edit profile**.
1. On the left sidebar, select **Account**.
1. In the **Connected Accounts** section, select the OmniAuth provider, such as Twitter.
1. In the **Connected Accounts** section, select the OmniAuth provider, such as Google.
1. You are redirected to the provider. After you authorize GitLab,
you are redirected back to GitLab.
@ -294,21 +293,21 @@ omniauth:
You can automatically link OmniAuth users with existing GitLab users if their email addresses match.
The following example enables automatic linking
for the OpenID Connect provider and the Twitter OAuth provider.
for the OpenID Connect provider and the Google OAuth provider.
::Tabs
:::TabTitle Linux package (Omnibus)
```ruby
gitlab_rails['omniauth_auto_link_user'] = ["openid_connect", "twitter"]
gitlab_rails['omniauth_auto_link_user'] = ["openid_connect", "google_oauth2"]
```
:::TabTitle Self-compiled (source)
```yaml
omniauth:
auto_link_user: ["openid_connect", "twitter"]
auto_link_user: ["openid_connect", "google_oauth2"]
```
::EndTabs
@ -336,14 +335,14 @@ accounts are upgraded to full internal accounts.
:::TabTitle Linux package (Omnibus)
```ruby
gitlab_rails['omniauth_external_providers'] = ['twitter', 'google_oauth2']
gitlab_rails['omniauth_external_providers'] = ['saml', 'google_oauth2']
```
:::TabTitle Self-compiled (source)
```yaml
omniauth:
external_providers: ['twitter', 'google_oauth2']
external_providers: ['saml', 'google_oauth2']
```
::EndTabs
@ -410,7 +409,7 @@ When authenticating using LDAP, the user's name and email are always synced.
:::TabTitle Linux package (Omnibus)
```ruby
gitlab_rails['omniauth_sync_profile_from_provider'] = ['twitter', 'google_oauth2']
gitlab_rails['omniauth_sync_profile_from_provider'] = ['saml', 'google_oauth2']
gitlab_rails['omniauth_sync_profile_attributes'] = ['name', 'email', 'location']
```
@ -418,7 +417,7 @@ gitlab_rails['omniauth_sync_profile_attributes'] = ['name', 'email', 'location']
```yaml
omniauth:
sync_profile_from_provider: ['twitter', 'google_oauth2']
sync_profile_from_provider: ['saml', 'google_oauth2']
sync_profile_attributes: ['email', 'location']
```
@ -434,7 +433,7 @@ account to bypass 2FA. Otherwise, they are prompted to set up 2FA when they sign
To bypass 2FA, you can either:
- Define the allowed providers using an array (for example, `['twitter', 'google_oauth2']`).
- Define the allowed providers using an array (for example, `['saml', 'google_oauth2']`).
- Specify `true` to allow all providers, or `false` to allow none.
This option should be configured only for providers that already have 2FA. The default is `false`.
@ -446,14 +445,14 @@ This configuration doesn't apply to SAML.
:::TabTitle Linux package (Omnibus)
```ruby
gitlab_rails['omniauth_allow_bypass_two_factor'] = ['twitter', 'google_oauth2']
gitlab_rails['omniauth_allow_bypass_two_factor'] = ['saml', 'google_oauth2']
```
:::TabTitle Self-compiled (source)
```yaml
omniauth:
allow_bypass_two_factor: ['twitter', 'google_oauth2']
allow_bypass_two_factor: ['saml', 'google_oauth2']
```
::EndTabs

View File

@ -4,112 +4,12 @@ group: Authentication
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments
---
# Twitter OAuth 1.0a OmniAuth Provider (deprecated)
# Twitter OAuth 1.0a OmniAuth Provider (removed)
DETAILS:
**Tier:** Free, Premium, Ultimate
**Offering:** Self-managed
<!--- start_remove The following content will be removed on remove_date: '2024-05-17' -->
WARNING:
This feature was [deprecated](https://gitlab.com/gitlab-com/Product/-/issues/11417) in GitLab 16.3 and is planned for removal in 17.0. Use [another supported OmniAuth provider](omniauth.md#supported-providers) instead. This change is a breaking change.
<!--- end_remove -->
NOTE:
Twitter OAuth 2.0 support is [not supported](https://gitlab.com/gitlab-org/gitlab/-/issues/366213).
To enable the Twitter OmniAuth provider you must register your application with
Twitter. Twitter generates a client ID and secret key for you to use.
## Create a new Twitter application
1. Sign in to [Twitter Application Management](https://developer.twitter.com/apps).
1. Select **Create new app**.
1. Fill in the application details.
- **Name**: This can be anything. Consider something like `<Organization>'s GitLab`, `<Your Name>'s GitLab` or
something else descriptive.
- **Description**: Create a description.
- **Website**: The URL to your GitLab installation. For example, `https://gitlab.example.com`
- **Callback URL**: `https://gitlab.example.com/users/auth/twitter/callback`
- **Developer Agreement**: Select **Yes, I agree**.
![Twitter App Details](img/twitter_app_details.png)
1. Select **Create your Twitter application**.
## Configure the application settings
1. Select the **Settings** tab.
1. Underneath the **Callback URL**, select the **Allow this application to be used to Sign in with Twitter** checkbox.
1. Select **Update settings** to save the changes.
1. Select the **Keys and Access Tokens** tab.
1. Find your **API key** and **API secret**. Keep this tab open as you continue configuration.
![Twitter app](img/twitter_app_api_keys.png)
## Configure your application on the GitLab server
1. On your GitLab server, open the configuration file.
For Linux package installations:
```shell
sudo editor /etc/gitlab/gitlab.rb
```
For self-compiled installations:
```shell
cd /home/git/gitlab
sudo -u git -H editor config/gitlab.yml
```
1. Configure the [common settings](omniauth.md#configure-common-settings)
to add `twitter` as a single sign-on provider. This enables Just-In-Time
account provisioning for users who do not have an existing GitLab account.
1. Add the provider configuration.
For Linux package installations:
```ruby
gitlab_rails['omniauth_providers'] = [
{
name: "twitter",
# label: "Provider name", # optional label for login button, defaults to "Twitter"
app_id: "<your_api_key>",
app_secret: "<your_api_secret>"
}
]
```
For self-compiled installations:
```yaml
- { name: 'twitter',
# label: 'Provider name', # optional label for login button, defaults to "Twitter"
app_id: '<your_api_key>',
app_secret: '<your_api_secret>' }
```
1. Change `<your_api_key>` to the API key from the Twitter **Keys and Access Tokens** tab.
1. Change `<your_api_secret>` to the API secret from the Twitter **Keys and Access Tokens** tab.
1. Save the configuration file.
1. For the changes to take effect:
- For Linux package installations, [reconfigure GitLab](../administration/restart_gitlab.md#reconfigure-a-linux-package-installation).
- For self-compiled installations, [restart GitLab](../administration/restart_gitlab.md#self-compiled-installations).
On the sign-in page, find the Twitter option below the regular sign-in form. Select the option to begin the authentication process. Twitter asks you to sign in and authorize the GitLab application. After authorization,
you are returned to GitLab and signed in.
This feature was [deprecated](https://gitlab.com/gitlab-com/Product/-/issues/11417) in GitLab 16.3
and [removed](https://gitlab.com/gitlab-org/gitlab/-/issues/420978) in 17.0.
Use [another supported OmniAuth provider](omniauth.md#supported-providers) instead.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 80 KiB

View File

@ -15,10 +15,11 @@ DETAILS:
> - [Generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/392734) in GitLab 16.0. Feature flag `group_analytics_dashboards_page` removed.
To help us improve the Value Streams Dashboard, share feedback about your experience in this [survey](https://gitlab.fra1.qualtrics.com/jfe/form/SV_50guMGNU2HhLeT4).
For more information, see also the [Value Stream Management category direction page](https://about.gitlab.com/direction/plan/value_stream_management/).
For more information, see the [Value Stream Management category direction page](https://about.gitlab.com/direction/plan/value_stream_management/).
The Value Streams Dashboard is a customizable dashboard you can use to identify trends, patterns, and opportunities for digital transformation improvements.
The centralized UI in Value Streams Dashboard acts as the single source of truth (SSOT), where all stakeholders can access and view the same set of metrics that are relevant to the organization. The Value Streams Dashboard includes [several panels](#value-streams-dashboard-panels) that visualize the following metrics:
The centralized UI in the Value Streams Dashboard acts as the single source of truth (SSOT), where all stakeholders can access and view the same set of metrics that are relevant to the organization.
The Value Streams Dashboard includes [panels](#value-streams-dashboard-panels) that visualize the following metrics:
- [DORA metrics](dora_metrics.md)
- [Value Stream Analytics (VSA) - flow metrics](../group/value_stream_analytics/index.md)
@ -72,19 +73,18 @@ To view metrics on the Overview panel, the [background aggregation](#enable-or-d
### DevSecOps metrics comparison panel
> - Contributor count metric [added](https://gitlab.com/gitlab-org/gitlab/-/issues/433353) in GitLab 16.9.
> - Contributor count metric at the group level [introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/433353) to GitLab.com in GitLab 16.9.
The DevSecOps metrics comparison displays DORA4, vulnerability, and flow metrics for a group or project in the
month-to-date, last month, the month before, and the past 180 days.
The DevSecOps metrics comparison displays DORA4, vulnerability, and flow metrics for a group or project
in the month-to-date, last month, the month before, and the past 180 days.
This visualization helps you understand whether the key DevSecOps metrics improve month over month.
This visualization helps you get a high-level custom view over multiple DevOps metrics and
understand whether they're improving month over month. You can compare the performance between
groups, projects, and teams at a glance. This visualization helps you identify the teams and projects
that are the largest value contributors, overperforming, or underperforming.
In the comparison panel, you can:
![DevOps metrics comparison](img/devops_metrics_comparison_v15_8.png)
- Compare the performance between groups, projects, and teams at a glance.
- Identify the teams and projects that are the largest value contributors, overperforming, or underperforming.
- Drill down the metrics for further analysis.
You can also drill down the metrics for further analysis.
When you hover over a metric, a tooltip displays an explanation of the metric and a link to the related documentation page.
The monthly values also indicate a percentage increase or decrease compared to the previous month.
@ -93,9 +93,6 @@ The sparkline for the past six months represents value trends over this time per
The sparkline color ranges from blue to green, where green indicates a positive trend, and blue indicates a negative trend.
Sparklines help you identify patterns in metric trends (such as seasonal changes) over time.
NOTE:
The contributor count metric is available only on GitLab.com at the group-level. To view this metric in the comparison panel, you must [set up ClickHouse](../../integration/clickhouse.md).
### DORA Performers score panel
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/386843) in GitLab 16.3 [with a flag](../../administration/feature_flags.md) named `dora_performers_score_panel`. Disabled by default.
@ -104,11 +101,11 @@ The contributor count metric is available only on GitLab.com at the group-level.
The [DORA metrics](dora_metrics.md) Performers score panel is a bar chart that visualizes the status of the organization's DevOps performance levels across different projects.
The chart is a breakdown of your project's DORA scores, categorized as high, medium, or low.
It aggregates all the child projects in the group.
The chart is a breakdown of your project's DORA scores, [categorized](https://cloud.google.com/blog/products/devops-sre/dora-2022-accelerate-state-of-devops-report-now-out) as high, medium, or low.
The chart aggregates all the child projects in the group.
Each bar on the chart displays the sum of total projects per score category, calculated monthly.
To exclude data from the chart (for example, "Not Included"), in the legend select the series you want to exclude.
To exclude data from the chart (for example, **Not Included**), in the legend select the series you want to exclude.
Hovering over each bar reveals a dialog that explains the score's definition.
For example, if a project has a high score for deployment frequency (velocity), it means that the project has one or more deploys to production per day.
@ -120,8 +117,6 @@ For example, if a project has a high score for deployment frequency (velocity),
| Time to restore service | The number of days to restore service when a service incident or a defect that impacts users occurs | ≤1 | 2-6 | ≥7 |
| Change failure rate | The percentage of changes to production resulted in degraded service | ≤15% | 16%-44% | ≥45% |
These scoring are based on Google's classifications in the [DORA 2022 Accelerate State of DevOps Report](https://cloud.google.com/blog/products/devops-sre/dora-2022-accelerate-state-of-devops-report-now-out).
### AI Impact analytics
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/443696) in GitLab 16.11 [with a flag](../../administration/feature_flags.md) named `ai_impact_analytics_dashboard`. Disabled by default.
@ -140,7 +135,6 @@ The baseline for the AI Usage trend is the total number of code contributors, no
NOTE:
Usage rate for Code Suggestions is calculated with data starting on 2024-04-04.
For more information, see [epic 12978](https://gitlab.com/groups/gitlab-org/-/epics/12978).
#### Filter the DORA Performers score by project topics
@ -156,7 +150,7 @@ panels:
- Vue.js
```
If multiple topics are provided, all topics will need to match for the project to be included in the results.
If multiple topics are provided, all topics must match for the project to be included in the results.
## Enable or disable overview background aggregation
@ -185,6 +179,7 @@ Prerequisites:
- To view AI Impact analytics, you must:
- Enable [Code Suggestions](../../user/project/repository/code_suggestions/index.md).
- Configure [ClickHouse for contribution analytics](../../user/group/contribution_analytics/index.md#contribution-analytics-with-clickhouse).
- To view the contributor count metric in the comparison panel, you must [set up ClickHouse](../../integration/clickhouse.md).
To view the value streams dashboard:

View File

@ -19,23 +19,24 @@ see [webhook events](webhook_events.md).
You can use webhooks to:
- Trigger continuous integration (CI) jobs, update external issue trackers,
update a backup mirror, or deploy to your production server.
- [Integrate with Twilio to be notified via SMS](https://www.datadoghq.com/blog/send-alerts-sms-customizable-webhooks-twilio/)
- Trigger CI/CD jobs, update external issue trackers and
backup mirrors, or deploy to your production server.
- [Integrate with Twilio to receive SMS alerts](https://www.datadoghq.com/blog/send-alerts-sms-customizable-webhooks-twilio/)
every time an issue is created for a specific project or group in GitLab.
- [Automatically assign labels to merge requests](https://about.gitlab.com/blog/2016/08/19/applying-gitlab-labels-automatically/).
- [Assign labels automatically to merge requests](https://about.gitlab.com/blog/2016/08/19/applying-gitlab-labels-automatically/).
GitLab.com enforces [webhook limits](../../../user/gitlab_com/index.md#webhooks),
including:
GitLab.com enforces [webhook limits](../../../user/gitlab_com/index.md#webhooks), including:
- The maximum number of webhooks and their size, both per project and per group.
- The number of webhook calls per minute.
- The maximum number of webhooks per project or group
- The number of webhook calls per minute
- The number of seconds a webhook is timed out
For GitLab self-managed, an administrator can change these limits.
## Group webhooks
DETAILS:
**Tier:** Premium, Ultimate
**Offering:** GitLab.com, Self-managed, GitLab Dedicated
You can configure a group webhook, which is triggered by events
that occur across all projects in the group and its subgroups. If you configure identical webhooks
@ -58,15 +59,22 @@ To create a webhook for a project or group:
1. Select **Add new webhook**.
1. In **URL**, enter the URL of the webhook endpoint.
The URL must be percent-encoded if it contains one or more special characters.
1. Optional. In **Secret token**, enter the [secret token](#validate-requests-with-a-secret-token) to validate requests.
1. In the **Trigger** section, select the [events](webhook_events.md) to trigger the webhook.
1. Optional. Clear the **Enable SSL verification** checkbox to disable [SSL verification](index.md#ssl-verification).
1. Optional. In **Name**, enter the name of the webhook.
1. Optional. In **Description**, enter the description of the webhook.
1. Optional. In **Secret token**, enter the secret token to validate requests.
The token is sent with the webhook request in the `X-Gitlab-Token` HTTP header.
Your webhook endpoint can check the token to verify the request is legitimate.
1. In the **Trigger** section, select the checkbox for each GitLab
[event](webhook_events.md) you want to trigger the webhook.
1. Optional. Clear the **Enable SSL verification** checkbox
to disable [SSL verification](index.md#ssl-verification).
1. Select **Add webhook**.
### Mask sensitive portions of webhook URLs
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/99995) in GitLab 15.5 [with a flag](../../../administration/feature_flags.md) named `webhook_form_mask_url`. Disabled by default.
> - [Enabled on GitLab.com](https://gitlab.com/gitlab-org/gitlab/-/issues/376106) in GitLab 15.6.
> - [Generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/376106) in GitLab 15.7. Feature flag `webhook_form_mask_url` removed.
You can define and mask sensitive portions of webhook URLs and replace them
@ -144,13 +152,6 @@ You'll have this request payload that combines the template with a `push` event:
}
```
### Validate requests with a secret token
You can specify a secret token to validate webhook requests.
The token is sent with the hook request in the
`X-Gitlab-Token` HTTP header. Your webhook endpoint can check the token to verify
that the request is legitimate.
### Filter push events by branch
You can filter push events by branch. Use one of the following options to filter which push events are sent to your webhook endpoint:
@ -223,30 +224,31 @@ If the webhook URL has changed, you cannot resend the request.
## Webhook receiver requirements
Webhook receiver endpoints should be fast and stable.
Slow and unstable receivers might be [disabled automatically](#auto-disabled-webhooks) to ensure system reliability. Webhooks that fail might lead to [duplicate events](#webhook-fails-or-multiple-webhook-requests-are-triggered).
Slow and unstable receivers might be [disabled automatically](#auto-disabled-webhooks) to ensure system reliability.
Webhooks that [time out](../../../user/gitlab_com/index.md#webhooks) might lead to duplicate events.
Endpoints should follow these best practices:
- **Respond quickly with a `200` or `201` status response.** Avoid any significant processing of webhooks in the same request.
Instead, implement a queue to handle webhooks after they are received. Webhook receivers that do not respond before the [timeout limit](#webhook-timeout-limits) might be [disabled automatically](#auto-disabled-webhooks) on GitLab.com.
- **Be prepared to handle duplicate events.** In [some circumstances](#webhook-fails-or-multiple-webhook-requests-are-triggered), the same event may be sent twice. To mitigate this issue, ensure your endpoint is
reliably fast and stable.
- **Respond quickly with a `200` or `201` status response.**
Avoid any significant processing of webhooks in the same request.
Instead, implement a queue to handle webhooks after they are received.
Webhook receivers that do not respond before the timeout limit
might be disabled automatically on GitLab.com.
- **Be prepared to handle duplicate events.**
If a webhook has timed out, the same event might be sent twice.
To mitigate this issue, ensure your endpoint is reliably fast and stable.
- **Keep the response headers and body minimal.**
GitLab does not examine the response headers or body. GitLab stores them so you can examine them later in the logs to help diagnose problems. You should limit the number and size of headers returned. You can also respond to the webhook request with an empty body.
- Only return client error status responses (in the `4xx` range) to
indicate that the webhook has been misconfigured. Responses in this range might cause your webhooks to be [disabled automatically](#auto-disabled-webhooks). For example, if your receiver
only supports push events, you can return `400` if sent an issue
payload, as that is an indication that the hook has been set up
incorrectly. Alternatively, you can ignore unrecognized event
payloads.
- Never return `500` server error status responses if the event has been handled. These responses might cause the webhook to be [disabled automatically](#auto-disabled-webhooks).
GitLab stores the response headers and body so you can examine them later in the logs to help diagnose problems.
You should limit the number and size of returned headers.
You can also respond to the webhook request with an empty body.
- Only return client error status responses (in the `4xx` range) to indicate the webhook is misconfigured.
Responses in this range might cause your webhooks to be disabled automatically.
For example, if your receiver supports only push events, you can return `400` for issue payloads.
Alternatively, you can ignore unrecognized event payloads.
- Never return `500` server error status responses if the event has been handled.
These responses might cause the webhook to be disabled automatically.
- Invalid HTTP responses are treated as failed requests.
## Webhook timeout limits
For GitLab.com, the timeout limit for webhooks is [10 seconds](../../../user/gitlab_com/index.md#other-limits).
For GitLab self-managed, an administrator can [change the webhook timeout limit](../../../administration/instance_limits.md#webhook-timeout).
## Auto-disabled webhooks
> - [Generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/329849) for project webhooks in GitLab 15.7. Feature flag `web_hooks_disable_failed` removed.
@ -274,7 +276,7 @@ An auto-disabled webhook appears in the list of project or group webhooks as:
### Temporarily disabled webhooks
Project or group webhooks that return response codes in the `5xx` range
or experience a [timeout](#webhook-timeout-limits) or other HTTP errors
or experience a [timeout](../../../user/gitlab_com/index.md#webhooks) or other HTTP errors
are considered to be failing intermittently and are temporarily disabled.
These webhooks are initially disabled for one minute, which is extended
on each subsequent failure up to a maximum of 24 hours.
@ -404,9 +406,8 @@ To create a private webhook receiver:
```
1. In GitLab, [configure the webhook](#configure-webhooks-in-gitlab) and add your
receiver's URL, for example, `http://receiver.example.com:8000/`.
1. Select **Test**. You should see something like this in the console:
receiver's URL (for example, `http://receiver.example.com:8000/`).
1. Select **Test**. You should see a similar message in the console:
```plaintext
{"before":"077a85dd266e6f3573ef7e9ef8ce3343ad659c4e","after":"95cd4a99e93bc4bbabacfa2cd10e6725b1403c60",<SNIP>}
@ -414,9 +415,7 @@ To create a private webhook receiver:
- -> /
```
NOTE:
You may need to [allow requests to the local network](../../../security/webhooks.md) for this
receiver to be added.
To add this receiver, you might have to [allow requests to the local network](../../../security/webhooks.md).
## How image URLs are displayed in the webhook body
@ -462,7 +461,7 @@ certificate in PEM format. This certificate is set globally and
presented to the server during a TLS handshake. The certificate can also
be protected with a PEM passphrase.
To configure the certificate, follow the instructions below.
To configure the certificate:
::Tabs
@ -536,20 +535,15 @@ To configure the certificate, follow the instructions below.
## Troubleshooting
### Unable to get local issuer certificate
### `unable to get local issuer certificate`
When SSL verification is enabled, you might get an error that GitLab cannot
verify the SSL certificate of the webhook endpoint.
Typically, this error occurs because the root certificate isn't
issued by a trusted certification authority as
determined by [CAcert.org](http://www.cacert.org/).
When SSL verification is enabled, you might get an error that GitLab
cannot verify the SSL certificate of the webhook endpoint.
Typically, this error occurs because the root certificate is not issued
by a trusted certificate authority as determined by [CAcert.org](http://www.cacert.org/).
If that is not the case, consider using [SSL Checker](https://www.sslshopper.com/ssl-checker.html) to identify faults.
Missing intermediate certificates are common causes of verification failure.
### Webhook fails or multiple webhook requests are triggered
If you're receiving multiple webhook requests, the webhook might have [timed out](#webhook-timeout-limits).
To resolve this issue, consider using [SSL Checker](https://www.sslshopper.com/ssl-checker.html) to identify errors.
Missing intermediate certificates are a common cause of verification failure.
### Webhook is not triggered

View File

@ -4,7 +4,7 @@ module Gitlab::UsageDataCounters
class NoteCounter < BaseCounter
KNOWN_EVENTS = %w[create].freeze
PREFIX = 'note'
COUNTABLE_TYPES = %w[Snippet Commit MergeRequest].freeze
COUNTABLE_TYPES = %w[MergeRequest].freeze
class << self
def redis_key(event, noteable_type)

View File

@ -8,7 +8,9 @@
# ...
#
'{event_counters}_analytics_dashboard_viewed': USAGE_PRODUCT_ANALYTICS_VIEW_DASHBOARD
'{event_counters}_create_commit_note': USAGE_NOTE_CREATE_COMMIT
'{event_counters}_create_snippet': USAGE_SNIPPET_CREATE
'{event_counters}_create_snippet_note': USAGE_NOTE_CREATE_SNIPPET
'{event_counters}_licenses_list_viewed': USAGE_LICENSES_LIST_VIEWS
'{event_counters}_update_snippet': USAGE_SNIPPET_UPDATE
'{event_counters}_usage_data_download_payload_clicked': USAGE_SERVICE_USAGE_DATA_DOWNLOAD_PAYLOAD_CLICK

View File

@ -24122,7 +24122,13 @@ msgstr[1] ""
msgid "GoogleArtifactRegistry|%{linkStart}Install the Google Cloud CLI%{linkEnd}."
msgstr ""
msgid "GoogleArtifactRegistry|After the policies have been created, select %{strongStart}Save changes%{strongEnd} to continue."
msgid "GoogleArtifactRegistry|1. Repository"
msgstr ""
msgid "GoogleArtifactRegistry|2. Set up permissions"
msgstr ""
msgid "GoogleArtifactRegistry|After the roles have been granted, select %{strongStart}Save changes%{strongEnd} to continue."
msgstr ""
msgid "GoogleArtifactRegistry|An error occurred while fetching the artifact details."
@ -24131,18 +24137,9 @@ msgstr ""
msgid "GoogleArtifactRegistry|An error occurred while fetching the artifacts."
msgstr ""
msgid "GoogleArtifactRegistry|Before you begin:"
msgstr ""
msgid "GoogleArtifactRegistry|Built"
msgstr ""
msgid "GoogleArtifactRegistry|Configuration instructions"
msgstr ""
msgid "GoogleArtifactRegistry|Configure Google Cloud IAM policies"
msgstr ""
msgid "GoogleArtifactRegistry|Configure in settings"
msgstr ""
@ -24161,7 +24158,7 @@ msgstr ""
msgid "GoogleArtifactRegistry|Digest"
msgstr ""
msgid "GoogleArtifactRegistry|Ensure you have the %{linkStart}permissions needed%{linkEnd} in your Google Cloud project."
msgid "GoogleArtifactRegistry|Ensure you have the %{linkStart}permissions%{linkEnd} to manage access to your Google Cloud project."
msgstr ""
msgid "GoogleArtifactRegistry|First, secure your usage with the %{strongStart}Google Cloud IAM%{strongEnd} integration. Simplify access without the need to manage accounts or keys."
@ -24209,7 +24206,7 @@ msgstr ""
msgid "GoogleArtifactRegistry|Repository: %{repository}"
msgstr ""
msgid "GoogleArtifactRegistry|Run the following command to set up IAM read and write policies in your Google Cloud project."
msgid "GoogleArtifactRegistry|Run the following command to grant roles in your Google Cloud project. You might be prompted to sign into Google."
msgstr ""
msgid "GoogleArtifactRegistry|Set up Google Cloud IAM"
@ -24218,6 +24215,12 @@ msgstr ""
msgid "GoogleArtifactRegistry|Tags"
msgstr ""
msgid "GoogleArtifactRegistry|To improve security, use a Google Cloud project for resources only, separate from CI/CD and identity management projects."
msgstr ""
msgid "GoogleArtifactRegistry|To use the integration, allow this GitLab project to read and write to Google Artifact Registry. You can use the following recommended setup or customize it with other %{claimsStart}OIDC custom claims%{claimsEnd} and %{rolesStart}Artifact Registry roles%{rolesEnd}."
msgstr ""
msgid "GoogleArtifactRegistry|Updated"
msgstr ""
@ -24233,12 +24236,6 @@ msgstr ""
msgid "GoogleArtifactRegistry|Virtual size"
msgstr ""
msgid "GoogleArtifactRegistry|You might be prompted to sign into Google."
msgstr ""
msgid "GoogleArtifactRegistry|Your Google Cloud project must have specific Identity and Access Management (IAM) policies to use the Artifact Registry repository in this GitLab project."
msgstr ""
msgid "GoogleCloud|%{linkStart}Switch to the guided setup%{linkEnd} if you can manage workload identity federation in Google Cloud. %{link2Start}What are the required permissions?%{link2End}"
msgstr ""
@ -24248,6 +24245,9 @@ msgstr ""
msgid "GoogleCloud|%{link_start}Explore Google Cloud integration with GitLab%{link_end}, for CI/CD and more."
msgstr ""
msgid "GoogleCloud|%{link_start}Wheres my project ID? %{icon}%{link_end} Can be 6 to 30 lowercase letters, numbers, or hyphens. Must start with a letter and end with a letter or number. Example: %{code_start}my-sample-project-191923%{code_end}."
msgstr ""
msgid "GoogleCloud|After Google Cloud workload identity federation has been set up, select %{strongStart}Continue%{strongEnd}."
msgstr ""
@ -24257,6 +24257,9 @@ msgstr ""
msgid "GoogleCloud|Before you begin, %{linkStart}install the Google Cloud CLI%{linkEnd}."
msgstr ""
msgid "GoogleCloud|Can be up to 63 lowercase letters, numbers, or hyphens. Must start with a letter and end with a letter or number. Repository must be Docker format and Standard mode."
msgstr ""
msgid "GoogleCloud|Cancel"
msgstr ""
@ -24377,9 +24380,6 @@ msgstr ""
msgid "GoogleCloud|Repository location"
msgstr ""
msgid "GoogleCloud|Repository must be Docker format and Standard mode."
msgstr ""
msgid "GoogleCloud|Repository name"
msgstr ""
@ -24407,9 +24407,6 @@ msgstr ""
msgid "GoogleCloud|The google_cloud_support feature is not available"
msgstr ""
msgid "GoogleCloud|To improve security, use a dedicated project for resources, separate from CI/CD and identity management projects. %{link_start}Wheres my project ID? %{icon}%{link_end}"
msgstr ""
msgid "GoogleCloud|Workload identity federation"
msgstr ""
@ -51316,12 +51313,6 @@ msgstr ""
msgid "Templates"
msgstr ""
msgid "TemporaryStorageIncrease|can only be set once"
msgstr ""
msgid "TemporaryStorageIncrease|can only be set with more than %{percentage}%% usage"
msgstr ""
msgid "Terminal"
msgstr ""

View File

@ -4,6 +4,6 @@ FactoryBot.define do
factory :terraform_module_metadatum, class: 'Packages::TerraformModule::Metadatum' do
package { association(:terraform_module_package) }
project { package.project }
fields { { root: { description: 'README' } } }
fields { { root: { readme: 'README' } } }
end
end

View File

@ -15,7 +15,7 @@ RSpec.describe 'OAuth Login', :allow_forgery_protection, feature_category: :syst
stub_omniauth_provider(provider)
end
providers = [:github, :twitter, :bitbucket, :gitlab, :google_oauth2,
providers = [:github, :bitbucket, :gitlab, :google_oauth2,
:facebook, :auth0, :salesforce, :alicloud]
around do |example|

View File

@ -16,7 +16,6 @@ RSpec.describe 'OAuth Registration', :js, :allow_forgery_protection, feature_cat
where(:provider, :additional_info) do
:github | {}
:twitter | {}
:bitbucket | {}
:gitlab | {}
:google_oauth2 | {}

View File

@ -0,0 +1,152 @@
{
"root": {
"inputs": [
{
"name": "name",
"type": "string",
"description": "Tenetur laudantium voluptatibus incidunt ipsum iste et."
},
{
"name": "task_definition",
"type": "string",
"description": "Repellendus sequi sed quo quasi."
},
{
"name": "target_definition",
"type": "bool",
"default": "false",
"description": "In voluptatibus libero accusamus aut."
},
{
"name": "multiple_definitions",
"type": "list",
"default": "[]",
"description": "Necessitatibus tempore id ipsam dolor."
}
],
"readme": "# Gitlab Local File\n\nThis repository contains a [Terraform](https://www.terraform.io/) module to create a local file.\n\n## How do you use this module?\n\nThis folder defines a [Terraform module](https://www.terraform.io/docs/modules/usage.html), which you can use in your\ncode by adding a `module` configuration and setting its `source` parameter to the URL of this folder:\n\n```hcl\nmodule \"gitlab_local_file\" {\n source = \"gitlab.com/mattkasa/terraform-modules/gitlab-local-file?ref=master\"\n\n text = \"Hello World\"\n filename = \"hello\"\n}\n```\n",
"outputs": [
{
"name": "name",
"description": "Tenetur laudantium voluptatibus incidunt ipsum iste et."
},
{
"name": "output",
"description": "Repellendus sequi sed quo quasi."
}
],
"resources": [
"local_file.file",
"null_resource.null",
"null_resource.null2"
],
"dependencies": {
"modules": [
{
"name": "gitlab_local_file",
"source": "gitlab/example/local"
},
{
"name": "gitlab_remote_file",
"source": "gitlab/example/remote"
}
],
"providers": [
{
"name": "aws",
"version": ">= 2.7.0"
},
{
"name": "google",
"version": ">= 2.7.0"
}
]
}
},
"submodules": {
"gitlab_local_file": {
"inputs": [
{
"name": "text",
"type": "string",
"description": "Tenetur laudantium voluptatibus incidunt ipsum iste et."
},
{
"name": "filename",
"type": "string",
"description": "Repellendus sequi sed quo quasi."
}
],
"readme": "# Gitlab Local File\n\nThis repository contains a [Terraform](https://www.terraform.io/) module to create a local file.\n\n## How do you use this module?\n\nThis folder defines a [Terraform module](https://www.terraform.io/docs/modules/usage.html), which you can use in your\ncode by adding a `module` configuration and setting its `source` parameter to the URL of this folder:\n\n```hcl\nmodule \"gitlab_local_file\" {\n source = \"gitlab.com/mattkasa/terraform-modules/gitlab-local-file?ref=master\"\n\n text = \"Hello World\"\n filename = \"hello\"\n}\n```\n",
"outputs": [
{
"name": "name",
"description": "Tenetur laudantium voluptatibus incidunt ipsum iste et."
},
{
"name": "output",
"description": "Repellendus sequi sed quo quasi."
}
],
"resources": [
"local_file.file",
"null_resource.null",
"null_resource.null2"
],
"dependencies": {
"modules": [
{
"name": "gitlab_local_file",
"source": "gitlab/example/local"
},
{
"name": "gitlab_remote_file",
"source": "gitlab/example/remote",
"version": ">= 2.7.0"
}
],
"providers": [
{
"name": "aws",
"version": ">= 2.7.0"
},
{
"name": "null",
"source": "hashicorp/null",
"version": ">= 3.0"
},
{
"name": "template"
}
]
}
}
},
"examples": {
"basic": {
"inputs": [
{
"name": "Hello World",
"type": "string",
"description": "Tenetur laudantium voluptatibus incidunt ipsum iste et."
},
{
"name": "hello",
"type": "string",
"description": "Repellendus sequi sed quo quasi."
}
],
"outputs": [
{
"name": "name",
"description": "Tenetur laudantium voluptatibus incidunt ipsum iste et."
},
{
"name": "output",
"description": "Repellendus sequi sed quo quasi."
}
],
"readme": "# Gitlab Local File\n\nThis repository contains a [Terraform](https://www.terraform.io/) module to create a local file.\n\n## How do you use this module?\n\nThis folder defines a [Terraform module](https://www.terraform.io/docs/modules/usage.html), which you can use in your\ncode by adding a `module` configuration and setting its `source` parameter to the URL of this folder:\n\n```hcl\nmodule \"gitlab_local_file\" {\n source = \"gitlab.com/mattkasa/terraform-modules/gitlab-local-file?ref=master\"\n\n text = \"Hello World\"\n filename = \"hello\"\n}\n```\n"
}
}
}

Some files were not shown because too many files have changed in this diff Show More