Add latest changes from gitlab-org/gitlab@master

This commit is contained in:
GitLab Bot 2024-10-15 18:18:45 +00:00
parent a24e628777
commit 75a95cff04
116 changed files with 894 additions and 548 deletions

View File

@ -1192,11 +1192,16 @@ lib/gitlab/checks/**
/ee/spec/requests/custom_roles/
/ee/lib/api/member_roles.rb
[Container registry] @gitlab-org/ci-cd/package-stage/container-registry-group
/app/assets/javascripts/packages_and_registries/container_registry/
/app/assets/javascripts/packages_and_registries/dependency_proxy/
/app/assets/javascripts/packages_and_registries/harbor_registry/
/ee/app/services/ee/auth/container_registry_authentication_service.rb
[Authentication] @gitlab-org/govern/authentication/approvers
/app/assets/javascripts/access_tokens/
/app/assets/javascripts/alerts_settings/graphql/mutations/reset_http_token.mutation.graphql
/app/assets/javascripts/authentication/
/app/assets/javascripts/packages_and_registries/package_registry/components/list/tokens/
/app/assets/javascripts/pages/admin/impersonation_tokens/
/app/assets/javascripts/pages/groups/settings/access_tokens/
/app/assets/javascripts/pages/ldap/
@ -1365,7 +1370,6 @@ lib/gitlab/checks/**
/ee/app/models/ee/project_authorization.rb
/ee/app/models/scim_oauth_access_token.rb
/ee/app/serializers/scim_oauth_access_token_entity.rb
/ee/app/services/ee/auth/
/ee/app/services/ee/personal_access_tokens/
/ee/app/services/ee/resource_access_tokens/
/ee/app/services/ee/users/authorized_build_service.rb

View File

@ -114,7 +114,6 @@ Rails/StrongParams:
- 'app/controllers/profiles/groups_controller.rb'
- 'app/controllers/profiles/slacks_controller.rb'
- 'app/controllers/profiles/two_factor_auths_controller.rb'
- 'app/controllers/profiles/webauthn_registrations_controller.rb'
- 'app/controllers/projects/alert_management_controller.rb'
- 'app/controllers/projects/alerting/notifications_controller.rb'
- 'app/controllers/projects/analytics/cycle_analytics/stages_controller.rb'

View File

@ -1240,7 +1240,6 @@ RSpec/FeatureCategory:
- 'spec/controllers/profiles/avatars_controller_spec.rb'
- 'spec/controllers/profiles/emails_controller_spec.rb'
- 'spec/controllers/profiles/preferences_controller_spec.rb'
- 'spec/controllers/profiles/webauthn_registrations_controller_spec.rb'
- 'spec/controllers/profiles_controller_spec.rb'
- 'spec/controllers/projects/analytics/cycle_analytics/stages_controller_spec.rb'
- 'spec/controllers/projects/analytics/cycle_analytics/summary_controller_spec.rb'

View File

@ -1239,7 +1239,6 @@ RSpec/NamedSubject:
- 'spec/controllers/profiles/preferences_controller_spec.rb'
- 'spec/controllers/profiles/slacks_controller_spec.rb'
- 'spec/controllers/profiles/two_factor_auths_controller_spec.rb'
- 'spec/controllers/profiles/webauthn_registrations_controller_spec.rb'
- 'spec/controllers/projects/analytics/cycle_analytics/summary_controller_spec.rb'
- 'spec/controllers/projects/artifacts_controller_spec.rb'
- 'spec/controllers/projects/avatars_controller_spec.rb'

View File

@ -108,7 +108,6 @@ Style/ClassAndModuleChildren:
- 'app/controllers/profiles/notifications_controller.rb'
- 'app/controllers/profiles/preferences_controller.rb'
- 'app/controllers/profiles/two_factor_auths_controller.rb'
- 'app/controllers/profiles/webauthn_registrations_controller.rb'
- 'app/controllers/projects/alert_management_controller.rb'
- 'app/controllers/projects/analytics/cycle_analytics/stages_controller.rb'
- 'app/controllers/projects/analytics/cycle_analytics/summary_controller.rb'

View File

@ -1,3 +1,5 @@
import initSearchSettings from '~/search_settings';
import initAccordion from '~/accordion';
initSearchSettings();
document.querySelectorAll('.js-experimental-setting-accordion').forEach(initAccordion);

View File

@ -34,7 +34,6 @@ export default {
TIMESTAMP_TYPE_UPDATED_AT,
PROJECT_DASHBOARD_TABS,
i18n: {
heading: __('Projects'),
projectCountError: __('An error occurred loading the project counts.'),
},
filteredSearchAndSort: {
@ -218,52 +217,48 @@ export default {
</script>
<template>
<div>
<h1 class="page-title gl-mt-5 gl-text-size-h-display">{{ $options.i18n.heading }}</h1>
<gl-tabs :value="activeTabIndex" @input="onTabUpdate">
<gl-tab v-for="tab in $options.PROJECT_DASHBOARD_TABS" :key="tab.text" lazy>
<template #title>
<div class="gl-flex gl-items-center gl-gap-2" data-testid="projects-dashboard-tab-title">
<span>{{ tab.text }}</span>
<gl-badge v-if="shouldShowCountBadge(tab)" size="sm" class="gl-tab-counter-badge">{{
numberToMetricPrefix(tabCount(tab))
}}</gl-badge>
</div>
</template>
<tab-view
v-if="tab.query"
:tab="tab"
:start-cursor="startCursor"
:end-cursor="endCursor"
:sort="sort"
:filters="filters"
@page-change="onPageChange"
/>
<template v-else>{{ tab.text }}</template>
</gl-tab>
<template #tabs-end>
<li class="gl-w-full">
<filtered-search-and-sort
class="gl-border-b-0"
:filtered-search-namespace="$options.filteredSearchAndSort.namespace"
:filtered-search-tokens="filteredSearchTokens"
:filtered-search-term-key="$options.filteredSearchAndSort.searchTermKey"
:filtered-search-recent-searches-storage-key="
$options.filteredSearchAndSort.recentSearchesStorageKey
"
:filtered-search-query="$route.query"
:is-ascending="isAscending"
:sort-options="$options.filteredSearchAndSort.sortOptions"
:active-sort-option="activeSortOption"
@filter="onFilter"
@sort-direction-change="onSortDirectionChange"
@sort-by-change="onSortByChange"
/>
</li>
<gl-tabs :value="activeTabIndex" @input="onTabUpdate">
<gl-tab v-for="tab in $options.PROJECT_DASHBOARD_TABS" :key="tab.text" lazy>
<template #title>
<div class="gl-flex gl-items-center gl-gap-2" data-testid="projects-dashboard-tab-title">
<span>{{ tab.text }}</span>
<gl-badge v-if="shouldShowCountBadge(tab)" size="sm" class="gl-tab-counter-badge">{{
numberToMetricPrefix(tabCount(tab))
}}</gl-badge>
</div>
</template>
</gl-tabs>
</div>
<tab-view
v-if="tab.query"
:tab="tab"
:start-cursor="startCursor"
:end-cursor="endCursor"
:sort="sort"
:filters="filters"
@page-change="onPageChange"
/>
<template v-else>{{ tab.text }}</template>
</gl-tab>
<template #tabs-end>
<li class="gl-w-full">
<filtered-search-and-sort
class="gl-border-b-0"
:filtered-search-namespace="$options.filteredSearchAndSort.namespace"
:filtered-search-tokens="filteredSearchTokens"
:filtered-search-term-key="$options.filteredSearchAndSort.searchTermKey"
:filtered-search-recent-searches-storage-key="
$options.filteredSearchAndSort.recentSearchesStorageKey
"
:filtered-search-query="$route.query"
:is-ascending="isAscending"
:sort-options="$options.filteredSearchAndSort.sortOptions"
:active-sort-option="activeSortOption"
@filter="onFilter"
@sort-direction-change="onSortDirectionChange"
@sort-by-change="onSortByChange"
/>
</li>
</template>
</gl-tabs>
</template>

View File

@ -59,6 +59,7 @@ export default {
...mapState(['searchLabelString', 'query', 'urlQuery', 'aggregations']),
...mapGetters([
'filteredLabels',
'labelAggregationBuckets',
'filteredUnselectedLabels',
'filteredAppliedSelectedLabels',
'appliedSelectedLabels',
@ -83,9 +84,12 @@ export default {
return this.$refs.searchLabelInputBox?.$el.querySelector('[role=searchbox]');
},
combinedSelectedFilters() {
const appliedSelectedLabelKeys = this.appliedSelectedLabels.map((label) => label.key);
const appliedSelectedLabelKeys = this.appliedSelectedLabels.map((label) => label.title);
const { labels = [] } = this.query;
return uniq([...appliedSelectedLabelKeys, ...labels]);
const uniqueResults = uniq([...appliedSelectedLabelKeys, ...labels]);
return uniqueResults;
},
searchLabels: {
get() {
@ -97,7 +101,7 @@ export default {
},
selectedLabels: {
get() {
return this.combinedSelectedLabels;
return this.convertLabelNamesToIds(this.combinedSelectedLabels);
},
set(value) {
const labelName = this.getLabelNameById(value);
@ -159,13 +163,21 @@ export default {
},
getLabelNameById(labelIds) {
const labelNames = labelIds.map((id) => {
const label = this.filteredLabels.find((filteredLabel) => {
const label = this.labelAggregationBuckets.find((filteredLabel) => {
return filteredLabel.key === String(id);
});
return label?.title;
});
return labelNames;
},
convertLabelNamesToIds(labelNames) {
const labels = labelNames.map((labelName) =>
this.labelAggregationBuckets.find((label) => {
return label.title === labelName;
}),
);
return labels.map((label) => label.key);
},
},
FIRST_DROPDOWN_INDEX,
SEARCH_RESULTS_DESCRIPTION,
@ -259,6 +271,7 @@ export default {
<label-dropdown-items
v-if="hasSelectedLabels"
:labels="filteredAppliedSelectedLabels"
data-testid="selected-labels-checkboxes"
/>
<gl-dropdown-divider v-if="hasSelectedLabels && hasUnselectedLabels" />
<label-dropdown-items v-if="hasUnselectedLabels" :labels="filteredUnselectedLabels" />

View File

@ -26,6 +26,7 @@ export default {
class="label-filter-menu-item gl-px-5 gl-py-3"
>
<gl-form-checkbox
v-model="label.checked"
class="label-with-color-checkbox gl-inline-flex gl-min-h-5"
:value="label.key"
>

View File

@ -1,4 +1,4 @@
import { findKey, intersection } from 'lodash';
import { findKey, intersection, difference } from 'lodash';
import { languageFilterData } from '~/search/sidebar/components/language_filter/data';
import {
LABEL_FILTER_PARAM,
@ -20,7 +20,7 @@ const appliedSelectedLabelsKeys = (state) =>
intersection(urlQueryLabelFilters(state), queryLabelFilters(state));
const unselectedLabelsKeys = (state) =>
urlQueryLabelFilters(state)?.filter((label) => !queryLabelFilters(state)?.includes(label));
difference(urlQueryLabelFilters(state), queryLabelFilters(state));
const unappliedNewLabelKeys = (state) => {
return state?.query?.[LABEL_FILTER_PARAM]?.filter(
@ -65,11 +65,10 @@ export const filteredLabels = (state) => {
export const filteredAppliedSelectedLabels = (state) =>
filteredLabels(state)?.filter((label) => urlQueryLabelFilters(state)?.includes(label.title));
export const appliedSelectedLabels = (state) => {
return labelAggregationBuckets(state)?.filter((label) =>
export const appliedSelectedLabels = (state) =>
labelAggregationBuckets(state)?.filter((label) =>
appliedSelectedLabelsKeys(state)?.includes(label.title),
);
};
export const filteredUnselectedLabels = (state) =>
filteredLabels(state)?.filter((label) => !urlQueryLabelFilters(state)?.includes(label.title));

View File

@ -118,6 +118,7 @@ export default {
category="tertiary"
icon="plus"
size="small"
data-testid="add-time-entry-button"
:title="__('Add time entry')"
:aria-label="__('Add time entry')"
/>
@ -131,6 +132,7 @@ export default {
v-gl-modal="'time-tracking-report'"
v-gl-tooltip="s__('TimeTracking|View time tracking report')"
variant="link"
data-testid="view-time-spent-button"
>
{{ humanTotalTimeSpent }}
</gl-button>
@ -150,6 +152,7 @@ export default {
v-gl-modal="$options.setTimeEstimateModalId"
v-gl-tooltip="s__('TimeTracking|Set estimate')"
variant="link"
data-testid="set-estimate-button"
>
{{ humanTimeEstimate }}
</gl-button>
@ -162,6 +165,7 @@ export default {
v-gl-modal="$options.setTimeEstimateModalId"
class="gl-ml-auto"
variant="link"
data-testid="add-estimate-button"
>
{{ s__('TimeTracking|Add estimate') }}
</gl-button>
@ -173,6 +177,7 @@ export default {
v-gl-modal="$options.setTimeEstimateModalId"
class="gl-align-baseline !gl-text-sm"
variant="link"
data-testid="add-estimate-button"
>
{{ content }}
</gl-button>
@ -182,6 +187,7 @@ export default {
v-gl-modal="$options.createTimelogModalId"
class="gl-align-baseline !gl-text-sm"
variant="link"
data-testid="add-time-spent-button"
>
{{ content }}
</gl-button>

View File

@ -1,6 +1,6 @@
.gl-accordion-item
%h3.gl-accordion-item-header
= render Pajamas::ButtonComponent.new(variant: :link, icon: icon, icon_classes: "js-chevron-icon", button_options: { "aria-controls": "accordion-item", "aria-expanded": expanded }) do
= render Pajamas::ButtonComponent.new(variant: :link, icon: icon, icon_classes: "js-chevron-icon", button_options: { "aria-controls": "accordion-item", "aria-expanded": expanded?, **@button_options}) do
= @title
.accordion-item.gl-mt-3.gl-text-base.collapse{ **body_class }

View File

@ -9,9 +9,11 @@ module Pajamas
# @param [String] title
# @param [Symbol] state
def initialize(title: nil, state: :closed)
# @param [Hash] button_options
def initialize(title: nil, state: :closed, button_options: {})
@title = title
@state = filter_attribute(state.to_sym, STATE_OPTIONS)
@button_options = button_options
end
def icon
@ -22,7 +24,7 @@ module Pajamas
@state == :opened ? { class: 'show' } : {}
end
def expanded
def expanded?
@state == :opened
end
end

View File

@ -3,7 +3,7 @@
class Profiles::TwoFactorAuthsController < Profiles::ApplicationController
skip_before_action :check_two_factor_requirement
before_action :ensure_verified_primary_email, only: [:show, :create]
before_action :validate_current_password, only: [:create, :codes, :destroy, :destroy_otp, :create_webauthn], if: :current_password_required?
before_action :validate_current_password, only: [:create, :codes, :destroy, :destroy_otp, :destroy_webauthn, :create_webauthn], if: :current_password_required?
before_action :update_current_user_otp!, only: [:show]
helper_method :current_password_required?
@ -103,6 +103,12 @@ class Profiles::TwoFactorAuthsController < Profiles::ApplicationController
end
end
def destroy_webauthn
Webauthn::DestroyService.new(current_user, current_user, params[:id]).execute
redirect_to profile_two_factor_auth_path, status: :found, notice: _("Successfully deleted WebAuthn device.")
end
def skip
if two_factor_grace_period_expired?
redirect_to new_profile_two_factor_auth_path, alert: _('Cannot skip two factor authentication setup')
@ -185,7 +191,7 @@ class Profiles::TwoFactorAuthsController < Profiles::ApplicationController
{
name: webauthn_registration.name,
created_at: webauthn_registration.created_at,
delete_path: profile_webauthn_registration_path(webauthn_registration)
delete_path: destroy_webauthn_profile_two_factor_auth_path(webauthn_registration)
}
end
end

View File

@ -1,11 +0,0 @@
# frozen_string_literal: true
class Profiles::WebauthnRegistrationsController < Profiles::ApplicationController
feature_category :system_access
def destroy
Webauthn::DestroyService.new(current_user, current_user, params[:id]).execute
redirect_to profile_two_factor_auth_path, status: :found, notice: _("Successfully deleted WebAuthn device.")
end
end

View File

@ -214,12 +214,19 @@ module AuthHelper
password_required: password_required.to_s }
end
def delete_webauthn_device_data(path)
def delete_webauthn_device_data(password_required, path)
message = if password_required
_('Are you sure you want to delete this WebAuthn device? ' \
'Enter your password to continue.')
else
_('Are you sure you want to delete this WebAuthn device?')
end
{ button_text: _('Delete WebAuthn device'),
icon: 'remove',
message: _('Are you sure you want to delete this WebAuthn device?'),
message: message,
path: path,
password_required: 'false' }
password_required: password_required.to_s }
end
def disable_two_factor_authentication_data(password_required)

View File

@ -11,6 +11,7 @@ module Packages
enum :status, default: 0, processing: 1, error: 3
belongs_to :package, -> { where(package_type: :nuget) }, inverse_of: :nuget_symbols
belongs_to :project
delegate :project_id, :project, to: :package

View File

@ -52,7 +52,8 @@ module Packages
file_path: path,
signature: signature,
size: file.size,
file_sha256: checksum
file_sha256: checksum,
project_id: package.project_id
)
rescue StandardError => e
Gitlab::ErrorTracking.track_exception(e, class: self.class.name, package_id: package.id)

View File

@ -8,5 +8,3 @@
- if current_user.can_create_project?
= render Pajamas::ButtonComponent.new(href: new_project_path, variant: :confirm, button_options: { data: { testid: 'new-project-button' } }) do
= _("New project")
= render 'dashboard/projects_nav'

View File

@ -10,10 +10,12 @@
= render "projects/last_push"
- if Feature.enabled?(:your_work_projects_vue, current_user)
= render 'dashboard/projects_head'
#js-your-work-projects-app{ data: { app_data: dashboard_projects_app_data } }
- else
- if show_projects?(@projects, params)
= render 'dashboard/projects_head'
= render 'dashboard/projects_nav'
= render 'projects'
- else
= render "zero_authorized_projects"

View File

@ -5,9 +5,11 @@
= render "projects/last_push"
- if Feature.enabled?(:your_work_projects_vue, current_user)
#js-your-work-projects-app
= render 'dashboard/projects_head'
#js-your-work-projects-app{ data: { app_data: dashboard_projects_app_data } }
- else
= render 'dashboard/projects_head', project_tab_filter: :starred
= render 'dashboard/projects_nav'
- if params[:filter_projects] || any_projects?(@projects)
= render 'projects'

View File

@ -1,4 +1 @@
- if Feature.enabled?(:your_work_projects_vue, current_user)
#js-your-work-projects-app{ data: { app_data: dashboard_projects_app_data } }
- else
= render partial: 'dashboard/projects/shared/common', locals: {page_title: _('Starred Projects'), empty_page: 'starred_empty_state'}
= render partial: 'dashboard/projects/shared/common', locals: {page_title: _('Starred Projects'), empty_page: 'starred_empty_state'}

View File

@ -19,8 +19,12 @@
tag_pair(tag.strong, :strong_start, :strong_end))
- if @pre_auth.scopes
- @pre_auth.scopes.each do |scope|
%strong= t scope, scope: [:doorkeeper, :scopes]
.gl-text-gray-500.gl-pb-5.gl-text-sm= t scope, scope: [:doorkeeper, :scope_desc]
.js-experimental-setting-accordion.gl-mb-5
- title = t(scope, scope: [:doorkeeper, :scopes])
- description = t(scope, scope: [:doorkeeper, :scope_desc])
= render Pajamas::AccordionItemComponent.new(title: title, state: :closed, button_options: { class: '!gl-text-default gl-font-bold' }) do
.gl-text-gray-500.gl-text-sm
= description
.info-well
.well-segment
- if Gitlab.com? && !@pre_auth.client.application.owner

View File

@ -42,7 +42,7 @@
%p
- register_2fa_token = _('We recommend using cloud-based authenticator applications that can restore access if you lose your hardware device.')
= register_2fa_token.html_safe
= link_to _('What are some examples?'), help_page_path('user/profile/account/two_factor_authentication.md', anchor: 'enable-one-time-password'), target: '_blank', rel: 'noopener noreferrer'
= link_to _('What are some examples?'), help_page_path('user/profile/account/two_factor_authentication.md', anchor: 'enable-a-one-time-password-authenticator'), target: '_blank', rel: 'noopener noreferrer'
.gl-p-2.gl-mb-3{ style: 'background: #fff' }
= raw @qr_code
.gl-mb-5
@ -93,7 +93,7 @@
= _("Not all browsers support WebAuthn. You must save your recovery codes after you first register a two-factor authenticator to be able to sign in, even from an unsupported browser.")
- else
= _("Not all browsers support WebAuthn. Therefore, we require that you set up a two-factor authentication app first. That way you'll always be able to sign in, even from an unsupported browser.")
.col-lg-8{ data: { testid: 'webauthn' } }
.col-lg-8
- if @webauthn_registration.errors.present?
= form_errors(@webauthn_registration)
= render "authentication/register", target_path: create_webauthn_profile_two_factor_auth_path
@ -127,7 +127,7 @@
%td= registration[:created_at].to_date.to_fs(:medium)
%td
.gl-float-right
.js-two-factor-action-confirm{ data: delete_webauthn_device_data(registration[:delete_path]) }
.js-two-factor-action-confirm{ data: delete_webauthn_device_data(current_password_required?, registration[:delete_path]) }
- else
.settings-message.text-center

View File

@ -37,9 +37,10 @@
.gl-mr-3
- if mr_status.present? && can?(current_user, :read_merge_request, related_merge_request)
.issuable-reference.gl-flex.gl-justify-end.gl-overflow-hidden
= gl_badge_tag issuable_reference(related_merge_request),
{ icon: mr_status[:icon], variant: mr_status[:variant], href: merge_request_path(related_merge_request) },
{ class: 'gl-truncate', title: mr_status[:title], data: { toggle: 'tooltip', container: 'body' } }
= gl_badge_tag({ icon: mr_status[:icon], variant: mr_status[:variant], href: merge_request_path(related_merge_request) },
{ class: 'gl-truncate', title: mr_status[:title], data: { toggle: 'tooltip', container: 'body' } }) do
.gl-truncate
= issuable_reference(related_merge_request)
- elsif mr_status.nil? && create_mr_button?(from: branch.name, source_project: @project)
= render Pajamas::ButtonComponent.new(icon: 'merge-request', href: create_mr_path(from: branch.name, source_project: @project), button_options: { class: 'has-tooltip', title: _('New merge request') }) do

View File

@ -64,11 +64,10 @@ resource :profile, only: [] do
patch :skip
post :create_webauthn
delete :destroy_otp
delete :destroy_webauthn, path: 'destroy_webauthn/:id'
end
end
resources :webauthn_registrations, only: [:destroy]
resources :usage_quotas, only: [:index]
end
end

View File

@ -18,10 +18,6 @@
With the introduction of [GitLab CI/CD components for self-managed users](https://docs.gitlab.com/ee/ci/components/#use-a-gitlabcom-component-in-a-self-managed-instance)
we are removing the redundant OpenTofu CI/CD templates in favor of the CI/CD components.
We introduced the OpenTofu CI/CD template in 16.8 because CI/CD components were not yet available for self-managed instances.
With the introduction of [GitLab CI/CD components for self-managed users](https://docs.gitlab.com/ee/ci/components/#use-a-gitlabcom-component-in-a-self-managed-instance),
we are removing the redundant OpenTofu CI/CD templates in favor of the [OpenTofu CI/CD component](https://gitlab.com/components/opentofu).
For information about migrating from the CI/CD template to the component, see the [OpenTofu component documentation](https://gitlab.com/components/opentofu#usage-on-self-managed).
# ==============================

View File

@ -0,0 +1,8 @@
---
migration_job_name: BackfillPackagesNugetSymbolsProjectId
description: Populates the `project_id` column of `packages_nuget_symbols` table from the `packages_packages` table
feature_category: package_registry
introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/166743
milestone: '17.5'
queued_migration_version: 20240911173549
finalized_by: # version of the migration that finalized this BBM

View File

@ -10,4 +10,13 @@ milestone: '16.4'
gitlab_schema: gitlab_main_cell
allow_cross_foreign_keys:
- gitlab_main_clusterwide
sharding_key_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/461803
desired_sharding_key:
project_id:
references: projects
backfill_via:
parent:
foreign_key: package_id
table: packages_packages
sharding_key: project_id
belongs_to: package
desired_sharding_key_migration_job_name: BackfillPackagesNugetSymbolsProjectId

View File

@ -0,0 +1,9 @@
# frozen_string_literal: true
class AddProjectIdToPackagesNugetSymbols < Gitlab::Database::Migration[2.2]
milestone '17.5'
def change
add_column :packages_nuget_symbols, :project_id, :bigint
end
end

View File

@ -0,0 +1,40 @@
# frozen_string_literal: true
class QueueBackfillPackagesNugetSymbolsProjectId < Gitlab::Database::Migration[2.2]
milestone '17.5'
disable_ddl_transaction
restrict_gitlab_migration gitlab_schema: :gitlab_main
MIGRATION = 'BackfillPackagesNugetSymbolsProjectId'
DELAY_INTERVAL = 2.minutes
BATCH_SIZE = 1000
SUB_BATCH_SIZE = 200
def up
queue_batched_background_migration(
MIGRATION,
:packages_nuget_symbols,
:id,
:project_id,
:packages_packages,
:project_id,
:package_id,
job_interval: DELAY_INTERVAL,
batch_size: BATCH_SIZE,
sub_batch_size: SUB_BATCH_SIZE
)
end
def down
delete_batched_background_migration(
MIGRATION,
:packages_nuget_symbols,
:id, [
:project_id,
:packages_packages,
:project_id,
:package_id
]
)
end
end

View File

@ -0,0 +1,16 @@
# frozen_string_literal: true
class AddIndexOnProjectIdToPackagesNugetSymbols < Gitlab::Database::Migration[2.2]
disable_ddl_transaction!
milestone '17.5'
INDEX_NAME = :index_packages_nuget_symbols_on_project_id
def up
add_concurrent_index :packages_nuget_symbols, :project_id, name: INDEX_NAME
end
def down
remove_concurrent_index_by_name :packages_nuget_symbols, INDEX_NAME
end
end

View File

@ -0,0 +1,16 @@
# frozen_string_literal: true
class IndexVulnNamespaceHistoricalStatisticsOnNamespaceIdAndId < Gitlab::Database::Migration[2.2]
INDEX_NAME = 'index_vuln_namespace_hist_statistics_for_traversal_ids_update'
disable_ddl_transaction!
milestone '17.6'
def up
add_concurrent_index :vulnerability_namespace_historical_statistics, %i[namespace_id id], name: INDEX_NAME
end
def down
remove_concurrent_index_by_name :vulnerability_namespace_historical_statistics, INDEX_NAME
end
end

View File

@ -0,0 +1,16 @@
# frozen_string_literal: true
class DropIndexVulnNamespaceHistoricalStatisticsOnNamespaceId < Gitlab::Database::Migration[2.2]
INDEX_NAME = 'index_vuln_namespace_historical_statistics_on_namespace_id'
disable_ddl_transaction!
milestone '17.6'
def up
remove_concurrent_index_by_name :vulnerability_namespace_historical_statistics, INDEX_NAME
end
def down
add_concurrent_index :vulnerability_namespace_historical_statistics, :namespace_id, name: INDEX_NAME
end
end

View File

@ -0,0 +1 @@
e19b15d7a9b309975802ca985c621511b42aaa65e1e9396ce4b99be477250ff4

View File

@ -0,0 +1 @@
56477eeec914a1c36f574701717eed3e95da99582cca948ed1e821b55394eba2

View File

@ -0,0 +1 @@
4071c464682efce384e53d8849c99a35acd0cdd2b3ba2d6f4815d89267254a8b

View File

@ -0,0 +1 @@
ad12c7ba1062f948604ab1906be59e5cdc9e0f3082dddb253a27ede6cd9dca94

View File

@ -0,0 +1 @@
627a32ec7f4eae28bea7ccd2ddd38a87041d351716c4495350999ced4c800d5d

View File

@ -15721,6 +15721,7 @@ CREATE TABLE packages_nuget_symbols (
object_storage_key text NOT NULL,
file_sha256 bytea,
status smallint DEFAULT 0 NOT NULL,
project_id bigint,
CONSTRAINT check_0e93ca58b7 CHECK ((char_length(file) <= 255)),
CONSTRAINT check_28b82b08fa CHECK ((char_length(object_storage_key) <= 255)),
CONSTRAINT check_30b0ef2ca2 CHECK ((char_length(file_path) <= 255)),
@ -30338,6 +30339,8 @@ CREATE UNIQUE INDEX index_packages_nuget_symbols_on_object_storage_key ON packag
CREATE INDEX index_packages_nuget_symbols_on_package_id ON packages_nuget_symbols USING btree (package_id);
CREATE INDEX index_packages_nuget_symbols_on_project_id ON packages_nuget_symbols USING btree (project_id);
CREATE UNIQUE INDEX index_packages_nuget_symbols_on_signature_and_file_path ON packages_nuget_symbols USING btree (signature, file_path);
CREATE INDEX index_packages_on_available_pypi_packages ON packages_packages USING btree (project_id, id) WHERE ((status = ANY (ARRAY[0, 1])) AND (package_type = 5) AND (version IS NOT NULL));
@ -31542,7 +31545,7 @@ CREATE INDEX index_vuln_mgmt_policy_rules_on_policy_mgmt_project_id ON vulnerabi
CREATE UNIQUE INDEX index_vuln_mgmt_policy_rules_on_unique_policy_rule_index ON vulnerability_management_policy_rules USING btree (security_policy_id, rule_index);
CREATE INDEX index_vuln_namespace_historical_statistics_on_namespace_id ON vulnerability_namespace_historical_statistics USING btree (namespace_id);
CREATE INDEX index_vuln_namespace_hist_statistics_for_traversal_ids_update ON vulnerability_namespace_historical_statistics USING btree (namespace_id, id);
CREATE UNIQUE INDEX index_vuln_namespace_historical_statistics_traversal_ids_date ON vulnerability_namespace_historical_statistics USING btree (traversal_ids, date);

View File

@ -385,7 +385,7 @@ DETAILS:
#### Memberships not granted
Sometimes you may think a particular user should be added to a GitLab group via
Sometimes you may think a particular user should be added to a GitLab group through
LDAP group sync, but for some reason it's not happening. You can check several
things to debug the situation.

View File

@ -272,7 +272,7 @@ changing Git remotes and API URLs.
```
NOTE:
Changing `external_url` does not prevent access via the old secondary URL, as
Changing `external_url` does not prevent access through the old secondary URL, as
long as the secondary DNS records are still intact.
1. Update the **secondary**'s SSL certificate:

View File

@ -218,7 +218,7 @@ This list of limitations only reflects the latest version of GitLab. If you are
- [Disaster recovery](disaster_recovery/index.md) for deployments that have multiple secondary sites causes downtime due to the need to re-initialize PostgreSQL streaming replication on all non-promoted secondaries to follow the new primary site.
- For Git over SSH, to make the project clone URL display correctly regardless of which site you are browsing, secondary sites must use the same port as the primary. [GitLab issue #339262](https://gitlab.com/gitlab-org/gitlab/-/issues/339262) proposes to remove this limitation.
- Git push over SSH against a secondary site does not work for pushes over 1.86 GB. [GitLab issue #413109](https://gitlab.com/gitlab-org/gitlab/-/issues/413109) tracks this bug.
- Backups [cannot be run on Geo secondary sites](replication/troubleshooting/replication.md#message-error-canceling-statement-due-to-conflict-with-recovery).
- Backups [cannot be run on Geo secondary sites](replication/troubleshooting/postgresql_replication.md#message-error-canceling-statement-due-to-conflict-with-recovery).
- Git push with options over SSH against a secondary site does not work and terminates the connection. For more information, see [issue 417186](https://gitlab.com/gitlab-org/gitlab/-/issues/417186).
- The Geo secondary site does not accelerate (serve) the clone request for the first stage of the pipeline in most cases. Later stages are not guaranteed to be served by the secondary site either, for example if the Git change is large, bandwidth is small, or pipeline stages are short. In general, it does serve the clone request for subsequent stages. [Issue 446176](https://gitlab.com/gitlab-org/gitlab/-/issues/446176) discusses the reasons for this and proposes an enhancement to increase the chance that Runner clone requests are served from the secondary site.
- When a single Git repository receives pushes at a high-enough rate, the secondary site's local copy can be perpetually out-of-date. This causes all Git fetches of that repository to be forwarded to the primary site. See [GitLab issue #455870](https://gitlab.com/gitlab-org/gitlab/-/issues/455870).

View File

@ -211,7 +211,7 @@ The 3 status items are defined as follows:
To find more details about failed items, check
[the `gitlab-rails/geo.log` file](../../../logs/log_parsing.md#find-most-common-geo-sync-errors)
If you notice replication or verification failures, you can try to [resolve them](replication.md#fixing-non-postgresql-replication-failures).
If you notice synchronization or verification failures, you can try to [resolve them](replication.md).
If there are Repository check failures, you can try to [resolve them](synchronization.md#find-repository-check-failures-in-a-geo-secondary-site).

View File

@ -0,0 +1,237 @@
---
stage: Systems
group: Geo
info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments
---
# Troubleshooting Geo PostgreSQL replication
DETAILS:
**Tier:** Premium, Ultimate
**Offering:** Self-managed
The following sections outline troubleshooting steps for fixing replication error messages (indicated by `Database replication working? ... no` in the
[`geo:check` output](common.md#health-check-rake-task)).
The instructions present here mostly assume a single-node Geo Linux package deployment, and might need to be adapted to different environments.
## Removing an inactive replication slot
Replication slots are marked as 'inactive' when the replication client (a secondary site) connected to the slot disconnects.
Inactive replication slots cause WAL files to be retained, because they are sent to the client when it reconnects and the slot becomes active once more.
If the secondary site is not able to reconnect, use the following steps to remove its corresponding inactive replication slot:
1. [Start a PostgreSQL console session](https://docs.gitlab.com/omnibus/settings/database.html#connecting-to-the-postgresql-database) on the Geo primary site's database node:
```shell
sudo gitlab-psql -d gitlabhq_production
```
NOTE:
Using `gitlab-rails dbconsole` does not work, because managing replication slots requires superuser permissions.
1. View the replication slots and remove them if they are inactive:
```sql
SELECT * FROM pg_replication_slots;
```
Slots where `active` is `f` are inactive.
- When this slot should be active, because you have a **secondary** site configured using that slot,
look for the [PostgreSQL logs](../../../logs/index.md#postgresql-logs) for the **secondary** site,
to view why the replication is not running.
- If you are no longer using the slot (for example, you no longer have Geo enabled), or the secondary site is no longer able to reconnect,
you should remove it using the PostgreSQL console session:
```sql
SELECT pg_drop_replication_slot('<name_of_inactive_slot>');
```
1. Follow either the steps [to remove that Geo site](../remove_geo_site.md) if it's no longer required,
or [re-initiate the replication process](../../setup/database.md#step-3-initiate-the-replication-process), which recreates the replication slot correctly.
## Message: `WARNING: oldest xmin is far in the past` and `pg_wal` size growing
If a replication slot is inactive,
the `pg_wal` logs corresponding to the slot are reserved forever
(or until the slot is active again). This causes continuous disk usage growth
and the following messages appear repeatedly in the
[PostgreSQL logs](../../../logs/index.md#postgresql-logs):
```plaintext
WARNING: oldest xmin is far in the past
HINT: Close open transactions soon to avoid wraparound problems.
You might also need to commit or roll back old prepared transactions, or drop stale replication slots.
```
To fix this, you should [remove the inactive replication slot](#removing-an-inactive-replication-slot) and re-initiate the replication.
## Message: `ERROR: replication slots can only be used if max_replication_slots > 0`?
This means that the `max_replication_slots` PostgreSQL variable needs to
be set on the **primary** database. This setting defaults to 1. You may need to
increase this value if you have more **secondary** sites.
Be sure to restart PostgreSQL for this to take effect. See the
[PostgreSQL replication setup](../../setup/database.md#postgresql-replication) guide for more details.
## Message: `replication slot "geo_secondary_my_domain_com" does not exist`
This error occurs when PostgreSQL does not have a replication slot for the
**secondary** site by that name:
```plaintext
FATAL: could not start WAL streaming: ERROR: replication slot "geo_secondary_my_domain_com" does not exist
```
You may want to rerun the [replication process](../../setup/database.md) on the **secondary** site.
## Message: "Command exceeded allowed execution time" when setting up replication?
This may happen while [initiating the replication process](../../setup/database.md#step-3-initiate-the-replication-process) on the **secondary** site,
and indicates your initial dataset is too large to be replicated in the default timeout (30 minutes).
Re-run `gitlab-ctl replicate-geo-database`, but include a larger value for
`--backup-timeout`:
```shell
sudo gitlab-ctl \
replicate-geo-database \
--host=<primary_node_hostname> \
--slot-name=<secondary_slot_name> \
--backup-timeout=21600
```
This gives the initial replication up to six hours to complete, rather than
the default 30 minutes. Adjust as required for your installation.
## Message: "PANIC: could not write to file `pg_xlog/xlogtemp.123`: No space left on device"
Determine if you have any unused replication slots in the **primary** database. This can cause large amounts of
log data to build up in `pg_xlog`.
[Removing the inactive slots](#removing-an-inactive-replication-slot) can reduce the amount of space used in the `pg_xlog`.
## Message: "ERROR: canceling statement due to conflict with recovery"
This error message occurs infrequently under typical usage, and the system is resilient
enough to recover.
However, under certain conditions, some database queries on secondaries may run
excessively long, which increases the frequency of this error message. This can lead to a situation
where some queries never complete due to being canceled on every replication.
These long-running queries are
[planned to be removed in the future](https://gitlab.com/gitlab-org/gitlab/-/issues/34269),
but as a workaround, we recommend enabling
[`hot_standby_feedback`](https://www.postgresql.org/docs/10/hot-standby.html#HOT-STANDBY-CONFLICT).
This increases the likelihood of bloat on the **primary** site as it prevents
`VACUUM` from removing recently-dead rows. However, it has been used
successfully in production on GitLab.com.
To enable `hot_standby_feedback`, add the following to `/etc/gitlab/gitlab.rb`
on the **secondary** site:
```ruby
postgresql['hot_standby_feedback'] = 'on'
```
Then reconfigure GitLab:
```shell
sudo gitlab-ctl reconfigure
```
To help us resolve this problem, consider commenting on
[the issue](https://gitlab.com/gitlab-org/gitlab/-/issues/4489).
## Message: `server certificate for "PostgreSQL" does not match host name`
If you see this error:
```plaintext
FATAL: could not connect to the primary server: server certificate for "PostgreSQL" does not match host name
```
This happens because the PostgreSQL certificate that the Linux package automatically creates contains
the Common Name `PostgreSQL`, but the replication is connecting to a different host and GitLab attempts to use
the `verify-full` SSL mode by default.
To fix this issue, you can either:
- Use the `--sslmode=verify-ca` argument with the `replicate-geo-database` command.
- For an already replicated database, change `sslmode=verify-full` to `sslmode=verify-ca`
in `/var/opt/gitlab/postgresql/data/gitlab-geo.conf` and run `gitlab-ctl restart postgresql`.
- [Configure SSL for PostgreSQL](https://docs.gitlab.com/omnibus/settings/database.html#configuring-ssl)
with a custom certificate (including the host name that's used to connect to the database in the CN or SAN)
instead of using the automatically generated certificate.
## Message: `LOG: invalid CIDR mask in address`
This happens on wrongly-formatted addresses in `postgresql['md5_auth_cidr_addresses']`.
```plaintext
2020-03-20_23:59:57.60499 LOG: invalid CIDR mask in address "***"
2020-03-20_23:59:57.60501 CONTEXT: line 74 of configuration file "/var/opt/gitlab/postgresql/data/pg_hba.conf"
```
To fix this, update the IP addresses in `/etc/gitlab/gitlab.rb` under `postgresql['md5_auth_cidr_addresses']`
to respect the CIDR format (for example, `10.0.0.1/32`).
## Message: `LOG: invalid IP mask "md5": Name or service not known`
This happens when you have added IP addresses without a subnet mask in `postgresql['md5_auth_cidr_addresses']`.
```plaintext
2020-03-21_00:23:01.97353 LOG: invalid IP mask "md5": Name or service not known
2020-03-21_00:23:01.97354 CONTEXT: line 75 of configuration file "/var/opt/gitlab/postgresql/data/pg_hba.conf"
```
To fix this, add the subnet mask in `/etc/gitlab/gitlab.rb` under `postgresql['md5_auth_cidr_addresses']`
to respect the CIDR format (for example, `10.0.0.1/32`).
## Message: `Found data in the gitlabhq_production database`
If you receive the error `Found data in the gitlabhq_production database!` when running
`gitlab-ctl replicate-geo-database`, data was detected in the `projects` table. When one or more projects are detected, the operation
is aborted to prevent accidental data loss. To bypass this message, pass the `--force` option to the command.
## Message: `FATAL: could not map anonymous shared memory: Cannot allocate memory`
If you see this message, it means that the secondary site's PostgreSQL tries to request memory that is higher than the available memory. There is an [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/381585) that tracks this problem.
Example error message in Patroni logs (located at `/var/log/gitlab/patroni/current` for Linux package installations):
```plaintext
2023-11-21_23:55:18.63727 FATAL: could not map anonymous shared memory: Cannot allocate memory
2023-11-21_23:55:18.63729 HINT: This error usually means that PostgreSQL's request for a shared memory segment exceeded available memory, swap space, or huge pages. To reduce the request size (currently 17035526144 bytes), reduce PostgreSQL's shared memory usage, perhaps by reducing shared_buffers or max_connections.
```
The workaround is to increase the memory available to the secondary site's PostgreSQL nodes to match the memory requirements of the primary site's PostgreSQL nodes.
## Investigate causes of database replication lag
If the output of `sudo gitlab-rake geo:status` shows that `Database replication lag` remains significantly high over time, the primary node in database replication can be checked to determine the status of lag for
different parts of the database replication process. These values are known as `write_lag`, `flush_lag`, and `replay_lag`. For more information, see
[the official PostgreSQL documentation](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-REPLICATION-VIEW).
Run the following command from the primary Geo node's database to provide relevant output:
```shell
gitlab-psql -xc 'SELECT write_lag,flush_lag,replay_lag FROM pg_stat_replication;'
-[ RECORD 1 ]---------------
write_lag | 00:00:00.072392
flush_lag | 00:00:00.108168
replay_lag | 00:00:00.108283
```
If one or more of these values is significantly high, this could indicate a problem and should be investigated further. When determining the cause, consider that:
- `write_lag` indicates the time elapsed since WAL bytes were sent by the primary and received by the secondary, but not yet flushed or applied.
- A high `write_lag` value may indicate degraded network performance or insufficient network speed between the primary and secondary nodes.
- A high `flush_lag` value may indicate degraded or sub-optimal disk I/O performance with the secondary node's storage device.
- A high `replay_lag` value may indicate long running transactions in PostgreSQL, or the saturation of a needed resource like the CPU.
- The difference in time between `write_lag` and `flush_lag` indicates that WAL bytes have been sent to the underlying storage system, but it has not reported that they were flushed.
This data is most likely not fully written to a persistent storage, and likely held in some kind of volatile write cache.
- The difference between `flush_lag` and `replay_lag` indicates WAL bytes that have been successfully persisted to storage, but could not be replayed by the database system.

View File

@ -10,216 +10,13 @@ DETAILS:
**Tier:** Premium, Ultimate
**Offering:** Self-managed
## Fixing PostgreSQL database replication errors
The following sections outline troubleshooting steps for fixing replication error messages (indicated by `Database replication working? ... no` in the
[`geo:check` output](common.md#health-check-rake-task)).
The instructions present here mostly assume a single-node Geo Linux package deployment, and might need to be adapted to different environments.
### Removing an inactive replication slot
Replication slots are marked as 'inactive' when the replication client (a secondary site) connected to the slot disconnects.
Inactive replication slots cause WAL files to be retained, because they are sent to the client when it reconnects and the slot becomes active once more.
If the secondary site is not able to reconnect, use the following steps to remove its corresponding inactive replication slot:
1. [Start a PostgreSQL console session](https://docs.gitlab.com/omnibus/settings/database.html#connecting-to-the-postgresql-database) on the Geo primary site's database node:
```shell
sudo gitlab-psql -d gitlabhq_production
```
NOTE:
Using `gitlab-rails dbconsole` does not work, because managing replication slots requires superuser permissions.
1. View the replication slots and remove them if they are inactive:
```sql
SELECT * FROM pg_replication_slots;
```
Slots where `active` is `f` are inactive.
- When this slot should be active, because you have a **secondary** site configured using that slot,
look for the [PostgreSQL logs](../../../logs/index.md#postgresql-logs) for the **secondary** site,
to view why the replication is not running.
- If you are no longer using the slot (for example, you no longer have Geo enabled), or the secondary site is no longer able to reconnect,
you should remove it using the PostgreSQL console session:
```sql
SELECT pg_drop_replication_slot('<name_of_inactive_slot>');
```
1. Follow either the steps [to remove that Geo site](../remove_geo_site.md) if it's no longer required,
or [re-initiate the replication process](../../setup/database.md#step-3-initiate-the-replication-process), which recreates the replication slot correctly.
### Message: `WARNING: oldest xmin is far in the past` and `pg_wal` size growing
If a replication slot is inactive,
the `pg_wal` logs corresponding to the slot are reserved forever
(or until the slot is active again). This causes continuous disk usage growth
and the following messages appear repeatedly in the
[PostgreSQL logs](../../../logs/index.md#postgresql-logs):
```plaintext
WARNING: oldest xmin is far in the past
HINT: Close open transactions soon to avoid wraparound problems.
You might also need to commit or roll back old prepared transactions, or drop stale replication slots.
```
To fix this, you should [remove the inactive replication slot](#removing-an-inactive-replication-slot) and re-initiate the replication.
### Message: `ERROR: replication slots can only be used if max_replication_slots > 0`?
This means that the `max_replication_slots` PostgreSQL variable needs to
be set on the **primary** database. This setting defaults to 1. You may need to
increase this value if you have more **secondary** sites.
Be sure to restart PostgreSQL for this to take effect. See the
[PostgreSQL replication setup](../../setup/database.md#postgresql-replication) guide for more details.
### Message: `replication slot "geo_secondary_my_domain_com" does not exist`
This error occurs when PostgreSQL does not have a replication slot for the
**secondary** site by that name:
```plaintext
FATAL: could not start WAL streaming: ERROR: replication slot "geo_secondary_my_domain_com" does not exist
```
You may want to rerun the [replication process](../../setup/database.md) on the **secondary** site.
### Message: "Command exceeded allowed execution time" when setting up replication?
This may happen while [initiating the replication process](../../setup/database.md#step-3-initiate-the-replication-process) on the **secondary** site,
and indicates your initial dataset is too large to be replicated in the default timeout (30 minutes).
Re-run `gitlab-ctl replicate-geo-database`, but include a larger value for
`--backup-timeout`:
```shell
sudo gitlab-ctl \
replicate-geo-database \
--host=<primary_node_hostname> \
--slot-name=<secondary_slot_name> \
--backup-timeout=21600
```
This gives the initial replication up to six hours to complete, rather than
the default 30 minutes. Adjust as required for your installation.
### Message: "PANIC: could not write to file `pg_xlog/xlogtemp.123`: No space left on device"
Determine if you have any unused replication slots in the **primary** database. This can cause large amounts of
log data to build up in `pg_xlog`.
[Removing the inactive slots](#removing-an-inactive-replication-slot) can reduce the amount of space used in the `pg_xlog`.
### Message: "ERROR: canceling statement due to conflict with recovery"
This error message occurs infrequently under typical usage, and the system is resilient
enough to recover.
However, under certain conditions, some database queries on secondaries may run
excessively long, which increases the frequency of this error message. This can lead to a situation
where some queries never complete due to being canceled on every replication.
These long-running queries are
[planned to be removed in the future](https://gitlab.com/gitlab-org/gitlab/-/issues/34269),
but as a workaround, we recommend enabling
[`hot_standby_feedback`](https://www.postgresql.org/docs/10/hot-standby.html#HOT-STANDBY-CONFLICT).
This increases the likelihood of bloat on the **primary** site as it prevents
`VACUUM` from removing recently-dead rows. However, it has been used
successfully in production on GitLab.com.
To enable `hot_standby_feedback`, add the following to `/etc/gitlab/gitlab.rb`
on the **secondary** site:
```ruby
postgresql['hot_standby_feedback'] = 'on'
```
Then reconfigure GitLab:
```shell
sudo gitlab-ctl reconfigure
```
To help us resolve this problem, consider commenting on
[the issue](https://gitlab.com/gitlab-org/gitlab/-/issues/4489).
### Message: `server certificate for "PostgreSQL" does not match host name`
If you see this error:
```plaintext
FATAL: could not connect to the primary server: server certificate for "PostgreSQL" does not match host name
```
This happens because the PostgreSQL certificate that the Linux package automatically creates contains
the Common Name `PostgreSQL`, but the replication is connecting to a different host and GitLab attempts to use
the `verify-full` SSL mode by default.
To fix this issue, you can either:
- Use the `--sslmode=verify-ca` argument with the `replicate-geo-database` command.
- For an already replicated database, change `sslmode=verify-full` to `sslmode=verify-ca`
in `/var/opt/gitlab/postgresql/data/gitlab-geo.conf` and run `gitlab-ctl restart postgresql`.
- [Configure SSL for PostgreSQL](https://docs.gitlab.com/omnibus/settings/database.html#configuring-ssl)
with a custom certificate (including the host name that's used to connect to the database in the CN or SAN)
instead of using the automatically generated certificate.
### Message: `LOG: invalid CIDR mask in address`
This happens on wrongly-formatted addresses in `postgresql['md5_auth_cidr_addresses']`.
```plaintext
2020-03-20_23:59:57.60499 LOG: invalid CIDR mask in address "***"
2020-03-20_23:59:57.60501 CONTEXT: line 74 of configuration file "/var/opt/gitlab/postgresql/data/pg_hba.conf"
```
To fix this, update the IP addresses in `/etc/gitlab/gitlab.rb` under `postgresql['md5_auth_cidr_addresses']`
to respect the CIDR format (for example, `10.0.0.1/32`).
### Message: `LOG: invalid IP mask "md5": Name or service not known`
This happens when you have added IP addresses without a subnet mask in `postgresql['md5_auth_cidr_addresses']`.
```plaintext
2020-03-21_00:23:01.97353 LOG: invalid IP mask "md5": Name or service not known
2020-03-21_00:23:01.97354 CONTEXT: line 75 of configuration file "/var/opt/gitlab/postgresql/data/pg_hba.conf"
```
To fix this, add the subnet mask in `/etc/gitlab/gitlab.rb` under `postgresql['md5_auth_cidr_addresses']`
to respect the CIDR format (for example, `10.0.0.1/32`).
### Message: `Found data in the gitlabhq_production database`
If you receive the error `Found data in the gitlabhq_production database!` when running
`gitlab-ctl replicate-geo-database`, data was detected in the `projects` table. When one or more projects are detected, the operation
is aborted to prevent accidental data loss. To bypass this message, pass the `--force` option to the command.
### Message: `FATAL: could not map anonymous shared memory: Cannot allocate memory`
If you see this message, it means that the secondary site's PostgreSQL tries to request memory that is higher than the available memory. There is an [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/381585) that tracks this problem.
Example error message in Patroni logs (located at `/var/log/gitlab/patroni/current` for Linux package installations):
```plaintext
2023-11-21_23:55:18.63727 FATAL: could not map anonymous shared memory: Cannot allocate memory
2023-11-21_23:55:18.63729 HINT: This error usually means that PostgreSQL's request for a shared memory segment exceeded available memory, swap space, or huge pages. To reduce the request size (currently 17035526144 bytes), reduce PostgreSQL's shared memory usage, perhaps by reducing shared_buffers or max_connections.
```
The workaround is to increase the memory available to the secondary site's PostgreSQL nodes to match the memory requirements of the primary site's PostgreSQL nodes.
## Fixing non-PostgreSQL replication failures
If you notice replication failures in `Admin > Geo > Sites` or the [Sync status Rake task](common.md#sync-status-rake-task), you can try to resolve the failures with the following general steps:
1. Geo automatically retries failures. If the failures are new and few in number, or if you suspect the root cause is already resolved, then you can wait to see if the failures go away.
1. If failures were present for a long time, then many retries have already occurred, and the interval between automatic retries has increased to up to 4 hours depending on the type of failure. If you suspect the root cause is already resolved, you can [manually retry replication or verification](#manually-retry-replication-or-verification).
1. If the failures persist, use the following sections to try to resolve them.
### Manually retry replication or verification
## Manually retry replication or verification
A Geo data type is a specific class of data that is required by one or more GitLab features to store relevant information and is replicated by Geo to secondary sites.
@ -258,7 +55,7 @@ With all this information, you can:
- [Manually resync and reverify individual components](#resync-and-reverify-individual-components)
- [Manually resync and reverify multiple components](#resync-and-reverify-multiple-components)
#### Resync and reverify individual components
### Resync and reverify individual components
[You can force a resync and reverify individual items](https://gitlab.com/gitlab-org/gitlab/-/issues/364727)
for all component types managed by the [self-service framework](../../../../development/geo/framework.md) using the UI.
@ -374,7 +171,7 @@ to enact the following, basic troubleshooting steps:
registry.replicator.verify_async
```
#### Resync and reverify multiple components
### Resync and reverify multiple components
NOTE:
There is an [issue to implement this functionality in the **Admin** area UI](https://gitlab.com/gitlab-org/gitlab/-/issues/364729).
@ -385,7 +182,7 @@ Commands that change data can cause damage if not run correctly or under the rig
The following sections describe how to use internal application commands in the [Rails console](../../../../administration/operations/rails_console.md#starting-a-rails-console-session)
to cause bulk replication or verification.
##### Reverify all components (or any SSF data type which supports verification)
#### Reverify all components (or any SSF data type which supports verification)
For GitLab 16.4 and earlier:
@ -404,7 +201,7 @@ For GitLab 16.4 and earlier:
For other SSF data types replace `Upload` in the command above with the desired model class.
##### Verify blob files on the secondary manually
#### Verify blob files on the secondary manually
This iterates over all package files on the secondary, looking at the
`verification_checksum` stored in the database (which came from the primary)
@ -564,33 +361,6 @@ Failed to open TCP connection to localhost:5000 (Connection refused - connect(2)
It happens if the container registry is not enabled on the secondary site. To fix it, check that the container registry
is [enabled on the secondary site](../../../packages/container_registry.md#enable-the-container-registry). Note that if [Let's Encrypt integration is disabled](https://docs.gitlab.com/omnibus/settings/ssl/#configure-https-manually), the container registry is disabled as well, and you must [configure it manually](../../../packages/container_registry.md#configure-container-registry-under-its-own-domain).
## Investigate causes of database replication lag
If the output of `sudo gitlab-rake geo:status` shows that `Database replication lag` remains significantly high over time, the primary node in database replication can be checked to determine the status of lag for
different parts of the database replication process. These values are known as `write_lag`, `flush_lag`, and `replay_lag`. For more information, see
[the official PostgreSQL documentation](https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-REPLICATION-VIEW).
Run the following command from the primary Geo node's database to provide relevant output:
```shell
gitlab-psql -xc 'SELECT write_lag,flush_lag,replay_lag FROM pg_stat_replication;'
-[ RECORD 1 ]---------------
write_lag | 00:00:00.072392
flush_lag | 00:00:00.108168
replay_lag | 00:00:00.108283
```
If one or more of these values is significantly high, this could indicate a problem and should be investigated further. When determining the cause, consider that:
- `write_lag` indicates the time elapsed since WAL bytes were sent by the primary and received by the secondary, but not yet flushed or applied.
- A high `write_lag` value may indicate degraded network performance or insufficient network speed between the primary and secondary nodes.
- A high `flush_lag` value may indicate degraded or sub-optimal disk I/O performance with the secondary node's storage device.
- A high `replay_lag` value may indicate long running transactions in PostgreSQL, or the saturation of a needed resource like the CPU.
- The difference in time between `write_lag` and `flush_lag` indicates that WAL bytes have been sent to the underlying storage system, but it has not reported that they were flushed.
This data is most likely not fully written to a persistent storage, and likely held in some kind of volatile write cache.
- The difference between `flush_lag` and `replay_lag` indicates WAL bytes that have been successfully persisted to storage, but could not be replayed by the database system.
## Resetting Geo **secondary** site replication
If you get a **secondary** site in a broken state and want to reset the replication state,

View File

@ -252,7 +252,7 @@ of a repository. When creating the first fork, we:
1. Create an object pool repository that contains all objects of the repository
that is about to be forked.
1. Link the repository to this new object pool via the alternates mechanism of Git.
1. Link the repository to this new object pool by using the alternates mechanism of Git.
1. Repack the repository so that it uses objects from the object pool. It thus
can drop its own copy of the objects.

View File

@ -131,8 +131,8 @@ WARNING:
Be careful when choosing the domain used for receiving incoming email.
For example, suppose your top-level company domain is `hooli.com`.
All employees in your company have an email address at that domain via Google
Apps, and your company's private Slack instance requires a valid `@hooli.com`
All employees in your company have an email address at that domain through Google
Workspace, and your company's private Slack instance requires a valid `@hooli.com`
email address to sign up.
If you also host a public-facing GitLab instance at `hooli.com` and set your

View File

@ -752,7 +752,7 @@ Plan.default.actual_limits.update!(dast_profile_schedules: 50)
The default maximum size of the CI artifacts archive is 5 megabytes.
You can change this limit via the [GitLab Rails console](operations/rails_console.md#starting-a-rails-console-session).
You can change this limit by using the [GitLab Rails console](operations/rails_console.md#starting-a-rails-console-session).
To update the maximum size of the CI artifacts archive,
update `max_artifacts_content_include_size` with the new value. For example, to set it to 20 MB:
@ -794,7 +794,7 @@ by multiplying [`max_yaml_size_bytes` (default 1 MB)](#maximum-size-and-depth-of
with [`ci_max_includes` (default 150)](../api/settings.md#list-of-settings-that-can-be-accessed-via-api-calls).
If both limits are unmodified, the default is set to 1 MB x 150 = `157286400` bytes (150 MB).
You can change this limit via the [GitLab Rails console](operations/rails_console.md#starting-a-rails-console-session).
You can change this limit by using the [GitLab Rails console](operations/rails_console.md#starting-a-rails-console-session).
To update the maximum memory that can be allocated for the CI/CD configuration,
update `ci_max_total_yaml_size_bytes` with the new value. For example, to set it to 20 MB:

View File

@ -211,7 +211,7 @@ License.current # check to make sure it applied
```
These snippets can be saved to a file and executed [using the Rails Runner](operations/rails_console.md#using-the-rails-runner) so the
license can be applied via shell automation scripts.
license can be applied through shell automation scripts.
This is needed for example in a known edge-case with
[expired license and multiple LDAP servers](../administration/auth/ldap/ldap-troubleshooting.md#expired-license-causes-errors-with-multiple-ldap-servers).

View File

@ -313,7 +313,7 @@ grep "fatal: " current |
### Parsing `gitlab-shell/gitlab-shell.log`
For investigating Git calls via SSH.
For investigating Git calls through SSH.
Find the top 20 calls by project and user:

View File

@ -255,7 +255,7 @@ The user's state is set to active and they consume a
[seat](../subscriptions/self_managed/index.md#billable-users).
NOTE:
A deactivated user can also activate their account themselves by logging back in via the UI.
A deactivated user can also activate their account themselves by logging back in through the UI.
Users can also be activated using the [GitLab API](../api/user_moderation.md#activate-a-user).
## Ban and unban users

View File

@ -876,7 +876,7 @@ with the Fog library that GitLab uses. Symptoms include an error in `production.
### Artifacts always downloaded with filename `download`
Downloaded artifact filenames are set via the `response-content-disposition` header in the
Downloaded artifact filenames are set with the `response-content-disposition` header in the
[GetObject request](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html).
If the S3 provider does not support this header, the downloaded file is always saved as `download`.

View File

@ -275,8 +275,8 @@ Additionally, `current_connections` should be greater than 1.
### Message: `LOG: invalid CIDR mask in address`
See the suggested fix [in Geo documentation](../geo/replication/troubleshooting/replication.md#message-log--invalid-cidr-mask-in-address).
See the suggested fix [in Geo documentation](../geo/replication/troubleshooting/postgresql_replication.md#message-log--invalid-cidr-mask-in-address).
### Message: `LOG: invalid IP mask "md5": Name or service not known`
See the suggested fix [in Geo documentation](../geo/replication/troubleshooting/replication.md#message-log--invalid-ip-mask-md5-name-or-service-not-known).
See the suggested fix [in Geo documentation](../geo/replication/troubleshooting/postgresql_replication.md#message-log--invalid-ip-mask-md5-name-or-service-not-known).

View File

@ -377,7 +377,7 @@ Refer to `group_rename` and `user_rename` for that case.
}
```
If the user is blocked via LDAP, `state` is `ldap_blocked`.
If the user is blocked through LDAP, `state` is `ldap_blocked`.
**User renamed:**

View File

@ -286,7 +286,7 @@ HINT: Free one or increase max_replication_slots.
### Geo replication errors
If you receive errors like this example, read about how to resolve
[Geo replication errors](../geo/replication/troubleshooting/replication.md#fixing-postgresql-database-replication-errors):
[Geo replication errors](../geo/replication/troubleshooting/postgresql_replication.md):
```plaintext
ERROR: replication slots can only be used if max_replication_slots > 0

View File

@ -214,9 +214,9 @@ requirements:
**Two-factor authentication** - [GitLab supports the following second factors](../user/profile/account/two_factor_authentication.md):
- Time-based one-time passwords
- One-time password authenticators
- WebAuthN devices
- WebAuthn devices
[Instructions for enabling two-factor authentication](../user/profile/account/two_factor_authentication.md#enable-two-factor-authentication)
are provided in the documentation. Customers pursuing FedRAMP must consider
@ -224,7 +224,7 @@ two-factor providers that are FedRAMP authorized and support FIPS
requirements. FedRAMP authorized providers can be found on the [FedRAMP Marketplace](https://marketplace.fedramp.gov/products).
When selecting a second factor, it is important to note that NIST and
FedRAMP are now indicating that phishing resistant authentication, such
as WebAuthN, must be used (IA-2).
as WebAuthn, must be used (IA-2).
**SSH keys**

View File

@ -17,5 +17,5 @@ unauthorized access and potential data breaches.
You should rotate the secrets of all third-party integrations at least yearly.
An incomplete list of such secrets:
- [FortiAuthenticator](../user/profile/account/two_factor_authentication.md#enable-one-time-password-using-fortiauthenticator)
- [FortiToken Cloud](../user/profile/account/two_factor_authentication.md#enable-one-time-password-using-fortitoken-cloud)
- [FortiAuthenticator](../user/profile/account/two_factor_authentication.md#enable-a-one-time-password-authenticator-using-fortiauthenticator)
- [FortiToken Cloud](../user/profile/account/two_factor_authentication.md#enable-a-one-time-password-authenticator-using-fortitoken-cloud)

View File

@ -220,7 +220,7 @@ Then authenticate by either:
- Entering the correct OTP.
- In GitLab 15.3 and later, responding to a device push notification if
[FortiAuthenticator is enabled](../user/profile/account/two_factor_authentication.md#enable-one-time-password-using-fortiauthenticator).
[FortiAuthenticator is enabled](../user/profile/account/two_factor_authentication.md#enable-a-one-time-password-authenticator-using-fortiauthenticator).
After successful authentication, you can perform [Git over SSH operations](../development/gitlab_shell/features.md#git-operations) for 15 minutes (default) with the associated
SSH key.

View File

@ -486,10 +486,6 @@ We introduces the OpenTofu CI/CD template in 16.8 as CI/CD components were not a
With the introduction of [GitLab CI/CD components for self-managed users](https://docs.gitlab.com/ee/ci/components/#use-a-gitlabcom-component-in-a-self-managed-instance)
we are removing the redundant OpenTofu CI/CD templates in favor of the CI/CD components.
We introduced the OpenTofu CI/CD template in 16.8 because CI/CD components were not yet available for self-managed instances.
With the introduction of [GitLab CI/CD components for self-managed users](https://docs.gitlab.com/ee/ci/components/#use-a-gitlabcom-component-in-a-self-managed-instance),
we are removing the redundant OpenTofu CI/CD templates in favor of the [OpenTofu CI/CD component](https://gitlab.com/components/opentofu).
For information about migrating from the CI/CD template to the component, see the [OpenTofu component documentation](https://gitlab.com/components/opentofu#usage-on-self-managed).
</div>

View File

@ -111,7 +111,7 @@ through the AMI process, or upgrade the package itself:
and a redeployment using AMIs issues the nodes with new hostnames. Even though
the storage is the same, Gitaly Cluster does not work when the hostnames change.
The Praefect nodes, however, can be upgraded via an AMI redeployment process:
The Praefect nodes, however, can be upgraded by using an AMI redeployment process:
1. The AMI redeployment process must include `gitlab-ctl reconfigure`.
Set `praefect['auto_migrate'] = false` on the AMI so all nodes get this. This

View File

@ -26,8 +26,8 @@ Achieving _true_ zero downtime as part of an upgrade is notably difficult for an
this guide has been tested as given against our HA [Reference Architectures](../administration/reference_architectures/index.md)
and was found to result in effectively no observable downtime, but please be aware your mileage may vary dependent on the specific system makeup.
For additional confidence some customers have found success via further techniques such as the
manual draining of nodes via specific load balancer or infrastructure capabilities. These techniques depend greatly
For additional confidence, some customers have found success with further techniques such as
manually draining nodes by using specific load balancer or infrastructure capabilities. These techniques depend greatly
on the underlying infrastructure capabilities and as a result are not covered in this guide.
For any additional information, please reach out to your GitLab representative
or the [Support team](https://about.gitlab.com/support/).
@ -58,7 +58,7 @@ In addition to the above, please be aware of the following considerations:
- The time necessary to complete these migrations can be reduced by increasing the number of Sidekiq workers that can process jobs in the
`background_migration` queue. To see the size of this queue, [check for background migrations before upgrading](background_migrations.md).
- [PostgreSQL major version upgrades](../administration/postgresql/replication_and_failover.md#near-zero-downtime-upgrade-of-postgresql-in-a-patroni-cluster) are a separate process and not covered by zero-downtime upgrades (smaller upgrades are covered).
- Zero-downtime upgrades are supported for any GitLab components you've deployed with the GitLab Linux package. If you've deployed select components via a supported third party service, such as PostgreSQL in AWS RDS or Redis in GCP Memorystore, upgrades for those services will need to be performed separately as per their standard processes.
- Zero-downtime upgrades are supported for any GitLab components you've deployed with the GitLab Linux package. If you've deployed select components through a supported third party service, such as PostgreSQL in AWS RDS or Redis in GCP Memorystore, upgrades for those services will need to be performed separately as per their standard processes.
- As a general guideline, the larger amount of data you have, the more time it will take for the upgrade to complete. In testing, any database smaller than 10 GB shouldn't generally take longer than an hour, but your mileage may vary.
NOTE:
@ -147,7 +147,7 @@ This process applies to both Gitaly Sharded and Cluster setups. Run through the
### Praefect
For Gitaly Cluster setups, Praefect will be deployed and needs to be upgraded in similar fashion via a graceful reload.
For Gitaly Cluster setups, you must deploy and upgrade Praefect in a similar way by using a graceful reload.
NOTE:
The upgrade process attempts to do a graceful handover to a new Praefect process.
@ -213,24 +213,26 @@ nodes to be a deploy node. This target node will be configured to run migrations
Rails as a webserver consists primarily of [Puma](../administration/operations/puma.md), [Workhorse](../development/workhorse/index.md), and [NGINX](../development/architecture.md#nginx).
Each of these components have different behaviours when it comes to doing a live upgrade. While Puma can allow
for a graceful reload, Workhorse doesn't. As such, the best approach is to drain the node gracefully through other means such as via your Load Balancer. It's also possible to do this via NGINX on the node through its graceful shutdown functionality. In this section we'll use the NGINX approach.
for a graceful reload, Workhorse doesn't. The best approach is to drain the node gracefully through other means,
such as by using your load balancer. You can also do this by using NGINX on the node through its graceful shutdown
functionality. This section explains the NGINX approach.
In addition to the above, Rails is where the main database migrations need to be executed. Like Praefect, this is best done via the deploy node approach. If PgBouncer is currently being used, it also needs to be bypassed as Rails uses an advisory lock when attempting to run a migration to prevent concurrent migrations from running on the same database. These locks are not shared across transactions, resulting in `ActiveRecord::ConcurrentMigrationError` and other issues when running database migrations using PgBouncer in transaction pooling mode.
In addition to the above, Rails is where the main database migrations need to be executed. Like Praefect, the best approach is to use the deploy node. If PgBouncer is currently being used, it also needs to be bypassed as Rails uses an advisory lock when attempting to run a migration to prevent concurrent migrations from running on the same database. These locks are not shared across transactions, resulting in `ActiveRecord::ConcurrentMigrationError` and other issues when running database migrations using PgBouncer in transaction pooling mode.
1. On the **Rails deploy node**:
1. Drain the node of traffic gracefully. This can be done in various ways, but one approach is via
NGINX by sending it a `QUIT` signal and then stopping the service. As an example this could be
done via the following shell script:
1. Drain the node of traffic gracefully. You can do this in various ways, but one
approach is to use NGINX by sending it a `QUIT` signal and then stopping the service.
As an example, you can do this by using the following shell script:
```shell
# Send QUIT to NGINX master process to drain and exit
NGINX_PID=$(cat /var/opt/gitlab/nginx/nginx.pid)
kill -QUIT $NGINX_PID
# Wait for drain to complete
while kill -0 $NGINX_PID 2>/dev/null; do sleep 1; done
# Stop NGINX service to prevent automatic restarts
gitlab-ctl stop nginx
```
@ -261,18 +263,18 @@ In addition to the above, Rails is where the main database migrations need to be
1. On every **other Rails node** sequentially:
1. Drain the node of traffic gracefully. This can be done in various ways, but one approach is via
NGINX by sending it a `QUIT` signal and then stopping the service. As an example this could be
done via the following shell script:
1. Drain the node of traffic gracefully. You can do this in various ways, but one
approach is to use NGINX by sending it a `QUIT` signal and then stopping the service.
As an example, you can do this by using the following shell script:
```shell
# Send QUIT to NGINX master process to drain and exit
NGINX_PID=$(cat /var/opt/gitlab/nginx/nginx.pid)
kill -QUIT $NGINX_PID
# Wait for drain to complete
while kill -0 $NGINX_PID 2>/dev/null; do sleep 1; done
# Stop NGINX service to prevent automatic restarts
gitlab-ctl stop nginx
```
@ -378,9 +380,9 @@ below:
1. On the **Rails deploy node**:
1. Drain the node of traffic gracefully. This can be done in various ways, but one approach is via
NGINX by sending it a `QUIT` signal and then stopping the service. As an example this could be
done via the following shell script:
1. Drain the node of traffic gracefully. You can do this in various ways, but one
approach is to use NGINX by sending it a `QUIT` signal and then stopping the service.
As an example, you can do this by using the following shell script:
```shell
# Send QUIT to NGINX master process to drain and exit
@ -428,9 +430,9 @@ below:
1. On every **other Rails node** sequentially:
1. Drain the node of traffic gracefully. This can be done in various ways, but one approach is via
NGINX by sending it a `QUIT` signal and then stopping the service. As an example this could be
done via the following shell script:
1. Drain the node of traffic gracefully. You can do this in various ways, but one
approach is to use NGINX by sending it a `QUIT` signal and then stopping the service.
As an example, you can do this by using the following shell script:
```shell
# Send QUIT to NGINX master process to drain and exit

View File

@ -75,7 +75,7 @@ tips for optimizing DAST scans in a [blog post](https://about.gitlab.com/blog/20
For information on this, see the [general Application Security troubleshooting section](../../../ci/jobs/job_artifacts_troubleshooting.md#error-message-no-files-to-upload).
## Getting error `dast job: chosen stage does not exist` when including DAST CI template
## Getting error `dast job: chosen stage dast does not exist` when including DAST CI template
To avoid overwriting stages from other CI files, newer versions of the DAST CI template do not
define stages. If you recently started using `DAST.latest.gitlab-ci.yml` or upgraded to a new major

View File

@ -438,7 +438,7 @@ The above `.gitlab-ci.yml` causes a linting error:
```plaintext
Unable to create pipeline
- dependency_scanning job: chosen stage does not exist; available stages are .pre
- dependency_scanning job: chosen stage test does not exist; available stages are .pre
- unit-tests
- .post
```

View File

@ -44,13 +44,47 @@ Before you can use GitLab Duo Workflow in VS Code:
### Install Docker and set the socket file path
1. Download the [script](https://gitlab.com/-/snippets/3745948). This downloads Docker, Colima, pulls workflow generic image and updates VS code settings to update Docker context for Duo Workflow. You will need to [Authenticate with the GitLab container registry](../packages/container_registry/authenticate_with_container_registry.md) to pull the generic workflow image. You can run the script with `--dry-run` flag to know the dependencies that will be installed with the script.
Duo Workflow needs an execution platform where it can execute arbitrary code,
read and write files, and make API calls to GitLab.
#### Automated setup
Installs Docker and Colima, and sets the Docker socket path in VS Code settings.
You can run the script with the `--dry-run` flag to check the dependencies
that get installed with the script.
1. Download the [script](https://gitlab.com/-/snippets/3745948).
1. Run the script.
```shell
chmod +x duo_workflow_runtime.sh
./duo_workflow_runtime.sh
```
chmod +x duo_workflow_runtime.sh
./duo_workflow_runtime.sh
```
#### Manual setup
Sets the socket path if you have
[Docker or Docker alternatives](https://handbook.gitlab.com/handbook/tools-and-tips/mac/#docker-desktop)
installed already.
1. Access VS Code settings:
- On Mac: <kbd>Cmd</kbd> + <kbd>,</kbd>
- On Windows and Linux: <kbd>Ctrl</kbd> + <kbd>,</kbd>
1. In the upper-right corner, select the **Open Settings (JSON)** icon.
1. Ensure the Docker socket settings are configured. If not, add this line to `settings.json` and save:
- For Rancher Desktop
```json
"gitlab.duoWorkflow.dockerSocket": "/Users/<username>/.rd/docker.sock"
```
- For Colima
```json
"gitlab.duoWorkflow.dockerSocket":
"/Users/<username>/.colima/default/docker.sock"
```
## Use GitLab Duo Workflow in VS Code
@ -121,7 +155,7 @@ If you encounter issues:
1. Check that your open project in VS Code corresponds to the GitLab project you want to interact with.
1. Ensure that you've checked out the branch as well.
1. Check your Docker and Docker socket configuration:
1. Follow the setup instructions again.
1. Try [manual](#manual-setup) Docker socket configuration.
1. If using Colima and encountering issues, try restarting it:
```shell

View File

@ -17,7 +17,7 @@ On self-managed GitLab, by default this feature is not available. To make it ava
This is the group-level documentation. For self-managed instances, see the [administration documentation](../../../administration/reporting/git_abuse_rate_limit.md).
Git abuse rate limiting is a feature to automatically ban users who download, clone, pull, fetch, or fork more than a specified number of repositories of a group in a given time frame. Banned users cannot access the top-level group or any of its non-public subgroups via HTTP or SSH. The rate limit also applies to users who authenticate with [personal](../../../user/profile/personal_access_tokens.md) or [group access tokens](../../../user/group/settings/group_access_tokens.md), as well as [CI/CD job tokens](../../../ci/jobs/ci_job_token.md). Access to unrelated groups is unaffected.
Git abuse rate limiting is a feature to automatically ban users who download, clone, pull, fetch, or fork more than a specified number of repositories of a group in a given time frame. Banned users cannot access the top-level group or any of its non-public subgroups through HTTP or SSH. The rate limit also applies to users who authenticate with [personal](../../../user/profile/personal_access_tokens.md) or [group access tokens](../../../user/group/settings/group_access_tokens.md), as well as [CI/CD job tokens](../../../ci/jobs/ci_job_token.md). Access to unrelated groups is unaffected.
Git abuse rate limiting does not apply to top-level group owners, [deploy tokens](../../../user/project/deploy_tokens/index.md), or [deploy keys](../../../user/project/deploy_keys/index.md).

View File

@ -91,7 +91,9 @@ Administrators can find a user's maximum permissions for a group or project.
The following error typically occurs when the user belongs to an external group that has been shared with your [projects](../project/members/sharing_projects_groups.md) or [groups](../project/members/sharing_projects_groups.md#invite-a-group-to-a-group):
<!-- vale gitlab_base.LatinTerms = NO -->
`Members who were invited via a group invitation cannot be removed. You can either remove the entire group, or ask an Owner of the invited group to remove the member.`
<!-- vale gitlab_base.LatinTerms = YES -->
To remove the user as a billable member, follow one of these options:

View File

@ -64,11 +64,11 @@ For most package types, the following credential types are valid:
allows access to packages in the project running the job for the users running the pipeline.
Access to other external projects can be configured.
- If your organization uses two factor authentication (2FA), you must use a personal access token with the scope set to `api`.
- If you are publishing a package via CI/CD pipelines, you must use a CI job token.
- If you are publishing a package by using CI/CD pipelines, you must use a CI job token.
NOTE:
If the "Package registry" feature is turned off for your project at **Settings > General > Visibility, project features, permissions**, you will receive a 403 Forbidden response.
Accessing package registry via deploy token is not available when external authorization is enabled.
Accessing the package registry with a deploy token is not available when external authorization is enabled.
## Use GitLab CI/CD

View File

@ -15,13 +15,13 @@ your account, they would need your username and password _and_ access to your se
GitLab supports as a second factor of authentication:
- Time-based one-time passwords ([TOTP](https://datatracker.ietf.org/doc/html/rfc6238)). When enabled, GitLab prompts
you for a code when you sign in. Codes are generated by your one-time password authenticator (for example, a password
- One-time password authenticators ([OTP](https://datatracker.ietf.org/doc/html/rfc6238)). When enabled, GitLab prompts
you for a code when you sign in. Codes are generated by your OTP authenticator (for example, a password
manager on one of your devices).
- WebAuthn devices. You're prompted to activate your WebAuthn device (usually by pressing a button on it) when
you supply your username and password to sign in. This performs secure authentication on your behalf.
If you set up a device, also set up a TOTP so you can still access your account if you lose the device.
If you set up a device, also set up an OTP so you can still access your account if you lose the device.
## Use personal access tokens with two-factor authentication
@ -54,14 +54,14 @@ git-credential-oauth is an open-source project supported by the community.
You can enable 2FA using a:
- One-time password authenticator. After you enable 2FA, back up your [recovery codes](#recovery-codes).
- OTP authenticator. After you enable 2FA, back up your [recovery codes](#recovery-codes).
- WebAuthn device.
Your account email must be confirmed to enable 2FA.
### Enable one-time password
### Enable a one-time password authenticator
To enable 2FA with a one-time password:
To enable 2FA with an OTP authenticator:
1. **In GitLab:**
1. Access your [**User settings**](../index.md#access-your-user-settings).
@ -89,13 +89,13 @@ To enable 2FA with a one-time password:
If you entered the correct pin, GitLab displays a list of [recovery codes](#recovery-codes). Download them and keep them
in a safe place.
### Enable one-time password using FortiAuthenticator
### Enable a one-time password authenticator using FortiAuthenticator
On self-managed GitLab, by default this feature is not available. To make it available per user, an administrator can
[enable the feature flag](../../../administration/feature_flags.md) named `forti_authenticator`.
On GitLab.com and GitLab Dedicated, this feature is not available.
You can use FortiAuthenticator as a one-time password (OTP) provider in GitLab. Users must:
You can use FortiAuthenticator as an OTP provider in GitLab. Users must:
- Exist in both FortiAuthenticator and GitLab with the same username.
- Have FortiToken configured in FortiAuthenticator.
@ -150,7 +150,7 @@ Configure FortiAuthenticator in GitLab. On your GitLab server:
(Linux package installations) or [restart](../../../administration/restart_gitlab.md#self-compiled-installations)
(self-compiled installations).
### Enable one-time password using Cisco Duo
### Enable a one-time password authenticator using Cisco Duo
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/15760) in GitLab 15.10.
@ -216,7 +216,7 @@ On your GitLab server:
1. For Linux package installations, [reconfigure GitLab](../../../administration/restart_gitlab.md#reconfigure-a-linux-package-installation).
For self-compiled installations, [restart GitLab](../../../administration/restart_gitlab.md#self-compiled-installations).
### Enable one-time password using FortiToken Cloud
### Enable a one-time password authenticator using FortiToken Cloud
DETAILS:
**Offering:** Self-managed
@ -227,7 +227,7 @@ On self-managed GitLab, by default this feature is not available. To make it ava
On GitLab.com and GitLab Dedicated, this feature is not available.
This feature is not ready for production use.
You can use FortiToken Cloud as a one-time password (OTP) provider in GitLab. Users must:
You can use FortiToken Cloud as an OTP provider in GitLab. Users must:
- Exist in both FortiToken Cloud and GitLab with the same username.
- Have FortiToken configured in FortiToken Cloud.
@ -298,7 +298,7 @@ WebAuthn is [supported by](https://caniuse.com/#search=webauthn) the following:
To set up 2FA with a WebAuthn-compatible device:
1. Optional. [Set up a one-time password](#enable-one-time-password).
1. Optional. [Set up an OTP authenticator](#enable-a-one-time-password-authenticator).
1. Access your [**User settings**](../index.md#access-your-user-settings).
1. Select **Account**.
1. Select **Enable Two-Factor Authentication**.
@ -325,8 +325,8 @@ You can lose access to your account if you clear your browser data.
## Recovery codes
Immediately after successfully enabling 2FA with a one-time password, you're prompted to download
a set of generated recovery codes. If you ever lose access to your one-time password authenticator, you can use one of
Immediately after successfully enabling 2FA with an OTP authenticator, you're prompted to download
a set of generated recovery codes. If you ever lose access to your OTP authenticator, you can use one of
these recovery codes to sign in to your account.
WARNING:
@ -361,9 +361,9 @@ If you regenerate 2FA recovery codes, save them. You can't use any previously cr
Signing in with 2FA enabled is only slightly different than the typical sign-in process. Enter your username and password
and you're presented with a second prompt, depending on which type of 2FA you've enabled.
### Sign in using a one-time password
### Sign in using a one-time password authenticator
When asked, enter the pin from your one-time password authenticator application or a recovery code to sign in.
When asked, enter the pin from your OTP authenticator or a recovery code to sign in.
### Sign in using a WebAuthn device
@ -375,7 +375,7 @@ in.
## Disable two-factor authentication
You can disable the TOTP authenticator and WebAuthn devices individually or simultaneously. To disable them simultaneously:
You can disable the OTP authenticator and WebAuthn devices individually or simultaneously. To disable them simultaneously:
1. Access your [**User settings**](../index.md#access-your-user-settings).
1. Select **Account**.
@ -392,7 +392,7 @@ DETAILS:
**Offering:** Self-managed
- Take care that 2FA keeps working after [restoring a GitLab backup](../../../administration/backup_restore/index.md).
- To ensure 2FA authorizes correctly with a time-based one-time password (TOTP) server, synchronize your GitLab
- To ensure 2FA authorizes correctly with an OTP server, synchronize your GitLab
server's time using a service like NTP. Otherwise, authorization can always fail because of time differences.
- The GitLab WebAuthn implementation does _not_ work when the GitLab instance is accessed from multiple hostnames
or FQDNs. Each WebAuthn registration is linked to the _current hostname_ at the time of registration, and

View File

@ -475,7 +475,7 @@ a session if the browser is closed or the existing session expires.
- [Sign-ins from unknown IP addresses or devices](notifications.md#notifications-for-unknown-sign-ins)
- [Attempted sign-ins using incorrect verification codes](notifications.md#notifications-for-attempted-sign-ins-using-incorrect-verification-codes)
- Manage applications that can [use GitLab as an OAuth provider](../../integration/oauth_provider.md)
- Manage [personal access tokens](personal_access_tokens.md) to access your account via API and authorized applications
- Manage [SSH keys](../ssh.md) to access your account via SSH
- Manage [personal access tokens](personal_access_tokens.md) to access your account through the API and authorized applications
- Manage [SSH keys](../ssh.md) to access your account by using SSH
- [Change the syntax highlighting theme](preferences.md#change-the-syntax-highlighting-theme)
- [View your active sessions](active_sessions.md) and revoke any of them if necessary

View File

@ -35,7 +35,7 @@ You might receive notifications for one of the following reasons:
or edit, or someone mentions <sup>1</sup> you.
- You've [enabled notifications in an issue, merge request, or epic](#notifications-on-issues-merge-requests-and-epics).
- You've configured notifications for the [project](#change-level-of-project-notifications) or [group](#group-notifications).
- You're subscribed to group or project pipeline notifications via the pipeline emails [integration](../project/integrations/index.md).
- You're subscribed to group or project pipeline notifications through the pipeline emails [integration](../project/integrations/index.md).
1. GitLab doesn't send a notification when
[a comment is edited to include a user mention](../discussions/index.md#edit-a-comment-to-add-a-mention).

View File

@ -209,14 +209,14 @@ if not already enabled. If a wildcard DNS entry was created resolving to the
Load Balancer, enter it in the `domain` field under the Auto DevOps settings.
Otherwise, the deployed app isn't externally available outside of the cluster.
![Deploy Pipeline](img/pipeline.png)
![Deploy Pipeline](img/pipeline_v11_0.png)
GitLab creates a new pipeline, which begins to build, test, and deploy the app.
After the pipeline has finished, your app runs in EKS, and is available
to users. Select **Operate > Environments**.
![Deployed Environment](img/environment.png)
![Deployed Environment](img/environment_v11_0.png)
GitLab displays a list of the environments and their deploy status, as well as
options to browse to the app, view monitoring metrics, and even access a shell
@ -299,7 +299,7 @@ Check that:
[external ID](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
match the value defined in the **Trust relationships** tab in AWS:
![AWS IAM Trust relationships](img/aws_iam_role_trust.png)
![AWS IAM Trust relationships](img/aws_iam_role_trust_v13_7.png)
### Could not load Security Groups for this VPC

View File

@ -209,7 +209,7 @@ After about 10 minutes, your cluster is ready.
## Disable Role-Based Access Control (RBAC) (optional)
When connecting a cluster via GitLab integration, you may specify whether the
When connecting a cluster through GitLab integration, you may specify whether the
cluster is RBAC-enabled or not. This affects how GitLab interacts with the
cluster for certain operations. If you did *not* check the **RBAC-enabled cluster**
checkbox at creation time, GitLab assumes RBAC is disabled for your cluster

View File

Before

Width:  |  Height:  |  Size: 56 KiB

After

Width:  |  Height:  |  Size: 56 KiB

View File

Before

Width:  |  Height:  |  Size: 20 KiB

After

Width:  |  Height:  |  Size: 20 KiB

View File

Before

Width:  |  Height:  |  Size: 15 KiB

After

Width:  |  Height:  |  Size: 15 KiB

View File

Before

Width:  |  Height:  |  Size: 35 KiB

After

Width:  |  Height:  |  Size: 35 KiB

View File

Before

Width:  |  Height:  |  Size: 35 KiB

After

Width:  |  Height:  |  Size: 35 KiB

View File

Before

Width:  |  Height:  |  Size: 53 KiB

After

Width:  |  Height:  |  Size: 53 KiB

View File

Before

Width:  |  Height:  |  Size: 62 KiB

After

Width:  |  Height:  |  Size: 62 KiB

View File

Before

Width:  |  Height:  |  Size: 40 KiB

After

Width:  |  Height:  |  Size: 40 KiB

View File

@ -27,7 +27,7 @@ pre-written code blocks or database queries against a given environment.
## Executable Runbooks
The JupyterHub app offered via the GitLab Kubernetes integration now ships
The JupyterHub app offered with the GitLab Kubernetes integration now ships
with Nurtch's Rubix library, providing a simple way to create DevOps
runbooks. A sample runbook is provided, showcasing common operations. While
Rubix makes it simple to create common Kubernetes and AWS workflows, you can
@ -94,8 +94,8 @@ the components outlined above and the pre-loaded demo runbook.
- email
JupyterHub:
authenticator_class: gitlab
extraConfig:
gitlab-config: |
extraConfig:
gitlab-config: |
c.KubeSpawner.cmd = ['jupyter-labhub']
c.GitLabOAuthenticator.scope = ['api read_repository write_repository']
@ -149,7 +149,7 @@ the components outlined above and the pre-loaded demo runbook.
GitLab instance with OAuth2. This button redirects you to a page at GitLab
requesting authorization for JupyterHub to use your GitLab account.
![authorize Jupyter](img/authorize-jupyter.png)
![authorize Jupyter](img/authorize_jupyter_v11_6.png)
1. Select **Authorize**, and GitLab redirects you to the JupyterHub application.
1. Select **Start My Server** to start the server in a few seconds.
@ -159,11 +159,11 @@ the components outlined above and the pre-loaded demo runbook.
1. Select the **DevOps-Runbook-Demo** folder located on the left panel.
![demo runbook](img/demo-runbook.png)
![demo runbook](img/demo_runbook_v11_6.png)
1. Select the `Nurtch-DevOps-Demo.ipynb` runbook.
![sample runbook](img/sample-runbook.png)
![sample runbook](img/sample_runbook_v11_6.png)
Jupyter displays the runbook's contents in the right-hand side of the screen.
The **Setup** section displays your `PRIVATE_TOKEN` and your `PROJECT_ID`.
@ -197,14 +197,14 @@ the components outlined above and the pre-loaded demo runbook.
1. Go to **Settings > CI/CD > Variables** to create
the variables in your project.
![GitLab variables](img/gitlab-variables.png)
![GitLab variables](img/gitlab_variables_v11_6.png)
1. Select **Save variables**.
1. In Jupyter, select the **Run SQL queries in Notebook** heading, and then select
**Run**. The results are displayed inline as follows:
![PostgreSQL query](img/postgres-query.png)
![PostgreSQL query](img/postgres_query_v11_6.png)
You can try other operations, such as running shell scripts or interacting with a
Kubernetes cluster. Visit the

View File

@ -200,9 +200,20 @@ There are a few scenarios where a deploy key fails to push to a
- The deploy key has been [revoked](#revoke-project-access-of-a-deploy-key).
- **No one** is selected in [the **Allowed to push and merge** section](../repository/branches/protected.md#add-protection-to-existing-branches) of the protected branch.
All deploy keys are associated to an account. Since the permissions for an account can change, this might lead to scenarios where a deploy key that was working is suddenly unable to push to a protected branch.
This issue occurs because all deploy keys are associated to an account. Because the permissions for an account can change, this might lead to scenarios where a deploy key that was working is suddenly unable to push to a protected branch.
We recommend you create a service account, and associate a deploy key to the service account, for projects using deploy keys.
To resolve this issue, you can use the deploy keys API to create deploy keys for project service account users, instead of for your own users:
1. [Create a service account user](../../../api/group_service_accounts.md#create-a-service-account-user).
1. [Create a personal access token](../../../api/user_tokens.md#create-a-personal-access-token) for that service account user. This token must have at least the `api` scope.
1. [Invite the service account user to the project](../../profile/service_accounts.md#add-to-a-subgroup-or-project).
1. Use the deploy key API to [create a deploy key for the service account user](../../../api/deploy_keys.md#add-deploy-key):
```shell
curl --request POST --header "PRIVATE-TOKEN: <service_account_access_token>" --header "Content-Type: application/json" \
--data '{"title": "My deploy key", "key": "ssh-rsa AAAA...", "can_push": "true"}' \
"https://gitlab.example.com/api/v4/projects/5/deploy_keys/"
```
#### Identify deploy keys associated with non-member and blocked users

View File

@ -89,7 +89,7 @@ Commit message templates support these variables:
| `%{first_commit}` | Full message of the first commit in merge request diff. | `Update README.md` |
| `%{first_multiline_commit}` | Full message of the first commit that's not a merge commit and has more than one line in message body. Merge request title if all commits aren't multiline. | `Update README.md`<br><br>`Improved project description in readme file.` |
| `%{url}` | Full URL to the merge request. | `https://gitlab.com/gitlab-org/gitlab/-/merge_requests/1` |
| `%{reviewed_by}` | Line-separated list of the merge request reviewers, based on users who submit a review via batch comments, in a `Reviewed-by` Git commit trailer format. | `Reviewed-by: Sidney Jones <sjones@example.com>` <br> `Reviewed-by: Zhang Wei <zwei@example.com>` |
| `%{reviewed_by}` | Line-separated list of the merge request reviewers, based on users who submit a review by using batch comments, in a `Reviewed-by` Git commit trailer format. | `Reviewed-by: Sidney Jones <sjones@example.com>` <br> `Reviewed-by: Zhang Wei <zwei@example.com>` |
| `%{approved_by}` | Line-separated list of the merge request approvers in an `Approved-by` Git commit trailer format. | `Approved-by: Sidney Jones <sjones@example.com>` <br> `Approved-by: Zhang Wei <zwei@example.com>` |
| `%{merged_by}` | User who merged the merge request. | `Alex Garcia <agarcia@example.com>` |
| `%{merge_request_author}` | Name and email of the merge request author. | `Zane Doe <zdoe@example.com>` |

View File

@ -233,7 +233,7 @@ To secure your custom domain with GitLab Pages you can opt by:
You can use any certificate satisfying the following requirements:
- A GitLab Pages website up and running accessible via a custom domain.
- A GitLab Pages website is up and running, accessible on a custom domain.
- **A PEM certificate**: it is the certificate generated by the CA,
which needs to be added to the field **Certificate (PEM)**.
- **An [intermediate certificate](https://en.wikipedia.org/wiki/Intermediate_certificate_authority)**: (also known as a "chain certificate"), it is

View File

@ -125,7 +125,7 @@ If you are running a self-managed instance of GitLab,
### Configure GitLab Pages in a Helm Chart (Kubernetes) instance
To configure GitLab Pages on instances deployed via Helm chart (Kubernetes), use either:
To configure GitLab Pages on instances deployed with Helm chart (Kubernetes), use either:
- [The `gitlab-pages` subchart](https://docs.gitlab.com/charts/charts/gitlab/gitlab-pages/).
- [An external GitLab Pages instance](https://docs.gitlab.com/charts/advanced/external-gitlab-pages/).

View File

@ -136,7 +136,7 @@ Creating a release using a CI/CD job could potentially trigger multiple pipeline
- Tag first, release second:
1. A tag is created via UI or pushed.
1. A tag is created from the UI or pushed.
1. A tag pipeline is triggered, and runs `release` job.
1. A release is created.

View File

@ -65,7 +65,7 @@ Each link as an asset has the following attributes:
| `name` | The name of the link. | Yes |
| `url` | The URL to download a file. | Yes |
| `filepath` | The redirect link to the `url`. Must start with a slash (`/`). See [this section](#permanent-links-to-release-assets) for more information. | No |
| `link_type` | The content kind of what users can download via `url`. See [this section](#link-types) for more information. | No |
| `link_type` | The content kind of what users can download with `url`. See [this section](#link-types) for more information. | No |
#### Permanent links to release assets

View File

@ -316,7 +316,7 @@ notification email with the verification result.
If the verification failed, the email also contains details of the reason.
If the verification was successful, the custom email address is ready to be used.
You can now enable sending Service Desk emails via the custom email address.
You can now enable sending Service Desk emails with the custom email address.
#### Troubleshooting your configuration
@ -371,7 +371,7 @@ To troubleshoot this:
### Enable or disable the custom email address
After the custom email address has been verified, administrators can enable or disable sending Service Desk emails via the custom email address.
After the custom email address has been verified, administrators can enable or disable sending Service Desk emails with the custom email address.
To **enable** the custom email address:
@ -1006,7 +1006,7 @@ or completely separately.
1. GitLab offers two methods to transport emails from `mail_room` to the GitLab
application. You can configure the `delivery_method` for each email setting individually:
1. Recommended: `webhook` (default in GitLab 15.3 and later) sends the email payload via an API POST request to your GitLab
1. Recommended: `webhook` (default in GitLab 15.3 and later) sends the email payload with an API POST request to your GitLab
application. It uses a shared token to authenticate. If you choose this method,
make sure the `mail_room` process can access the API endpoint and distribute the shared
token across all application nodes.
@ -1019,7 +1019,7 @@ or completely separately.
gitlab_rails['incoming_email_delivery_method'] = "webhook"
# The URL that mail_room can contact. You can also use an internal URL or IP,
# just make sure mail_room can access the GitLab API via that address.
# just make sure mail_room can access the GitLab API with that address.
# Do not end with "/".
gitlab_rails['incoming_email_gitlab_url'] = "https://gitlab.example.com"
@ -1033,7 +1033,7 @@ or completely separately.
gitlab_rails['service_desk_email_delivery_method'] = "webhook"
# The URL that mail_room can contact. You can also use an internal URL or IP,
# just make sure mail_room can access the GitLab API via that address.
# just make sure mail_room can access the GitLab API with that address.
# Do not end with "/".
gitlab_rails['service_desk_email_gitlab_url'] = "https://gitlab.example.com"

View File

@ -37,7 +37,7 @@ are sent as emails:
![Service Desk reply email](img/service_desk_reply.png)
Any responses they send via email are displayed in the issue itself.
Any responses they send by email are displayed in the issue itself.
For additional information see [External participants](external_participants.md) and the
[headers used for treating email](../../../administration/incoming_email.md#accepted-headers).
@ -56,7 +56,9 @@ To create a Service Desk ticket from the UI:
1. Optional. Add a comment on the ticket to send an initial Service Desk email to the external participant.
<i class="fa fa-youtube-play youtube" aria-hidden="true"></i>
<!-- vale gitlab_base.LatinTerms = NO -->
For a walkthrough, see [Create Service Desk tickets via the UI and API (GitLab 16.10)](https://youtu.be/ibUGNc2wifQ).
<!-- vale gitlab_base.LatinTerms = YES -->
<!-- Video published on 2024-03-05 -->
## As a responder to the issue

View File

@ -95,7 +95,7 @@ If the Go module is located under a private subgroup like
`gitlab.com/namespace/subgroup/go-module`, then the Git authentication doesn't work.
It happens, because `go get` makes an unauthenticated request to discover
the repository path.
Without an HTTP authentication via `.netrc` file, GitLab responds with
Without an HTTP authentication by using a `.netrc` file, GitLab responds with
`gitlab.com/namespace/subgroup.git` to prevent a security risk of exposing
the project's existence for unauthenticated users.
As a result, the Go module cannot be downloaded.

View File

@ -0,0 +1,10 @@
# frozen_string_literal: true

module Gitlab
  module BackgroundMigration
    # Batched background migration that backfills the `project_id` sharding
    # key on `packages_nuget_symbols` rows. All backfill mechanics (batching,
    # the column to copy from, etc.) are inherited from
    # BackfillDesiredShardingKeyJob, which is defined elsewhere in the
    # codebase — this subclass only supplies the per-migration metadata.
    class BackfillPackagesNugetSymbolsProjectId < BackfillDesiredShardingKeyJob
      # Name under which this migration's progress/metrics are recorded.
      operation_name :backfill_packages_nuget_symbols_project_id
      feature_category :package_registry
    end
  end
end

View File

@ -84,7 +84,7 @@ module Gitlab
return unless job[:stage]
unless job[:stage].is_a?(String) && job[:stage].in?(@stages)
error!("#{name} job: chosen stage does not exist; available stages are #{@stages.join(", ")}")
error!("#{name} job: chosen stage #{job[:stage]} does not exist; available stages are #{@stages.join(", ")}")
end
end

View File

@ -7372,6 +7372,9 @@ msgstr ""
msgid "Are you sure you want to delete this WebAuthn device?"
msgstr ""
msgid "Are you sure you want to delete this WebAuthn device? Enter your password to continue."
msgstr ""
msgid "Are you sure you want to delete this branch target?"
msgstr ""
@ -50253,9 +50256,6 @@ msgstr ""
msgid "SecurityReports|All tools"
msgstr ""
msgid "SecurityReports|Although it's rare to have no vulnerabilities, it can happen. Check your project's security configuration to make sure you've set up your security scans correctly."
msgstr ""
msgid "SecurityReports|Change status"
msgstr ""
@ -50376,6 +50376,9 @@ msgstr ""
msgid "SecurityReports|Historical view of open vulnerabilities in the default branch. Excludes vulnerabilities that were resolved or dismissed. %{linkStart}Learn more.%{linkEnd}"
msgstr ""
msgid "SecurityReports|If you were expecting vulnerabilities to be shown here, check that you've completed the %{linkStart}security scanning prerequisites%{linkEnd}, or check the other vulnerability types in the tabs above."
msgstr ""
msgid "SecurityReports|Image"
msgstr ""
@ -50427,7 +50430,7 @@ msgstr ""
msgid "SecurityReports|No longer detected"
msgstr ""
msgid "SecurityReports|No vulnerabilities found"
msgid "SecurityReports|No vulnerabilities to report"
msgstr ""
msgid "SecurityReports|Oops, something doesn't seem right."
@ -50588,6 +50591,9 @@ msgstr ""
msgid "SecurityReports|scanned resources"
msgstr ""
msgid "SecurityReports|security scanning prerequisites"
msgstr ""
msgid "SecurityTraining|Enable security training to learn how to fix vulnerabilities. View security training from selected educational providers relevant to the detected vulnerability."
msgstr ""
@ -64432,6 +64438,12 @@ msgstr ""
msgid "cannot contain HTML/XML tags, including any word between angle brackets (&lt;,&gt;)."
msgstr ""
msgid "cannot have more than %{count} frameworks"
msgstr ""
msgid "cannot have more than %{count} requirements"
msgstr ""
msgid "cannot include leading slash or directory traversal."
msgstr ""

View File

@ -5,10 +5,11 @@ require "spec_helper"
RSpec.describe Pajamas::AccordionItemComponent, type: :component, feature_category: :shared do
let(:title) { "This is a title" }
let(:content) { "This is the content" }
let(:button_options) { { class: 'my-class' } }
let(:state) { :opened }
before do
render_inline(described_class.new(title: title, state: state)) do |_c|
render_inline(described_class.new(title: title, state: state, button_options: button_options)) do |_c|
content
end
end
@ -56,4 +57,10 @@ RSpec.describe Pajamas::AccordionItemComponent, type: :component, feature_catego
expect(page).to have_selector('button[aria-expanded="false"]')
end
end
describe "button_options" do
it "correctly passes options to the button" do
expect(page).to have_selector('button.my-class')
end
end
end

View File

@ -449,13 +449,14 @@ RSpec.describe Profiles::TwoFactorAuthsController, feature_category: :system_acc
end
it 'disables OTP authenticator and leaves WebAuthn devices unaffected' do
expect(user.reload.two_factor_otp_enabled?).to eq(true)
expect(user.reload.two_factor_webauthn_enabled?).to eq(true)
expect(user.two_factor_otp_enabled?).to eq(true)
expect(user.two_factor_webauthn_enabled?).to eq(true)
go
expect(user.reload.two_factor_otp_enabled?).to eq(false)
expect(user.reload.two_factor_webauthn_enabled?).to eq(true)
user.reload
expect(user.two_factor_otp_enabled?).to eq(false)
expect(user.two_factor_webauthn_enabled?).to eq(true)
end
it 'redirects to profile_two_factor_auth_path' do
@ -480,13 +481,14 @@ RSpec.describe Profiles::TwoFactorAuthsController, feature_category: :system_acc
end
it 'leaves WebAuthn devices unaffected' do
expect(user.reload.two_factor_otp_enabled?).to eq(false)
expect(user.reload.two_factor_webauthn_enabled?).to eq(true)
expect(user.two_factor_otp_enabled?).to eq(false)
expect(user.two_factor_webauthn_enabled?).to eq(true)
go
expect(user.reload.two_factor_otp_enabled?).to eq(false)
expect(user.reload.two_factor_webauthn_enabled?).to eq(true)
user.reload
expect(user.two_factor_otp_enabled?).to eq(false)
expect(user.two_factor_webauthn_enabled?).to eq(true)
end
it 'redirects to profile_two_factor_auth_path' do
@ -505,4 +507,45 @@ RSpec.describe Profiles::TwoFactorAuthsController, feature_category: :system_acc
it_behaves_like 'user must enter a valid current password'
end
end
# Covers the controller action that removes a single WebAuthn device from a
# user's two-factor configuration (requires the user's current password).
describe 'DELETE destroy_webauthn' do
  # User fixture pre-provisioned with a WebAuthn registration; reloaded per
  # example so registration counts are accurate.
  let_it_be_with_reload(:user) do
    create(:user, :two_factor_via_webauthn)
  end

  # The registration targeted for deletion, and the password the action
  # expects for re-authentication.
  let(:webauthn_id) { user.webauthn_registrations.first.id }
  let(:current_password) { user.password }

  let(:destroy_webauthn) do
    delete :destroy_webauthn, params: { id: webauthn_id, current_password: current_password }
  end

  # `go` is the entry point expected by the shared example below.
  def go
    destroy_webauthn
  end

  it 'destroys the webauthn device' do
    count = user.webauthn_registrations.count

    go
    user.reload

    # Exactly one registration should have been removed.
    expect(user.webauthn_registrations.count).to eq(count - 1)
  end

  it 'redirects to the profile two factor authentication page' do
    go
    expect(response).to redirect_to profile_two_factor_auth_path
  end

  it 'calls the Webauthn::DestroyService' do
    # Verify the controller delegates deletion to the service object with
    # (current_user, user, device id) — note the id arrives as a String param.
    service = double
    expect(Webauthn::DestroyService).to receive(:new).with(user, user, webauthn_id.to_s).and_return(service)
    expect(service).to receive(:execute)
    go
  end

  it_behaves_like 'user must enter a valid current password'
end
end

View File

@ -1,36 +0,0 @@
# frozen_string_literal: true
require 'spec_helper'
# Spec for the controller that deletes a user's WebAuthn registrations.
RSpec.describe Profiles::WebauthnRegistrationsController do
  # User fixture pre-provisioned with one WebAuthn registration.
  let(:user) { create(:user, :two_factor_via_webauthn) }

  before do
    sign_in(user)
  end

  describe '#destroy' do
    # Id of the registration to delete; the request under test.
    let(:webauthn_id) { user.webauthn_registrations.first.id }

    subject { delete :destroy, params: { id: webauthn_id } }

    it 'redirects to the profile two factor authentication page' do
      subject
      expect(response).to redirect_to profile_two_factor_auth_path
    end

    it 'destroys the webauthn registration' do
      # Deleting the only registration should reduce the count by one.
      expect { subject }.to change { user.webauthn_registrations.count }.by(-1)
    end

    it 'calls the Webauthn::DestroyService' do
      # The controller delegates to the service with (current_user, user, id);
      # the id is passed through as the raw String request param.
      service = double
      expect(Webauthn::DestroyService).to receive(:new).with(user, user, webauthn_id.to_s).and_return(service)
      expect(service).to receive(:execute)
      subject
    end
  end
end

View File

@ -935,7 +935,7 @@ RSpec.describe Projects::PipelinesController, feature_category: :continuous_inte
expect(response).to have_gitlab_http_status(:bad_request)
expect(json_response['errors']).to eq([
'test job: chosen stage does not exist; available stages are .pre, build, test, deploy, .post'
'test job: chosen stage invalid does not exist; available stages are .pre, build, test, deploy, .post'
])
expect(json_response['warnings'][0]).to include(
'jobs:build may allow multiple pipelines to run for a single action due to `rules:when`'

View File

@ -158,6 +158,7 @@ RSpec.describe 'Database schema',
oauth_access_tokens: %w[resource_owner_id application_id],
oauth_applications: %w[owner_id],
oauth_device_grants: %w[resource_owner_id application_id],
packages_nuget_symbols: %w[project_id],
packages_package_files: %w[project_id],
p_ci_builds: %w[erased_by_id trigger_request_id partition_id auto_canceled_by_partition_id execution_config_id upstream_pipeline_partition_id],
p_ci_builds_metadata: %w[project_id build_id partition_id],

View File

@ -596,7 +596,7 @@ export const mockJobs = [
];
export const mockErrors = [
'"job_1 job: chosen stage does not exist; available stages are .pre, build, test, deploy, .post"',
'"job_1 job: chosen stage test does not exist; available stages are .pre, build, test, deploy, .post"',
];
export const mockWarnings = [

Some files were not shown because too many files have changed in this diff Show More